{-# LANGUAGE AllowAmbiguousTypes #-}
{-# LANGUAGE ConstraintKinds #-}
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE DuplicateRecordFields #-}
{-# LANGUAGE ExistentialQuantification #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE NamedFieldPuns #-}
{-# LANGUAGE OverloadedLabels #-}
{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TypeApplications #-}
{-# LANGUAGE TypeFamilies #-}
-- |
-- Copyright: © 2018-2020 IOHK
-- License: Apache-2.0
--
-- Provides wallet layer functions that are used by the API layer. Uses both
-- "Cardano.Wallet.DB" and "Cardano.Wallet.Network" to realize its role as
-- an intermediary between the API, the database, and the network.
--
-- Functions of the wallet layer are often parameterized with variables
-- following the convention below:
--
-- - @s@: A __s__tate used to keep track of known addresses. Typically, possible
-- values for this parameter are described in 'Cardano.Wallet.Primitive.AddressDiscovery' sub-modules.
-- For instance @SeqState@ or @RndState@.
--
-- - @t@: A __t__arget backend which captures details specific to a particular chain
-- producer (binary formats, fee policy, networking layer).
--
-- - @k@: A __k__ey derivation scheme intrinsically connected to the underlying discovery
-- state @s@. This describes how the hierarchical structure of a wallet is
-- defined as well as the relationship between secret keys and public
-- addresses.
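--
-- For instance, 'restoreWallet' in this module is parameterized over all three
-- and pulls its dependencies from a polymorphic context @ctx@:
--
-- @
-- restoreWallet
--     :: forall ctx s t k.
--         ( HasLogger WalletLog ctx
--         , HasNetworkLayer t ctx
--         , HasDBLayer s k ctx
--         , HasGenesisData ctx
--         , IsOurs s Address
--         , IsOurs s ChimericAccount
--         )
--     => ctx
--     -> WalletId
--     -> ExceptT ErrNoSuchWallet IO ()
-- @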
module Cardano.Wallet
(
-- * Development
-- $Development
-- * WalletLayer
WalletLayer (..)
-- * Capabilities
-- $Capabilities
, HasDBLayer
, dbLayer
, HasLogger
, logger
, HasNetworkLayer
, networkLayer
, HasTransactionLayer
, transactionLayer
, HasGenesisData
, genesisData
-- * Interface
-- ** Wallet
, createWallet
, createIcarusWallet
, attachPrivateKeyFromPwd
, attachPrivateKeyFromPwdHash
, listUtxoStatistics
, readWallet
, deleteWallet
, restoreWallet
, updateWallet
, updateWalletPassphrase
, walletSyncProgress
, fetchRewardBalance
, manageRewardBalance
, rollbackBlocks
, checkWalletIntegrity
, readNextWithdrawal
, ErrWalletAlreadyExists (..)
, ErrNoSuchWallet (..)
, ErrListUTxOStatistics (..)
, ErrUpdatePassphrase (..)
, ErrFetchRewards (..)
, ErrCheckWalletIntegrity (..)
, ErrWalletNotResponding (..)
-- ** Address
, createRandomAddress
, importRandomAddresses
, listAddresses
, normalizeDelegationAddress
, ErrCreateRandomAddress(..)
, ErrImportRandomAddress(..)
-- ** Payment
, selectCoinsExternal
, selectCoinsForPayment
, estimateFeeForPayment
, signPayment
, guardCoinSelection
, ErrSelectCoinsExternal (..)
, ErrSelectForPayment (..)
, ErrSignPayment (..)
, ErrCoinSelection (..)
, ErrAdjustForFee (..)
, ErrValidateSelection
, ErrNotASequentialWallet (..)
, ErrUTxOTooSmall (..)
-- ** Migration
, selectCoinsForMigration
, ErrSelectForMigration (..)
-- ** Delegation
, PoolRetirementEpochInfo (..)
, joinStakePool
, quitStakePool
, selectCoinsForDelegation
, estimateFeeForDelegation
, signDelegation
, guardJoin
, guardQuit
, ErrJoinStakePool (..)
, ErrCannotJoin (..)
, ErrQuitStakePool (..)
, ErrCannotQuit (..)
, ErrSelectForDelegation (..)
, ErrSignDelegation (..)
-- ** Fee Estimation
, FeeEstimation (..)
, estimateFeeForCoinSelection
, feeOpts
, coinSelOpts
, handleCannotCover
-- ** Transaction
, forgetPendingTx
, listTransactions
, getTransaction
, submitExternalTx
, signTx
, submitTx
, ErrMkTx (..)
, ErrSubmitTx (..)
, ErrSubmitExternalTx (..)
, ErrRemovePendingTx (..)
, ErrPostTx (..)
, ErrDecodeSignedTx (..)
, ErrListTransactions (..)
, ErrGetTransaction (..)
, ErrNoSuchTransaction (..)
, ErrNetworkUnavailable (..)
, ErrCurrentNodeTip (..)
, ErrStartTimeLaterThanEndTime (..)
-- ** Root Key
, withRootKey
, ErrWithRootKey (..)
, ErrWrongPassphrase (..)
-- * Logging
, WalletLog (..)
) where
import Prelude hiding
( log )
import Cardano.Address.Derivation
( XPrv )
import Cardano.BM.Data.Severity
( Severity (..) )
import Cardano.BM.Data.Tracer
( HasPrivacyAnnotation (..), HasSeverityAnnotation (..) )
import Cardano.Slotting.Slot
( SlotNo (..) )
import Cardano.Wallet.DB
( DBLayer (..)
, ErrNoSuchWallet (..)
, ErrRemovePendingTx (..)
, ErrWalletAlreadyExists (..)
, PrimaryKey (..)
, sparseCheckpoints
)
import Cardano.Wallet.Network
( ErrCurrentNodeTip (..)
, ErrGetAccountBalance (..)
, ErrNetworkUnavailable (..)
, ErrPostTx (..)
, FollowAction (..)
, FollowExit (..)
, FollowLog (..)
, NetworkLayer (..)
, follow
)
import Cardano.Wallet.Primitive.AddressDerivation
( DelegationAddress (..)
, Depth (..)
, DerivationType (..)
, ErrWrongPassphrase (..)
, HardDerivation (..)
, Index (..)
, MkKeyFingerprint (..)
, Passphrase
, PaymentAddress (..)
, WalletKey (..)
, checkPassphrase
, deriveRewardAccount
, encryptPassphrase
, liftIndex
, preparePassphrase
)
import Cardano.Wallet.Primitive.AddressDerivation.Byron
( ByronKey )
import Cardano.Wallet.Primitive.AddressDerivation.Icarus
( IcarusKey )
import Cardano.Wallet.Primitive.AddressDiscovery
( CompareDiscovery (..)
, GenChange (..)
, HasRewardAccount (..)
, IsOurs (..)
, IsOwned (..)
, KnownAddresses (..)
)
import Cardano.Wallet.Primitive.AddressDiscovery.Random
( RndState )
import Cardano.Wallet.Primitive.AddressDiscovery.Sequential
( SeqState
, defaultAddressPoolGap
, mkSeqStateFromRootXPrv
, mkUnboundedAddressPoolGap
, shrinkPool
)
import Cardano.Wallet.Primitive.CoinSelection
( CoinSelection (..)
, CoinSelectionOptions (..)
, ErrCoinSelection (..)
, feeBalance
)
import Cardano.Wallet.Primitive.CoinSelection.Migration
( depleteUTxO, idealBatchSize )
import Cardano.Wallet.Primitive.Fee
( ErrAdjustForFee (..)
, Fee (..)
, FeeOptions (..)
, OnDanglingChange (..)
, adjustForFee
)
import Cardano.Wallet.Primitive.Model
( Wallet
, applyBlocks
, availableUTxO
, blockchainParameters
, currentTip
, getState
, initWallet
, updateState
)
import Cardano.Wallet.Primitive.Slotting
( TimeInterpreter, slotRangeFromTimeRange, startTime )
import Cardano.Wallet.Primitive.SyncProgress
( SyncProgress, SyncTolerance (..), syncProgress )
import Cardano.Wallet.Primitive.Types
( Address (..)
, AddressState (..)
, Block (..)
, BlockHeader (..)
, ChimericAccount (..)
, Coin (..)
, DelegationCertificate (..)
, Direction (..)
, FeePolicy (LinearFee)
, GenesisParameters (..)
, Hash (..)
, IsDelegatingTo (..)
, NetworkParameters (..)
, PassphraseScheme (..)
, PoolId (..)
, PoolLifeCycleStatus (..)
, ProtocolParameters (..)
, Range (..)
, SealedTx
, SortOrder (..)
, TransactionInfo (..)
, Tx
, TxMeta (..)
, TxOut (..)
, TxStatus (..)
, UTxO (..)
, UTxOStatistics
, UnsignedTx (..)
, WalletDelegation (..)
, WalletDelegationStatus (..)
, WalletId (..)
, WalletMetadata (..)
, WalletName (..)
, WalletPassphraseInfo (..)
, computeUtxoStatistics
, dlgCertPoolId
, fromTransactionInfo
, log10
, wholeRange
)
import Cardano.Wallet.Transaction
( DelegationAction (..)
, ErrDecodeSignedTx (..)
, ErrMkTx (..)
, ErrValidateSelection
, TransactionLayer (..)
)
import Control.Exception
( Exception )
import Control.Monad
( forM, forM_, replicateM, unless, when )
import Control.Monad.IO.Class
( MonadIO, liftIO )
import Control.Monad.Trans.Class
( lift )
import Control.Monad.Trans.Except
( ExceptT (..)
, catchE
, except
, mapExceptT
, runExceptT
, throwE
, withExceptT
)
import Control.Monad.Trans.Maybe
( MaybeT (..), maybeToExceptT )
import Control.Monad.Trans.State.Strict
( runStateT, state )
import Control.Tracer
( Tracer, contramap, traceWith )
import Data.ByteString
( ByteString )
import Data.Coerce
( coerce )
import Data.Either
( partitionEithers )
import Data.Either.Extra
( eitherToMaybe )
import Data.Foldable
( fold )
import Data.Function
( (&) )
import Data.Functor
( ($>) )
import Data.Generics.Internal.VL.Lens
( Lens', view, (^.) )
import Data.Generics.Labels
()
import Data.Generics.Product.Typed
( HasType, typed )
import Data.List
( scanl' )
import Data.List.NonEmpty
( NonEmpty )
import Data.Maybe
( mapMaybe )
import Data.Quantity
( Quantity (..) )
import Data.Set
( Set )
import Data.Text.Class
( ToText (..) )
import Data.Time.Clock
( UTCTime, getCurrentTime )
import Data.Vector.Shuffle
( shuffle )
import Data.Word
( Word16, Word64 )
import Fmt
( blockListF, pretty, (+|), (|+) )
import GHC.Generics
( Generic )
import Numeric.Natural
( Natural )
import Safe
( lastMay )
import Statistics.Quantile
( medianUnbiased, quantiles )
import qualified Cardano.Wallet.Primitive.AddressDiscovery.Random as Rnd
import qualified Cardano.Wallet.Primitive.AddressDiscovery.Sequential as Seq
import qualified Cardano.Wallet.Primitive.CoinSelection.Random as CoinSelection
import qualified Cardano.Wallet.Primitive.Types as W
import qualified Data.List as L
import qualified Data.List.NonEmpty as NE
import qualified Data.Map as Map
import qualified Data.Set as Set
import qualified Data.Text as T
import qualified Data.Vector as V
-- $Development
-- __Naming Conventions__
--
-- Components inside a particular context `ctx` can be called via dedicated
-- lenses (see Cardano.Wallet#Capabilities). These components are extracted from the context
-- in a @where@ clause according to the following naming convention:
--
-- - @db = ctx ^. dbLayer \@s \@k@ for the 'DBLayer'.
-- - @tr = ctx ^. logger@ for the Logger.
-- - @nw = ctx ^. networkLayer \@t@ for the 'NetworkLayer'.
-- - @tl = ctx ^. transactionLayer \@t \@k@ for the 'TransactionLayer'.
-- - @re = ctx ^. workerRegistry@ for the 'WorkerRegistry'.
--
-- __Troubleshooting__
--
-- @
-- • Overlapping instances for HasType (DBLayer IO s k) ctx
-- arising from a use of ‘myFunction’
-- Matching instances:
-- @
--
-- Occurs when a particular function is missing a top-level constraint
-- (because it uses another function that demands such a constraint). Here,
-- `myFunction` needs its surrounding context `ctx` to have a `DBLayer` but
-- the constraint is missing from its host function.
--
-- __Fix__: Add "@HasDBLayer s t k@" as a class-constraint to the surrounding function.
--
-- @
-- • Overlapping instances for HasType (DBLayer IO s k0) ctx
-- arising from a use of ‘myFunction’
-- Matching givens (or their superclasses):
-- @
--
-- Occurs when a function is called in a context where type-level parameters
-- can be inferred. Here, `myFunction` is called but it is unclear
-- whether the parameter `k0` of its context is the same as the one
-- from the function at the call-site.
--
-- __Fix__: Add type-applications at the call-site "@myFunction \@ctx \@s \@k@"
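--
-- As a quick sketch (function names hypothetical), both fixes combined look
-- like this:
--
-- @
-- myHostFunction
--     :: forall ctx s k. (HasDBLayer s k ctx)
--     => ctx
--     -> IO ()
-- myHostFunction ctx = myFunction \@ctx \@s \@k ctx
-- @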
data WalletLayer s t (k :: Depth -> * -> *)
= WalletLayer
(Tracer IO WalletLog)
(Block, NetworkParameters, SyncTolerance)
(NetworkLayer IO t Block)
(TransactionLayer t k)
(DBLayer IO s k)
deriving (Generic)
{-------------------------------------------------------------------------------
Capabilities
-------------------------------------------------------------------------------}
-- $Capabilities
-- Each function in the wallet layer is defined in terms of a non-specialized
-- context `ctx`. That context may require some extra capabilities via
-- class-constraints in the function signature. Capabilities are expressed in
-- the form of "@HasXXX@" class-constraints, sometimes with extra type parameters.
--
-- For example:
--
-- @
-- listWallets
-- :: forall ctx s k.
-- ( HasDBLayer s k ctx
-- )
-- => ctx
-- -> IO [WalletId]
-- @
--
-- Requires that the given context has access to a database layer 'DBLayer'
-- parameterized over the wallet state and a key derivation scheme. Components
-- are pulled from the context generically (i.e. the concrete `ctx` must derive
-- 'Generic') using their associated type. The concrete `ctx` is therefore
-- expected to be a product-type of all the necessary components.
--
-- One can build an interface using only a subset of the wallet layer
-- capabilities and functions, for instance, something to fiddle with wallets
-- and their metadata does not require any networking layer.
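--
-- As a minimal sketch (the type name is hypothetical), such a reduced context
-- could be:
--
-- @
-- data MinimalCtx s k = MinimalCtx (DBLayer IO s k)
--     deriving (Generic)
-- @
--
-- which satisfies @HasDBLayer s k (MinimalCtx s k)@ via the generic 'HasType'
-- machinery, and is enough to call functions such as 'readWallet'.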
type HasDBLayer s k = HasType (DBLayer IO s k)
type HasGenesisData = HasType (Block, NetworkParameters, SyncTolerance)
type HasLogger msg = HasType (Tracer IO msg)
-- | This module is only interested in one block type and one transaction type.
-- This constraint hides that choice, for some ease of use.
type HasNetworkLayer t = HasType (NetworkLayer IO t Block)
type HasTransactionLayer t k = HasType (TransactionLayer t k)
dbLayer
:: forall s k ctx. HasDBLayer s k ctx
=> Lens' ctx (DBLayer IO s k)
dbLayer =
typed @(DBLayer IO s k)
genesisData
:: forall ctx. HasGenesisData ctx
=> Lens' ctx (Block, NetworkParameters, SyncTolerance)
genesisData =
typed @(Block, NetworkParameters, SyncTolerance)
logger
:: forall msg ctx. HasLogger msg ctx
=> Lens' ctx (Tracer IO msg)
logger =
typed @(Tracer IO msg)
networkLayer
:: forall t ctx. (HasNetworkLayer t ctx)
=> Lens' ctx (NetworkLayer IO t Block)
networkLayer =
typed @(NetworkLayer IO t Block)
transactionLayer
:: forall t k ctx. (HasTransactionLayer t k ctx)
=> Lens' ctx (TransactionLayer t k)
transactionLayer =
typed @(TransactionLayer t k)
{-------------------------------------------------------------------------------
Wallet
-------------------------------------------------------------------------------}
-- | Initialise and store a new wallet, returning its ID.
createWallet
:: forall ctx s k.
( HasGenesisData ctx
, HasDBLayer s k ctx
, IsOurs s Address
, IsOurs s ChimericAccount
)
=> ctx
-> WalletId
-> WalletName
-> s
-> ExceptT ErrWalletAlreadyExists IO WalletId
createWallet ctx wid wname s = db & \DBLayer{..} -> do
let (hist, cp) = initWallet block0 gp s
now <- lift getCurrentTime
let meta = WalletMetadata
{ name = wname
, creationTime = now
, passphraseInfo = Nothing
, delegation = WalletDelegation NotDelegating []
}
mapExceptT atomically $
initializeWallet (PrimaryKey wid) cp meta hist pp $> wid
where
db = ctx ^. dbLayer @s @k
(block0, NetworkParameters gp pp, _) = ctx ^. genesisData
-- | Initialise and store a new legacy Icarus wallet. These wallets are
-- intrinsically sequential, but, in the incentivized testnet, we only have
-- access to a snapshot of the MainNet.
--
-- To work around this, we scan the genesis block with an arbitrarily big gap and
-- resort to a default gap afterwards.
createIcarusWallet
:: forall ctx s k n.
( HasGenesisData ctx
, HasDBLayer s k ctx
, PaymentAddress n k
, k ~ IcarusKey
, s ~ SeqState n k
)
=> ctx
-> WalletId
-> WalletName
-> (k 'RootK XPrv, Passphrase "encryption")
-> ExceptT ErrWalletAlreadyExists IO WalletId
createIcarusWallet ctx wid wname credentials = db & \DBLayer{..} -> do
let s = mkSeqStateFromRootXPrv @n credentials $
mkUnboundedAddressPoolGap 10000
let (hist, cp) = initWallet block0 gp s
let addrs = map address . concatMap (view #outputs . fst) $ hist
let g = defaultAddressPoolGap
let s' = Seq.SeqState
(shrinkPool @n (liftPaymentAddress @n) addrs g (Seq.internalPool s))
(shrinkPool @n (liftPaymentAddress @n) addrs g (Seq.externalPool s))
(Seq.pendingChangeIxs s)
(Seq.rewardAccountKey s)
now <- lift getCurrentTime
let meta = WalletMetadata
{ name = wname
, creationTime = now
, passphraseInfo = Nothing
, delegation = WalletDelegation NotDelegating []
}
let pk = PrimaryKey wid
mapExceptT atomically $
initializeWallet pk (updateState s' cp) meta hist pp $> wid
where
db = ctx ^. dbLayer @s @k
(block0, NetworkParameters gp pp, _) = ctx ^. genesisData
-- | Check whether a wallet is in good shape when restarting a worker.
checkWalletIntegrity
:: forall ctx s k. HasDBLayer s k ctx
=> ctx
-> WalletId
-> GenesisParameters
-> ExceptT ErrCheckWalletIntegrity IO ()
checkWalletIntegrity ctx wid gp = db & \DBLayer{..} -> mapExceptT atomically $ do
cp <- withExceptT ErrCheckWalletIntegrityNoSuchWallet $ withNoSuchWallet wid $
readCheckpoint (PrimaryKey wid)
whenDifferentGenesis (blockchainParameters cp) gp $ throwE $
ErrCheckIntegrityDifferentGenesis
(getGenesisBlockHash gp)
(getGenesisBlockHash (blockchainParameters cp))
where
db = ctx ^. dbLayer @s @k
whenDifferentGenesis bp1 bp2 = when $
(bp1 ^. #getGenesisBlockHash /= bp2 ^. #getGenesisBlockHash) ||
(bp1 ^. #getGenesisBlockDate /= bp2 ^. #getGenesisBlockDate)
-- | Retrieve the wallet state for the wallet with the given ID.
readWallet
:: forall ctx s k. HasDBLayer s k ctx
=> ctx
-> WalletId
-> ExceptT ErrNoSuchWallet IO (Wallet s, WalletMetadata, Set Tx)
readWallet ctx wid = db & \DBLayer{..} -> mapExceptT atomically $ do
let pk = PrimaryKey wid
cp <- withNoSuchWallet wid $ readCheckpoint pk
meta <- withNoSuchWallet wid $ readWalletMeta pk
pending <- lift $ readTxHistory pk Nothing Descending wholeRange (Just Pending)
pure (cp, meta, Set.fromList (fromTransactionInfo <$> pending))
where
db = ctx ^. dbLayer @s @k
readWalletProtocolParameters
:: forall ctx s k. HasDBLayer s k ctx
=> ctx
-> WalletId
-> ExceptT ErrNoSuchWallet IO ProtocolParameters
readWalletProtocolParameters ctx wid = db & \DBLayer{..} ->
mapExceptT atomically $
withNoSuchWallet wid $
readProtocolParameters (PrimaryKey wid)
where
db = ctx ^. dbLayer @s @k
walletSyncProgress
:: forall ctx s t.
( HasGenesisData ctx
, HasNetworkLayer t ctx
)
=> ctx
-> Wallet s
-> IO SyncProgress
walletSyncProgress ctx w = do
let tip = currentTip w
syncProgress st ti tip =<< getCurrentTime
where
(_,_,st) = ctx ^. genesisData
ti :: TimeInterpreter IO
ti = timeInterpreter (ctx ^. networkLayer @t)
-- | Update a wallet's metadata with the given update function.
updateWallet
:: forall ctx s k.
( HasDBLayer s k ctx
)
=> ctx
-> WalletId
-> (WalletMetadata -> WalletMetadata)
-> ExceptT ErrNoSuchWallet IO ()
updateWallet ctx wid modify = db & \DBLayer{..} -> mapExceptT atomically $ do
meta <- withNoSuchWallet wid $ readWalletMeta (PrimaryKey wid)
putWalletMeta (PrimaryKey wid) (modify meta)
where
db = ctx ^. dbLayer @s @k
-- | Change a wallet's passphrase to the given passphrase.
updateWalletPassphrase
:: forall ctx s k.
( HasDBLayer s k ctx
, WalletKey k
)
=> ctx
-> WalletId
-> (Passphrase "raw", Passphrase "raw")
-> ExceptT ErrUpdatePassphrase IO ()
updateWalletPassphrase ctx wid (old, new) =
withRootKey @ctx @s @k ctx wid (coerce old) ErrUpdatePassphraseWithRootKey
$ \xprv scheme -> withExceptT ErrUpdatePassphraseNoSuchWallet $ do
-- NOTE
-- /!\ Important /!\
-- 'attachPrivateKeyFromPwd' uses 'EncryptWithPBKDF2', so
-- regardless of the passphrase's current scheme, we'll always
-- re-encrypt it using the new scheme.
let oldP = preparePassphrase scheme old
let newP = preparePassphrase EncryptWithPBKDF2 new
let xprv' = changePassphrase oldP newP xprv
attachPrivateKeyFromPwd @ctx @s @k ctx wid (xprv', newP)
-- | List the wallet's UTxO statistics.
listUtxoStatistics
:: forall ctx s k. HasDBLayer s k ctx
=> ctx
-> WalletId
-> ExceptT ErrListUTxOStatistics IO UTxOStatistics
listUtxoStatistics ctx wid = do
(wal, _, pending) <- withExceptT
ErrListUTxOStatisticsNoSuchWallet (readWallet @ctx @s @k ctx wid)
let utxo = availableUTxO @s pending wal
pure $ computeUtxoStatistics log10 utxo
-- | Restore a wallet from its current tip up to the network tip.
--
-- This function returns immediately, starting a worker thread in the
-- background that will fetch and apply remaining blocks until the
-- network tip is reached or until failure.
restoreWallet
:: forall ctx s t k.
( HasLogger WalletLog ctx
, HasNetworkLayer t ctx
, HasDBLayer s k ctx
, HasGenesisData ctx
, IsOurs s Address
, IsOurs s ChimericAccount
)
=> ctx
-> WalletId
-> ExceptT ErrNoSuchWallet IO ()
restoreWallet ctx wid = db & \DBLayer{..} -> do
cps <- liftIO $ atomically $ listCheckpoints (PrimaryKey wid)
let forward bs (h, ps) = run $ do
restoreBlocks @ctx @s @k @t ctx wid bs h
saveParams @ctx @s @k ctx wid ps
liftIO (follow nw tr cps forward (view #header)) >>= \case
FollowInterrupted -> pure ()
FollowFailure ->
restoreWallet @ctx @s @t @k ctx wid
FollowRollback point -> do
rollbackBlocks @ctx @s @k ctx wid point
restoreWallet @ctx @s @t @k ctx wid
where
db = ctx ^. dbLayer @s @k
nw = ctx ^. networkLayer @t
tr = contramap MsgFollow (ctx ^. logger @WalletLog)
run :: ExceptT ErrNoSuchWallet IO () -> IO (FollowAction ErrNoSuchWallet)
run = fmap (either ExitWith (const Continue)) . runExceptT
-- | Rewind the UTxO snapshots, transaction history and other information to
-- the earliest point in the past that is before or at the given point of rollback.
rollbackBlocks
:: forall ctx s k.
( HasLogger WalletLog ctx
, HasDBLayer s k ctx
)
=> ctx
-> WalletId
-> SlotNo
-> ExceptT ErrNoSuchWallet IO ()
rollbackBlocks ctx wid point = db & \DBLayer{..} -> do
lift $ traceWith tr $ MsgTryingRollback point
point' <- mapExceptT atomically $ rollbackTo (PrimaryKey wid) point
lift $ traceWith tr $ MsgRolledBack point'
where
db = ctx ^. dbLayer @s @k
tr = ctx ^. logger @WalletLog
-- | Apply the given blocks to the wallet and update the wallet state,
-- transaction history and corresponding metadata.
restoreBlocks
:: forall ctx s k t.
( HasLogger WalletLog ctx
, HasDBLayer s k ctx
, HasGenesisData ctx
, IsOurs s Address
, IsOurs s ChimericAccount
, HasNetworkLayer t ctx
)
=> ctx
-> WalletId
-> NonEmpty Block
-> BlockHeader
-> ExceptT ErrNoSuchWallet IO ()
restoreBlocks ctx wid blocks nodeTip = db & \DBLayer{..} -> mapExceptT atomically $ do
cp <- withNoSuchWallet wid (readCheckpoint $ PrimaryKey wid)
meta <- withNoSuchWallet wid (readWalletMeta $ PrimaryKey wid)
let gp = blockchainParameters cp
unless (cp `isParentOf` NE.head blocks) $ fail $ T.unpack $ T.unwords
[ "restoreBlocks: given chain isn't a valid continuation."
, "Wallet is at:", pretty (currentTip cp)
, "but the given chain continues starting from:"
, pretty (header (NE.head blocks))
]
let (filteredBlocks, cps) = NE.unzip $ applyBlocks @s blocks cp
let slotPoolDelegations =
[ (slotNo, cert)
| let slots = view #slotNo . view #header <$> blocks
, let delegations = view #delegations <$> filteredBlocks
, (slotNo, certs) <- NE.toList $ NE.zip slots delegations
, cert <- certs
]
let txs = fold $ view #transactions <$> filteredBlocks
let k = gp ^. #getEpochStability
let localTip = currentTip $ NE.last cps
putTxHistory (PrimaryKey wid) txs
forM_ slotPoolDelegations $ \delegation@(slotNo, cert) -> do
liftIO $ logDelegation delegation
putDelegationCertificate (PrimaryKey wid) cert slotNo
let unstable = sparseCheckpoints k (nodeTip ^. #blockHeight)
forM_ (NE.init cps) $ \cp' -> do
let (Quantity h) = currentTip cp' ^. #blockHeight
when (fromIntegral h `elem` unstable) $ do
liftIO $ logCheckpoint cp'
putCheckpoint (PrimaryKey wid) cp'
liftIO $ logCheckpoint (NE.last cps)
putCheckpoint (PrimaryKey wid) (NE.last cps)
prune (PrimaryKey wid)
liftIO $ do
progress <- walletSyncProgress @ctx @s @t ctx (NE.last cps)
traceWith tr $ MsgWalletMetadata meta
traceWith tr $ MsgSyncProgress progress
traceWith tr $ MsgDiscoveredTxs txs
traceWith tr $ MsgTip localTip
traceWith tr $ MsgBlocks blocks
traceWith tr $ MsgDiscoveredTxsContent txs
where
db = ctx ^. dbLayer @s @k
tr = ctx ^. logger @WalletLog
logCheckpoint :: Wallet s -> IO ()
logCheckpoint cp = traceWith tr $ MsgCheckpoint (currentTip cp)
logDelegation :: (SlotNo, DelegationCertificate) -> IO ()
logDelegation (slotNo, cert) = traceWith tr $ MsgDelegation slotNo cert
isParentOf :: Wallet s -> Block -> Bool
isParentOf cp = (== parent) . parentHeaderHash . header
where parent = headerHash $ currentTip cp
-- | Store the node tip params into the wallet database
saveParams
:: forall ctx s k.
( HasDBLayer s k ctx
)
=> ctx
-> WalletId
-> ProtocolParameters
-> ExceptT ErrNoSuchWallet IO ()
saveParams ctx wid params = db & \DBLayer{..} ->
mapExceptT atomically $ putProtocolParameters (PrimaryKey wid) params
where
db = ctx ^. dbLayer @s @k
-- | Remove an existing wallet. Note that there's no particular work to
-- be done regarding the restoration worker as it will simply terminate
-- on the next tick when noticing that the corresponding wallet is gone.
deleteWallet
:: forall ctx s k.
( HasDBLayer s k ctx
)
=> ctx
-> WalletId
-> ExceptT ErrNoSuchWallet IO ()
deleteWallet ctx wid = db & \DBLayer{..} -> do
mapExceptT atomically $ removeWallet (PrimaryKey wid)
where
db = ctx ^. dbLayer @s @k
-- | Fetch the cached reward balance of a given wallet from the database.
fetchRewardBalance
:: forall ctx s k.
( HasDBLayer s k ctx
)
=> ctx
-> WalletId
-> IO (Quantity "lovelace" Word64)
fetchRewardBalance ctx wid = db & \DBLayer{..} ->
atomically $ readDelegationRewardBalance pk
where
pk = PrimaryKey wid
db = ctx ^. dbLayer @s @k
-- | Read the current withdrawal capacity of a wallet. Note that this simply
-- returns 0 if:
--
-- a) There's no reward account for this type of wallet.
-- b) The current reward value is too small to be considered (adding it would
-- cost more than its value).
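--
-- For example (a sketch of the rule implemented below): if adding the
-- withdrawal to an otherwise empty selection raises the minimum fee by @c@
-- lovelace, a reward balance @w@ is only reported when @w >= 2 * c@;
-- otherwise, this returns 0.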
readNextWithdrawal
:: forall ctx s t k.
( HasDBLayer s k ctx
, HasTransactionLayer t k ctx
)
=> ctx
-> WalletId
-> IO (Quantity "lovelace" Word64)
readNextWithdrawal ctx wid = db & \DBLayer{..} -> do
(pp, withdrawal) <- atomically $ (,)
<$> readProtocolParameters pk
<*> fmap getQuantity (readDelegationRewardBalance pk)
case pp of
-- May happen if done very early, in which case, rewards are probably
-- not worth considering anyway.
Nothing -> pure (Quantity 0)
Just ProtocolParameters{txParameters} -> do
let policy = W.getFeePolicy txParameters
let costOfWithdrawal =
minFee policy (mempty { withdrawal })
-
minFee policy mempty
pure $ if toInteger withdrawal < 2 * costOfWithdrawal
then Quantity 0
else Quantity withdrawal
where
db = ctx ^. dbLayer @s @k
tl = ctx ^. transactionLayer @t @k
pk = PrimaryKey wid
minFee :: FeePolicy -> CoinSelection -> Integer
minFee policy = fromIntegral . getFee . minimumFee tl policy Nothing
-- | Query the node for the reward balance of a given wallet.
--
-- Rather than force all callers of 'readWallet' to wait for fetching the
-- account balance (via the 'NetworkLayer'), we expose this function for it.
queryRewardBalance
:: forall ctx s t k.
( HasDBLayer s k ctx
, HasNetworkLayer t ctx
, HasRewardAccount s k
)
=> ctx
-> WalletId
-> ExceptT ErrFetchRewards IO (Quantity "lovelace" Word64)
queryRewardBalance ctx wid = db & \DBLayer{..} -> do
cp <- withExceptT ErrFetchRewardsNoSuchWallet
. mapExceptT atomically
. withNoSuchWallet wid
$ readCheckpoint pk
mapExceptT (fmap handleErr)
. getAccountBalance nw
. toChimericAccount @s @k
. rewardAccountKey
$ getState cp
where
pk = PrimaryKey wid
db = ctx ^. dbLayer @s @k
nw = ctx ^. networkLayer @t
handleErr = \case
Right x -> Right x
Left (ErrGetAccountBalanceAccountNotFound _) ->
Right $ Quantity 0
Left (ErrGetAccountBalanceNetworkUnreachable e) ->
Left $ ErrFetchRewardsNetworkUnreachable e
manageRewardBalance
:: forall ctx s t k.
( HasLogger WalletLog ctx
, HasNetworkLayer t ctx
, HasDBLayer s k ctx
, HasRewardAccount s k
, ctx ~ WalletLayer s t k
)
=> ctx
-> WalletId
-> IO ()
manageRewardBalance ctx wid = db & \DBLayer{..} -> do
watchNodeTip $ \bh -> do
traceWith tr $ MsgRewardBalanceQuery bh
query <- runExceptT $ queryRewardBalance @ctx @s @t @k ctx wid
traceWith tr $ MsgRewardBalanceResult query
case query of
Right amt -> do
res <- atomically $ runExceptT $ putDelegationRewardBalance pk amt
-- It can happen that the wallet doesn't exist _yet_, whereas we
-- already have a reward balance. If that's the case, we log and
-- move on.
case res of
Left err -> traceWith tr $ MsgRewardBalanceNoSuchWallet err
Right () -> pure ()
Left _err ->
-- Occasionally failing to query is generally not fatal. It will
-- just update the balance next time the tip changes.
pure ()
traceWith tr MsgRewardBalanceExited
where
pk = PrimaryKey wid
db = ctx ^. dbLayer @s @k
NetworkLayer{watchNodeTip} = ctx ^. networkLayer @t
tr = ctx ^. logger @WalletLog
{-------------------------------------------------------------------------------
Address
-------------------------------------------------------------------------------}
-- | List all addresses of a wallet with their metadata. Addresses
-- are ordered from the most-recently-discovered to the oldest known.
listAddresses
:: forall ctx s k.
( HasDBLayer s k ctx
, IsOurs s Address
, CompareDiscovery s
, KnownAddresses s
)
=> ctx
-> WalletId
-> (s -> Address -> Maybe Address)
-- ^ A function to normalize addresses, so that non-delegation addresses
-- found in the transaction history are shown with their delegation
-- settings. Use 'Just' for wallets without delegation settings.
-> ExceptT ErrNoSuchWallet IO [(Address, AddressState)]
listAddresses ctx wid normalize = db & \DBLayer{..} -> do
(s, txs) <- mapExceptT atomically $ (,)
<$> (getState <$> withNoSuchWallet wid (readCheckpoint primaryKey))
<*> lift (readTxHistory primaryKey Nothing Descending wholeRange Nothing)
let maybeIsOurs (TxOut a _) = if fst (isOurs a s)
then normalize s a
else Nothing
let usedAddrs
= Set.fromList
$ concatMap
(mapMaybe maybeIsOurs . W.outputs)
(fromTransactionInfo <$> txs)
let knownAddrs =
L.sortBy (compareDiscovery s) (mapMaybe (normalize s) $ knownAddresses s)
let withAddressState addr =
(addr, if addr `Set.member` usedAddrs then Used else Unused)
return $ withAddressState <$> knownAddrs
where
db = ctx ^. dbLayer @s @k
primaryKey = PrimaryKey wid
createRandomAddress
:: forall ctx s k n.
( HasDBLayer s k ctx
, PaymentAddress n ByronKey
, s ~ RndState n
, k ~ ByronKey
)
=> ctx
-> WalletId
-> Passphrase "raw"
-> Maybe (Index 'Hardened 'AddressK)
-> ExceptT ErrCreateRandomAddress IO Address
createRandomAddress ctx wid pwd mIx = db & \DBLayer{..} ->
withRootKey @ctx @s @k ctx wid pwd ErrCreateAddrWithRootKey $ \xprv scheme -> do
mapExceptT atomically $ do
cp <- withExceptT ErrCreateAddrNoSuchWallet $
withNoSuchWallet wid (readCheckpoint (PrimaryKey wid))
let s = getState cp
let accIx = Rnd.accountIndex s
(path, gen') <- case mIx of
Just addrIx | isKnownIndex accIx addrIx s ->
throwE $ ErrIndexAlreadyExists addrIx
Just addrIx ->
pure ((liftIndex accIx, liftIndex addrIx), Rnd.gen s)
Nothing ->
pure $ Rnd.findUnusedPath (Rnd.gen s) accIx (Rnd.unavailablePaths s)
let prepared = preparePassphrase scheme pwd
let addr = Rnd.deriveRndStateAddress @n xprv prepared path
let s' = (Rnd.addDiscoveredAddress addr path s) { Rnd.gen = gen' }
withExceptT ErrCreateAddrNoSuchWallet $
putCheckpoint (PrimaryKey wid) (updateState s' cp)
pure addr
where
db = ctx ^. dbLayer @s @k
isKnownIndex accIx addrIx s =
(liftIndex accIx, liftIndex addrIx) `Set.member` Rnd.unavailablePaths s
importRandomAddresses
:: forall ctx s k n.
( HasDBLayer s k ctx
, s ~ RndState n
, k ~ ByronKey
)
=> ctx
-> WalletId
-> [Address]
-> ExceptT ErrImportRandomAddress IO ()
importRandomAddresses ctx wid addrs = db & \DBLayer{..} -> mapExceptT atomically $ do
cp <- withExceptT ErrImportAddrNoSuchWallet
$ withNoSuchWallet wid (readCheckpoint (PrimaryKey wid))
let s = getState cp
ours = scanl' (\(_, t) addr -> isOurs addr t) (True, s) addrs
s' = snd (last ours)
if (not . any fst) ours
then throwE ErrImportAddrDoesNotBelong
else withExceptT ErrImportAddrNoSuchWallet $
putCheckpoint (PrimaryKey wid) (updateState s' cp)
where
db = ctx ^. dbLayer @s @k
-- NOTE
-- Addresses coming from the transaction history might be payment or
-- delegation addresses. So we normalize them all to be delegation addresses
-- to make sure that we compare them correctly.
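--
-- A typical caller (sketch) passes this as the normalization function of
-- 'listAddresses':
--
-- @
-- listAddresses \@ctx \@s \@k ctx wid (normalizeDelegationAddress \@s \@k \@n)
-- @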
normalizeDelegationAddress
:: forall s k n.
( DelegationAddress n k
, HasRewardAccount s k
)
=> s
-> Address
-> Maybe Address
normalizeDelegationAddress s addr = do
fingerprint <- eitherToMaybe (paymentKeyFingerprint addr)
pure $ liftDelegationAddress @n fingerprint (rewardAccountKey @s @k s)
{-------------------------------------------------------------------------------
Transaction
-------------------------------------------------------------------------------}
coinSelOpts
:: TransactionLayer t k
-> Quantity "byte" Word16
-> CoinSelectionOptions (ErrValidateSelection t)
coinSelOpts tl txMaxSize = CoinSelectionOptions
{ maximumNumberOfInputs = estimateMaxNumberOfInputs tl txMaxSize
, validate = validateSelection tl
}
feeOpts
:: TransactionLayer t k
-> Maybe DelegationAction
-> FeePolicy
-> FeeOptions
feeOpts tl action feePolicy = FeeOptions
{ estimateFee = minimumFee tl feePolicy action
, dustThreshold = minBound
, onDanglingChange = if allowUnbalancedTx tl then SaveMoney else PayAndBalance
}
-- | Prepare a transaction and automatically select inputs from the
-- wallet to cover the requested outputs. Note that this only runs
-- coin selection for the given outputs. In order to construct (and
-- sign) an actual transaction, use 'signPayment'.
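--
-- A complete payment thus chains selection, signing, and submission, with each
-- step's error type lifted via 'withExceptT' (a sketch, mirroring how
-- 'joinStakePool' chains its own steps):
--
-- @
-- cs <- withExceptT ... $
--     selectCoinsForPayment \@ctx \@s \@t \@k \@e ctx wid recipients withdrawal
-- (tx, meta, time, sealedTx) <- withExceptT ... $
--     signPayment \@ctx \@s \@t \@k ctx wid argGenChange pwd cs
-- withExceptT ... $ submitTx \@ctx \@s \@t \@k ctx wid (tx, meta, sealedTx)
-- @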
selectCoinsForPayment
:: forall ctx s t k e.
( HasTransactionLayer t k ctx
, HasLogger WalletLog ctx
, HasDBLayer s k ctx
, e ~ ErrValidateSelection t
)
=> ctx
-> WalletId
-> NonEmpty TxOut
-> Quantity "lovelace" Word64
-> ExceptT (ErrSelectForPayment e) IO CoinSelection
selectCoinsForPayment ctx wid recipients withdrawal = do
(utxo, txp, minUtxo) <- withExceptT ErrSelectForPaymentNoSuchWallet $
selectCoinsSetup @ctx @s @k ctx wid
cs <- selectCoinsForPaymentFromUTxO @ctx @t @k @e ctx utxo txp recipients withdrawal
withExceptT ErrSelectForPaymentMinimumUTxOValue $ except $
guardCoinSelection minUtxo cs
pure cs
-- | Retrieve wallet data which is needed for all types of coin selections.
selectCoinsSetup
:: forall ctx s k.
( HasDBLayer s k ctx
)
=> ctx
-> WalletId
-> ExceptT ErrNoSuchWallet IO (W.UTxO, W.TxParameters, W.Coin)
selectCoinsSetup ctx wid = do
(wal, _, pending) <- readWallet @ctx @s @k ctx wid
txp <- txParameters <$> readWalletProtocolParameters @ctx @s @k ctx wid
minUTxO <- minimumUTxOvalue <$> readWalletProtocolParameters @ctx @s @k ctx wid
let utxo = availableUTxO @s pending wal
return (utxo, txp, minUTxO)
selectCoinsForPaymentFromUTxO
:: forall ctx t k e.
( HasTransactionLayer t k ctx
, HasLogger WalletLog ctx
, e ~ ErrValidateSelection t
)
=> ctx
-> W.UTxO
-> W.TxParameters
-> NonEmpty TxOut
-> Quantity "lovelace" Word64
-> ExceptT (ErrSelectForPayment e) IO CoinSelection
selectCoinsForPaymentFromUTxO ctx utxo txp recipients withdrawal = do
lift . traceWith tr $ MsgPaymentCoinSelectionStart utxo txp recipients
(sel, utxo') <- withExceptT ErrSelectForPaymentCoinSelection $ do
let opts = coinSelOpts tl (txp ^. #getTxMaxSize)
CoinSelection.random opts recipients withdrawal utxo
lift . traceWith tr $ MsgPaymentCoinSelection sel
let feePolicy = feeOpts tl Nothing (txp ^. #getFeePolicy)
withExceptT ErrSelectForPaymentFee $ do
balancedSel <- adjustForFee feePolicy utxo' sel
lift . traceWith tr $ MsgPaymentCoinSelectionAdjusted balancedSel
pure balancedSel
where
tl = ctx ^. transactionLayer @t @k
tr = ctx ^. logger @WalletLog
-- | Select the coins necessary to cover a single delegation request (including
-- one certificate).
selectCoinsForDelegation
:: forall ctx s t k.
( HasTransactionLayer t k ctx
, HasLogger WalletLog ctx
, HasDBLayer s k ctx
)
=> ctx
-> WalletId
-> DelegationAction
-> ExceptT ErrSelectForDelegation IO CoinSelection
selectCoinsForDelegation ctx wid action = do
(utxo, txp, _) <- withExceptT ErrSelectForDelegationNoSuchWallet $
selectCoinsSetup @ctx @s @k ctx wid
selectCoinsForDelegationFromUTxO @_ @t @k ctx utxo txp action
selectCoinsForDelegationFromUTxO
:: forall ctx t k.
( HasTransactionLayer t k ctx
, HasLogger WalletLog ctx
)
=> ctx
-> W.UTxO
-> W.TxParameters
-> DelegationAction
-> ExceptT ErrSelectForDelegation IO CoinSelection
selectCoinsForDelegationFromUTxO ctx utxo txp action = do
let feePolicy = feeOpts tl (Just action) (txp ^. #getFeePolicy)
let sel = initDelegationSelection tl (txp ^. #getFeePolicy) action
withExceptT ErrSelectForDelegationFee $ do
balancedSel <- adjustForFee feePolicy utxo sel
lift $ traceWith tr $ MsgDelegationCoinSelection balancedSel
pure balancedSel
where
tl = ctx ^. transactionLayer @t @k
tr = ctx ^. logger @WalletLog
-- | Estimate fee for 'selectCoinsForDelegation'.
estimateFeeForDelegation
:: forall ctx s t k.
( HasTransactionLayer t k ctx
, HasLogger WalletLog ctx
, HasDBLayer s k ctx
)
=> ctx
-> WalletId
-> ExceptT ErrSelectForDelegation IO FeeEstimation
estimateFeeForDelegation ctx wid = db & \DBLayer{..} -> do
(utxo, txp, _) <- withExceptT ErrSelectForDelegationNoSuchWallet
$ selectCoinsSetup @ctx @s @k ctx wid
isKeyReg <- mapExceptT atomically
$ withExceptT ErrSelectForDelegationNoSuchWallet
$ isStakeKeyRegistered (PrimaryKey wid)
let action = if isKeyReg then Join pid else RegisterKeyAndJoin pid
let selectCoins =
selectCoinsForDelegationFromUTxO @_ @t @k ctx utxo txp action
estimateFeeForCoinSelection $ Fee . feeBalance <$> selectCoins
where
db = ctx ^. dbLayer @s @k
pid = PoolId (error "Dummy pool id for estimation. Never evaluated.")
-- | Constructs a set of coin selections that select all funds from the given
-- source wallet, returning them as change.
--
-- If the coin selections returned by this function are used to create
-- transactions from the given wallet to a target wallet, executing those
-- transactions will have the effect of migrating all funds from the given
-- source wallet to the specified target wallet.
selectCoinsForMigration
:: forall ctx s t k.
( HasTransactionLayer t k ctx
, HasDBLayer s k ctx
)
=> ctx
-> WalletId
-- ^ The source wallet ID.
-> ExceptT ErrSelectForMigration IO [CoinSelection]
selectCoinsForMigration ctx wid = do
(utxo, txp, _) <- withExceptT ErrSelectForMigrationNoSuchWallet $
selectCoinsSetup @ctx @s @k ctx wid
selectCoinsForMigrationFromUTxO @ctx @t @k ctx utxo txp wid
selectCoinsForMigrationFromUTxO
:: forall ctx t k.
( HasTransactionLayer t k ctx
)
=> ctx
-> W.UTxO
-> W.TxParameters
-> WalletId
-- ^ The source wallet ID.
-> ExceptT ErrSelectForMigration IO [CoinSelection]
selectCoinsForMigrationFromUTxO ctx utxo txp wid = do
let feePolicy@(LinearFee (Quantity a) _ _) = txp ^. #getFeePolicy
let feeOptions = (feeOpts tl Nothing feePolicy)
{ dustThreshold = Coin $ ceiling a }
let selOptions = coinSelOpts tl (txp ^. #getTxMaxSize)
case depleteUTxO feeOptions (idealBatchSize selOptions) utxo of
cs | not (null cs) -> pure cs
_ -> throwE (ErrSelectForMigrationEmptyWallet wid)
where
tl = ctx ^. transactionLayer @t @k
-- | Estimate fee for 'selectCoinsForPayment'.
estimateFeeForPayment
:: forall ctx s t k e.
( HasTransactionLayer t k ctx
, HasLogger WalletLog ctx
, HasDBLayer s k ctx
, e ~ ErrValidateSelection t
)
=> ctx
-> WalletId
-> NonEmpty TxOut
-> Quantity "lovelace" Word64
-> ExceptT (ErrSelectForPayment e) IO FeeEstimation
estimateFeeForPayment ctx wid recipients withdrawal = do
(utxo, txp, minUtxo) <- withExceptT ErrSelectForPaymentNoSuchWallet $
selectCoinsSetup @ctx @s @k ctx wid
let selectCoins =
selectCoinsForPaymentFromUTxO @ctx @t @k @e ctx utxo txp recipients withdrawal
cs <- selectCoins `catchE` handleNotSuccessfulCoinSelection
withExceptT ErrSelectForPaymentMinimumUTxOValue $ except $
guardCoinSelection minUtxo cs
estimateFeeForCoinSelection $ (Fee . feeBalance <$> selectCoins)
`catchE` handleCannotCover utxo recipients
-- | When estimating fee, it is rather cumbersome to return "cannot cover fee"
-- whereas clients are just asking for an estimation. Therefore, we convert
-- cannot cover errors into the necessary fee amount, even though there isn't
-- enough in the wallet to cover these fees.
handleCannotCover
:: Monad m
=> UTxO
-> NonEmpty TxOut
-> ErrSelectForPayment e
-> ExceptT (ErrSelectForPayment e) m Fee
handleCannotCover utxo outs = \case
ErrSelectForPaymentFee (ErrCannotCoverFee missing) -> do
let available = fromIntegral (W.balance utxo) - sum (getCoin . coin <$> outs)
pure $ Fee $ available + missing
e ->
throwE e
handleNotSuccessfulCoinSelection
:: Monad m
=> ErrSelectForPayment e
-> ExceptT (ErrSelectForPayment e) m CoinSelection
handleNotSuccessfulCoinSelection _ =
pure (mempty :: CoinSelection)
-- | Augments the given outputs with new outputs. These new outputs correspond
-- to change outputs to which new addresses have been assigned. This updates
-- the wallet state as it needs to keep track of new pending change addresses.
assignChangeAddresses
:: forall s m.
( GenChange s
, MonadIO m
)
=> ArgGenChange s
-> CoinSelection
-> s
-> m (CoinSelection, s)
assignChangeAddresses argGenChange cs = runStateT $ do
chgsOuts <- forM (change cs) $ \c -> do
addr <- state (genChange argGenChange)
pure $ TxOut addr c
outs' <- liftIO $ shuffle (outputs cs ++ chgsOuts)
pure $ cs { change = [], outputs = outs' }
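-- As used by 'signPayment' and 'signDelegation' (sketch), this runs inside an
-- atomic database transaction so that the updated state is persisted together
-- with the new checkpoint:
--
-- @
-- (cs', s') <- assignChangeAddresses argGenChange cs (getState cp)
-- putCheckpoint (PrimaryKey wid) (updateState s' cp)
-- @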
-- | Produce witnesses and construct a transaction from a given
-- selection. Requires the encryption passphrase in order to decrypt
-- the root private key. Note that this doesn't broadcast the
-- transaction to the network. In order to do so, use 'submitTx'.
signPayment
:: forall ctx s t k.
( HasTransactionLayer t k ctx
, HasDBLayer s k ctx
, HasNetworkLayer t ctx
, IsOwned s k
, GenChange s
, HardDerivation k
, Bounded (Index (AddressIndexDerivationType k) 'AddressK)
)
=> ctx
-> WalletId
-> ArgGenChange s
-> Passphrase "raw"
-> CoinSelection
-> ExceptT ErrSignPayment IO (Tx, TxMeta, UTCTime, SealedTx)
signPayment ctx wid argGenChange pwd cs = db & \DBLayer{..} -> do
withRootKey @_ @s ctx wid pwd ErrSignPaymentWithRootKey $ \xprv scheme -> do
let pwdP = preparePassphrase scheme pwd
nodeTip <- withExceptT ErrSignPaymentNetwork $ currentNodeTip nl
mapExceptT atomically $ do
cp <- withExceptT ErrSignPaymentNoSuchWallet $ withNoSuchWallet wid $
readCheckpoint (PrimaryKey wid)
(cs', s') <- assignChangeAddresses argGenChange cs (getState cp)
withExceptT ErrSignPaymentNoSuchWallet $
putCheckpoint (PrimaryKey wid) (updateState s' cp)
let keyFrom = isOwned (getState cp) (xprv, pwdP)
let rewardAcnt = deriveRewardAccount @k pwdP xprv
(tx, sealedTx) <- withExceptT ErrSignPaymentMkTx $ ExceptT $ pure $
mkStdTx tl (rewardAcnt, pwdP) keyFrom (nodeTip ^. #slotNo) cs'
(time, meta) <- liftIO $ mkTxMeta ti (currentTip cp) s' cs'
return (tx, meta, time, sealedTx)
where
ti :: TimeInterpreter IO
ti = timeInterpreter nl
db = ctx ^. dbLayer @s @k
tl = ctx ^. transactionLayer @t @k
nl = ctx ^. networkLayer @t
-- | Very much like 'signPayment', but does not generate change addresses.
signTx
:: forall ctx s t k.
( HasTransactionLayer t k ctx
, HasDBLayer s k ctx
, HasNetworkLayer t ctx
, IsOwned s k
, HardDerivation k
, Bounded (Index (AddressIndexDerivationType k) 'AddressK)
)
=> ctx
-> WalletId
-> Passphrase "raw"
-> UnsignedTx
-> ExceptT ErrSignPayment IO (Tx, TxMeta, UTCTime, SealedTx)
signTx ctx wid pwd (UnsignedTx inpsNE outsNE) = db & \DBLayer{..} -> do
withRootKey @_ @s ctx wid pwd ErrSignPaymentWithRootKey $ \xprv scheme -> do
let pwdP = preparePassphrase scheme pwd
nodeTip <- withExceptT ErrSignPaymentNetwork $ currentNodeTip nl
mapExceptT atomically $ do
cp <- withExceptT ErrSignPaymentNoSuchWallet $ withNoSuchWallet wid $
readCheckpoint (PrimaryKey wid)
let cs = mempty { inputs = inps, outputs = outs }
let keyFrom = isOwned (getState cp) (xprv, pwdP)
let rewardAcnt = deriveRewardAccount @k pwdP xprv
(tx, sealedTx) <- withExceptT ErrSignPaymentMkTx $ ExceptT $ pure $
mkStdTx tl (rewardAcnt, pwdP) keyFrom (nodeTip ^. #slotNo) cs
(time, meta) <- liftIO $ mkTxMeta ti (currentTip cp) (getState cp) cs
return (tx, meta, time, sealedTx)
where
ti :: TimeInterpreter IO
ti = timeInterpreter nl
db = ctx ^. dbLayer @s @k
tl = ctx ^. transactionLayer @t @k
nl = ctx ^. networkLayer @t
inps = NE.toList inpsNE
outs = NE.toList outsNE
-- | Makes a fully-resolved coin selection for the given set of payments.
selectCoinsExternal
:: forall ctx s t k e.
( GenChange s
, HasDBLayer s k ctx
, HasLogger WalletLog ctx
, HasTransactionLayer t k ctx
, e ~ ErrValidateSelection t
)
=> ctx
-> WalletId
-> ArgGenChange s
-> NonEmpty TxOut
-> Quantity "lovelace" Word64
-> ExceptT (ErrSelectCoinsExternal e) IO UnsignedTx
selectCoinsExternal ctx wid argGenChange payments withdrawal = do
cs <- withExceptT ErrSelectCoinsExternalUnableToMakeSelection $
selectCoinsForPayment @ctx @s @t @k @e ctx wid payments withdrawal
cs' <- db & \DBLayer{..} ->
withExceptT ErrSelectCoinsExternalNoSuchWallet $
mapExceptT atomically $ do
cp <- withNoSuchWallet wid $ readCheckpoint $ PrimaryKey wid
(cs', s') <- assignChangeAddresses argGenChange cs (getState cp)
putCheckpoint (PrimaryKey wid) (updateState s' cp)
pure cs'
UnsignedTx
<$> ensureNonEmpty (inputs cs') ErrSelectCoinsExternalUnableToAssignInputs
<*> ensureNonEmpty (outputs cs') ErrSelectCoinsExternalUnableToAssignOutputs
where
db = ctx ^. dbLayer @s @k
ensureNonEmpty
:: forall a. [a]
-> (WalletId -> ErrSelectCoinsExternal e)
-> ExceptT (ErrSelectCoinsExternal e) IO (NonEmpty a)
ensureNonEmpty mxs err = case NE.nonEmpty mxs of
Nothing -> throwE $ err wid
Just xs -> pure xs
data ErrSelectCoinsExternal e
= ErrSelectCoinsExternalNoSuchWallet ErrNoSuchWallet
| ErrSelectCoinsExternalUnableToMakeSelection (ErrSelectForPayment e)
| ErrSelectCoinsExternalUnableToAssignInputs WalletId
| ErrSelectCoinsExternalUnableToAssignOutputs WalletId
deriving (Eq, Show)
signDelegation
:: forall ctx s t k.
( HasTransactionLayer t k ctx
, HasDBLayer s k ctx
, HasNetworkLayer t ctx
, IsOwned s k
, GenChange s
, HardDerivation k
, AddressIndexDerivationType k ~ 'Soft
)
=> ctx
-> WalletId
-> ArgGenChange s
-> Passphrase "raw"
-> CoinSelection
-> DelegationAction
-> ExceptT ErrSignDelegation IO (Tx, TxMeta, UTCTime, SealedTx)
signDelegation ctx wid argGenChange pwd coinSel action = db & \DBLayer{..} -> do
nodeTip <- withExceptT ErrSignDelegationNetwork $ currentNodeTip nl
withRootKey @_ @s ctx wid pwd ErrSignDelegationWithRootKey $ \xprv scheme -> do
let pwdP = preparePassphrase scheme pwd
mapExceptT atomically $ do
cp <- withExceptT ErrSignDelegationNoSuchWallet $ withNoSuchWallet wid $
readCheckpoint (PrimaryKey wid)
(coinSel', s') <- assignChangeAddresses argGenChange coinSel (getState cp)
withExceptT ErrSignDelegationNoSuchWallet $
putCheckpoint (PrimaryKey wid) (updateState s' cp)
let rewardAcnt = deriveRewardAccount @k pwdP xprv
let keyFrom = isOwned (getState cp) (xprv, pwdP)
(tx, sealedTx) <- withExceptT ErrSignDelegationMkTx $ ExceptT $ pure $
case action of
RegisterKeyAndJoin poolId ->
mkDelegationJoinTx tl poolId
(rewardAcnt, pwdP)
keyFrom
(nodeTip ^. #slotNo)
coinSel'
Join poolId ->
mkDelegationJoinTx tl poolId
(rewardAcnt, pwdP)
keyFrom
(nodeTip ^. #slotNo)
coinSel'
Quit ->
mkDelegationQuitTx tl
(rewardAcnt, pwdP)
keyFrom
(nodeTip ^. #slotNo)
coinSel'
(time, meta) <- liftIO $
mkTxMeta ti (currentTip cp) s' coinSel'
return (tx, meta, time, sealedTx)
where
ti :: TimeInterpreter IO
ti = timeInterpreter nl
db = ctx ^. dbLayer @s @k
tl = ctx ^. transactionLayer @t @k
nl = ctx ^. networkLayer @t
-- | Construct transaction metadata from a current block header and a list
-- of inputs and outputs.
mkTxMeta
:: (IsOurs s Address, Monad m)
=> TimeInterpreter m
-> BlockHeader
-> s
-> CoinSelection
-> m (UTCTime, TxMeta)
mkTxMeta interpretTime blockHeader wState cs =
let
amtOuts =
sum (mapMaybe ourCoins (outputs cs))
amtInps = fromIntegral $
sum (getCoin . coin . snd <$> (inputs cs))
+ withdrawal cs
+ reclaim cs
in do
t <- slotStartTime' (blockHeader ^. #slotNo)
return
( t
, TxMeta
{ status = Pending
, direction = Outgoing
, slotNo = blockHeader ^. #slotNo
, blockHeight = blockHeader ^. #blockHeight
, amount = Quantity (amtInps - amtOuts)
}
)
where
slotStartTime' = interpretTime . startTime
ourCoins :: TxOut -> Maybe Natural
ourCoins (TxOut addr (Coin val)) =
if fst (isOurs addr wState)
then Just (fromIntegral val)
else Nothing
-- | Broadcast a (signed) transaction to the network.
submitTx
:: forall ctx s t k.
( HasNetworkLayer t ctx
, HasDBLayer s k ctx
)
=> ctx
-> WalletId
-> (Tx, TxMeta, SealedTx)
-> ExceptT ErrSubmitTx IO ()
submitTx ctx wid (tx, meta, binary) = db & \DBLayer{..} -> do
withExceptT ErrSubmitTxNetwork $ postTx nw binary
mapExceptT atomically $ withExceptT ErrSubmitTxNoSuchWallet $
putTxHistory (PrimaryKey wid) [(tx, meta)]
where
db = ctx ^. dbLayer @s @k
nw = ctx ^. networkLayer @t
-- | Broadcast an externally-signed transaction to the network.
submitExternalTx
:: forall ctx t k.
( HasNetworkLayer t ctx
, HasTransactionLayer t k ctx
)
=> ctx
-> ByteString
-> ExceptT ErrSubmitExternalTx IO Tx
submitExternalTx ctx bytes = do
(tx,binary) <- withExceptT ErrSubmitExternalTxDecode $ except $
decodeSignedTx tl bytes
withExceptT ErrSubmitExternalTxNetwork $ postTx nw binary
return tx
where
nw = ctx ^. networkLayer @t
tl = ctx ^. transactionLayer @t @k
-- | Forget pending transaction.
forgetPendingTx
:: forall ctx s k.
( HasDBLayer s k ctx
)
=> ctx
-> WalletId
-> Hash "Tx"
-> ExceptT ErrRemovePendingTx IO ()
forgetPendingTx ctx wid tid = db & \DBLayer{..} -> do
mapExceptT atomically $ removePendingTx (PrimaryKey wid) tid
where
db = ctx ^. dbLayer @s @k
-- | List all transactions and metadata from history for a given wallet.
listTransactions
:: forall ctx s k t.
( HasDBLayer s k ctx
, HasNetworkLayer t ctx
)
=> ctx
-> WalletId
-> Maybe (Quantity "lovelace" Natural)
-- Inclusive minimum value of at least one withdrawal in each transaction
-> Maybe UTCTime
-- Inclusive minimum time bound.
-> Maybe UTCTime
-- Inclusive maximum time bound.
-> SortOrder
-> ExceptT ErrListTransactions IO [TransactionInfo]
listTransactions ctx wid mMinWithdrawal mStart mEnd order = db & \DBLayer{..} -> do
when (Just True == ( (<(Quantity 1)) <$> mMinWithdrawal )) $
throwE ErrListTransactionsMinWithdrawalWrong
let pk = PrimaryKey wid
mapExceptT atomically $ do
mapExceptT liftIO getSlotRange >>= maybe
(pure [])
(\r -> lift (readTxHistory pk mMinWithdrawal order r Nothing))
where
ti = timeInterpreter $ ctx ^. networkLayer @t
db = ctx ^. dbLayer @s @k
-- Transforms the user-specified time range into a slot range. If the
-- user-specified range terminates before the start of the blockchain,
-- returns 'Nothing'.
getSlotRange
:: ExceptT ErrListTransactions IO (Maybe (Range SlotNo))
getSlotRange = case (mStart, mEnd) of
(Just start, Just end) | start > end -> do
let err = ErrStartTimeLaterThanEndTime start end
throwE (ErrListTransactionsStartTimeLaterThanEndTime err)
_ -> do
liftIO $ ti $ slotRangeFromTimeRange $ Range mStart mEnd
-- | Get transaction and metadata from history for a given wallet.
getTransaction
:: forall ctx s k. HasDBLayer s k ctx
=> ctx
-> WalletId
-> Hash "Tx"
-> ExceptT ErrGetTransaction IO TransactionInfo
getTransaction ctx wid tid = db & \DBLayer{..} -> do
let pk = PrimaryKey wid
res <- lift $ atomically $ runExceptT $ getTx pk tid
case res of
Left err -> do
throwE (ErrGetTransactionNoSuchWallet err)
Right Nothing -> do
let err' = ErrNoSuchTransaction tid
throwE (ErrGetTransactionNoSuchTransaction err')
Right (Just tx) ->
pure tx
where
db = ctx ^. dbLayer @s @k
{-------------------------------------------------------------------------------
Delegation
-------------------------------------------------------------------------------}
-- | Helper function to factor necessary logic for joining a stake pool.
joinStakePool
:: forall ctx s t k.
( HasDBLayer s k ctx
, HasLogger WalletLog ctx
, HasNetworkLayer t ctx
, HasTransactionLayer t k ctx
, IsOwned s k
, GenChange s
, HardDerivation k
, AddressIndexDerivationType k ~ 'Soft
)
=> ctx
-> W.EpochNo
-> [PoolId]
-> PoolId
-> PoolLifeCycleStatus
-> WalletId
-> ArgGenChange s
-> Passphrase "raw"
-> ExceptT ErrJoinStakePool IO (Tx, TxMeta, UTCTime)
joinStakePool ctx currentEpoch knownPools pid poolStatus wid argGenChange pwd =
db & \DBLayer{..} -> do
(isKeyReg, walMeta) <- mapExceptT atomically
$ withExceptT ErrJoinStakePoolNoSuchWallet
$ (,) <$> isStakeKeyRegistered (PrimaryKey wid)
<*> withNoSuchWallet wid (readWalletMeta (PrimaryKey wid))
let mRetirementEpoch = view #retiredIn <$>
W.getPoolRetirementCertificate poolStatus
let retirementInfo =
PoolRetirementEpochInfo currentEpoch <$> mRetirementEpoch
withExceptT ErrJoinStakePoolCannotJoin $ except $
guardJoin knownPools (walMeta ^. #delegation) pid retirementInfo
let action = if isKeyReg then Join pid else RegisterKeyAndJoin pid
liftIO $ traceWith tr $ MsgIsStakeKeyRegistered isKeyReg
selection <- withExceptT ErrJoinStakePoolSelectCoin $
selectCoinsForDelegation @ctx @s @t @k ctx wid action
(tx, txMeta, txTime, sealedTx) <-
withExceptT ErrJoinStakePoolSignDelegation $
signDelegation
@ctx @s @t @k ctx wid argGenChange pwd selection action
withExceptT ErrJoinStakePoolSubmitTx $
submitTx @ctx @s @t @k ctx wid (tx, txMeta, sealedTx)
pure (tx, txMeta, txTime)
where
db = ctx ^. dbLayer @s @k
tr = ctx ^. logger
-- | Helper function to factor necessary logic for quitting a stake pool.
quitStakePool
:: forall ctx s t k.
( HasDBLayer s k ctx
, HasLogger WalletLog ctx
, HasNetworkLayer t ctx
, HasTransactionLayer t k ctx
, IsOwned s k
, GenChange s
, HardDerivation k
, AddressIndexDerivationType k ~ 'Soft
)
=> ctx
-> WalletId
-> ArgGenChange s
-> Passphrase "raw"
-> ExceptT ErrQuitStakePool IO (Tx, TxMeta, UTCTime)
quitStakePool ctx wid argGenChange pwd = db & \DBLayer{..} -> do
walMeta <- mapExceptT atomically $ withExceptT ErrQuitStakePoolNoSuchWallet $
withNoSuchWallet wid $ readWalletMeta (PrimaryKey wid)
rewards <- liftIO $ fetchRewardBalance @ctx @s @k ctx wid
withExceptT ErrQuitStakePoolCannotQuit $ except $
guardQuit (walMeta ^. #delegation) rewards
let action = Quit
selection <- withExceptT ErrQuitStakePoolSelectCoin $
selectCoinsForDelegation @ctx @s @t @k ctx wid action
(tx, txMeta, txTime, sealedTx) <- withExceptT ErrQuitStakePoolSignDelegation $
signDelegation @ctx @s @t @k ctx wid argGenChange pwd selection action
withExceptT ErrQuitStakePoolSubmitTx $
submitTx @ctx @s @t @k ctx wid (tx, txMeta, sealedTx)
pure (tx, txMeta, txTime)
where
db = ctx ^. dbLayer @s @k
{-------------------------------------------------------------------------------
Fee Estimation
-------------------------------------------------------------------------------}
-- | Result of a fee estimation process given a wallet and payment order.
data FeeEstimation = FeeEstimation
{ estMinFee :: Word64
-- ^ Most coin selections will result in a fee higher than this.
, estMaxFee :: Word64
-- ^ Most coin selections will result in a fee lower than this.
} deriving (Show, Eq)
-- | Estimate the transaction fee for a given coin selection algorithm by
-- repeatedly running it (100 times) and collecting the results. In the returned
-- 'FeeEstimation', the minimum fee is that which 90% of the sampled fees are
-- greater than. The maximum fee is the highest fee observed in the samples.
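--
-- A typical call site (sketch, mirroring 'estimateFeeForDelegation' above)
-- feeds it a coin-selection action:
--
-- @
-- estimateFeeForCoinSelection (fmap (Fee . feeBalance) selectCoins)
-- @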
estimateFeeForCoinSelection
:: forall m err. Monad m
=> ExceptT err m Fee
-> ExceptT err m FeeEstimation
estimateFeeForCoinSelection
= fmap deciles
. handleErrors
. replicateM repeats
. runExceptT
. fmap getFee
where
-- Use method R-8 from the reference below to get the top 90%.
-- https://en.wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample
deciles = mkFeeEstimation
. map round
. V.toList
. quantiles medianUnbiased (V.fromList [1, 10]) 10
. V.fromList
. map fromIntegral
mkFeeEstimation [a,b] = FeeEstimation a b
mkFeeEstimation _ = error "estimateFeeForCoinSelection: impossible"
-- Remove failed coin selections from samples. Unless they all failed, in
-- which case pass on the error.
handleErrors :: m [Either err a] -> ExceptT err m [a]
handleErrors = ExceptT . fmap skipFailed
where
skipFailed samples = case partitionEithers samples of
([], []) ->
error "estimateFeeForCoinSelection: impossible empty list"
((e:_), []) ->
Left e
(_, samples') ->
Right samples'
repeats = 100 -- TODO: modify repeats based on data
{-------------------------------------------------------------------------------
Key Store
-------------------------------------------------------------------------------}
-- | The password here undergoes PBKDF2 encryption using HMAC
-- with the hash algorithm SHA512, which is realized in 'encryptPassphrase'.
attachPrivateKeyFromPwd
:: forall ctx s k.
( HasDBLayer s k ctx
)
=> ctx
-> WalletId
-> (k 'RootK XPrv, Passphrase "encryption")
-> ExceptT ErrNoSuchWallet IO ()
attachPrivateKeyFromPwd ctx wid (xprv, pwd) = db & \DBLayer{..} -> do
hpwd <- liftIO $ encryptPassphrase pwd
-- NOTE Only new wallets are constructed through this function, so the
-- passphrase is encrypted with the new scheme (i.e. PBKDF2)
--
-- We do an extra sanity check after having encrypted the passphrase: we
-- tried to avoid some programmer mistakes with the phantom types on
-- Passphrase, but it's still possible that someone would inadvertently call
-- this function with a 'Passphrase' that wasn't prepared for
-- 'EncryptWithPBKDF2', if this happens, this is a programmer error and we
-- must fail hard for this would have dramatic effects later on.
case checkPassphrase EncryptWithPBKDF2 (coerce pwd) hpwd of
Right () -> attachPrivateKey db wid (xprv, hpwd) EncryptWithPBKDF2
Left{} -> fail
"Awe crap! The passphrase given to 'attachPrivateKeyFromPwd' wasn't \
\rightfully constructed. This is a programmer error. Look for calls \
\to this function and make sure that the given Passphrase wasn't not \
\prepared using 'EncryptWithScrypt'!"
where
db = ctx ^. dbLayer @s @k
-- | The hash here is the output of the Scrypt function with the following parameters:
-- - logN = 14
-- - r = 8
-- - p = 1
-- - bytesNumber = 64
attachPrivateKeyFromPwdHash
:: forall ctx s k.
( HasDBLayer s k ctx
)
=> ctx
-> WalletId
-> (k 'RootK XPrv, Hash "encryption")
-> ExceptT ErrNoSuchWallet IO ()
attachPrivateKeyFromPwdHash ctx wid (xprv, hpwd) = db & \DBLayer{..} ->
-- NOTE Only legacy wallets are imported through this function; their
-- passphrases were encrypted with the legacy scheme (Scrypt).
attachPrivateKey db wid (xprv, hpwd) EncryptWithScrypt
where
db = ctx ^. dbLayer @s @k
attachPrivateKey
:: DBLayer IO s k
-> WalletId
-> (k 'RootK XPrv, Hash "encryption")
-> PassphraseScheme
-> ExceptT ErrNoSuchWallet IO ()
attachPrivateKey db wid (xprv, hpwd) scheme = db & \DBLayer{..} -> do
now <- liftIO getCurrentTime
mapExceptT atomically $ do
putPrivateKey (PrimaryKey wid) (xprv, hpwd)
meta <- withNoSuchWallet wid $ readWalletMeta (PrimaryKey wid)
let modify x = x
{ passphraseInfo = Just $ WalletPassphraseInfo
{ lastUpdatedAt = now
, passphraseScheme = scheme
}
}
putWalletMeta (PrimaryKey wid) (modify meta)
-- | Execute an action which requires holding a root XPrv.
--
-- 'withRootKey' takes a callback function with two arguments:
--
-- - The encrypted root private key itself
-- - The underlying passphrase scheme (legacy or new)
--
-- Callers are then expected to use 'preparePassphrase' with the given scheme
-- in order to "prepare" the passphrase to be used by other functions. This does
-- nothing for the new encryption, but for the legacy encryption with Scrypt,
-- passphrases needed to first be CBOR serialized and blake2b_256 hashed.
--
-- @
--     withRootKey @ctx @s @k ctx wid pwd OnError $ \xprv scheme ->
--         changePassphrase (preparePassphrase scheme pwd) newPwd xprv
-- @
withRootKey
:: forall ctx s k e a. HasDBLayer s k ctx
=> ctx
-> WalletId
-> Passphrase "raw"
-> (ErrWithRootKey -> e)
-> (k 'RootK XPrv -> PassphraseScheme -> ExceptT e IO a)
-> ExceptT e IO a
withRootKey ctx wid pwd embed action = db & \DBLayer{..} -> do
(xprv, scheme) <- withExceptT embed $ mapExceptT atomically $ do
mScheme <- (>>= (fmap passphraseScheme . passphraseInfo)) <$>
lift (readWalletMeta $ PrimaryKey wid)
mXPrv <- lift $ readPrivateKey $ PrimaryKey wid
case (mXPrv, mScheme) of
(Just (xprv, hpwd), Just scheme) -> do
withExceptT (ErrWithRootKeyWrongPassphrase wid) $ ExceptT $
return $ checkPassphrase scheme pwd hpwd
return (xprv, scheme)
_ ->
throwE $ ErrWithRootKeyNoRootKey wid
action xprv scheme
where
db = ctx ^. dbLayer @s @k
{-------------------------------------------------------------------------------
Errors
-------------------------------------------------------------------------------}
data ErrUTxOTooSmall
= ErrUTxOTooSmall Word64 [Word64]
-- ^ UTxO(s) participating in the transaction are too small to make a
-- transaction that will be accepted by the node. We record the minimum
-- UTxO value and all outputs/change smaller than this value.
deriving (Show, Eq)
-- | Errors that can occur when creating an unsigned transaction.
data ErrSelectForPayment e
= ErrSelectForPaymentNoSuchWallet ErrNoSuchWallet
| ErrSelectForPaymentCoinSelection (ErrCoinSelection e)
| ErrSelectForPaymentFee ErrAdjustForFee
| ErrSelectForPaymentMinimumUTxOValue ErrUTxOTooSmall
deriving (Show, Eq)
-- | Errors that can occur when listing UTxO statistics.
newtype ErrListUTxOStatistics
= ErrListUTxOStatisticsNoSuchWallet ErrNoSuchWallet
deriving (Show, Eq)
-- | Errors that can occur when signing a transaction.
data ErrSignPayment
= ErrSignPaymentMkTx ErrMkTx
| ErrSignPaymentNoSuchWallet ErrNoSuchWallet
| ErrSignPaymentWithRootKey ErrWithRootKey
| ErrSignPaymentNetwork ErrCurrentNodeTip
deriving (Show, Eq)
-- | Errors that can occur when submitting a signed transaction to the network.
data ErrSubmitTx
= ErrSubmitTxNetwork ErrPostTx
| ErrSubmitTxNoSuchWallet ErrNoSuchWallet
deriving (Show, Eq)
-- | Errors that can occur when submitting an externally-signed transaction
-- to the network.
data ErrSubmitExternalTx
= ErrSubmitExternalTxNetwork ErrPostTx
| ErrSubmitExternalTxDecode ErrDecodeSignedTx
deriving (Show, Eq)
-- | Errors that can occur when trying to change a wallet's passphrase.
data ErrUpdatePassphrase
= ErrUpdatePassphraseNoSuchWallet ErrNoSuchWallet
| ErrUpdatePassphraseWithRootKey ErrWithRootKey
deriving (Show, Eq)
-- | Errors that can occur when trying to perform an operation on a wallet that
-- requires a private key, but where none is attached to the wallet.
data ErrWithRootKey
= ErrWithRootKeyNoRootKey WalletId
| ErrWithRootKeyWrongPassphrase WalletId ErrWrongPassphrase
deriving (Show, Eq)
-- | Errors that can occur when trying to list transactions.
data ErrListTransactions
= ErrListTransactionsNoSuchWallet ErrNoSuchWallet
| ErrListTransactionsStartTimeLaterThanEndTime ErrStartTimeLaterThanEndTime
| ErrListTransactionsMinWithdrawalWrong
deriving (Show, Eq)
-- | Errors that can occur when trying to get a transaction.
data ErrGetTransaction
= ErrGetTransactionNoSuchWallet ErrNoSuchWallet
| ErrGetTransactionNoSuchTransaction ErrNoSuchTransaction
deriving (Show, Eq)
-- | Indicates that the specified transaction hash is not found.
newtype ErrNoSuchTransaction = ErrNoSuchTransaction (Hash "Tx")
deriving (Show, Eq)
-- | Indicates that the specified start time is later than the specified end
-- time.
data ErrStartTimeLaterThanEndTime = ErrStartTimeLaterThanEndTime
{ errStartTime :: UTCTime
, errEndTime :: UTCTime
} deriving (Show, Eq)
-- | Errors that can occur when creating an unsigned delegation certificate
-- transaction.
data ErrSelectForDelegation
= ErrSelectForDelegationNoSuchWallet ErrNoSuchWallet
| ErrSelectForDelegationFee ErrAdjustForFee
deriving (Show, Eq)
-- | Errors that can occur when signing a delegation certificate.
data ErrSignDelegation
= ErrSignDelegationNoSuchWallet ErrNoSuchWallet
| ErrSignDelegationWithRootKey ErrWithRootKey
| ErrSignDelegationMkTx ErrMkTx
| ErrSignDelegationNetwork ErrCurrentNodeTip
deriving (Show, Eq)
data ErrJoinStakePool
= ErrJoinStakePoolNoSuchWallet ErrNoSuchWallet
| ErrJoinStakePoolSelectCoin ErrSelectForDelegation
| ErrJoinStakePoolSignDelegation ErrSignDelegation
| ErrJoinStakePoolSubmitTx ErrSubmitTx
| ErrJoinStakePoolCannotJoin ErrCannotJoin
deriving (Generic, Eq, Show)
data ErrQuitStakePool
= ErrQuitStakePoolNoSuchWallet ErrNoSuchWallet
| ErrQuitStakePoolSelectCoin ErrSelectForDelegation
| ErrQuitStakePoolSignDelegation ErrSignDelegation
| ErrQuitStakePoolSubmitTx ErrSubmitTx
| ErrQuitStakePoolCannotQuit ErrCannotQuit
deriving (Generic, Eq, Show)
-- | Errors that can occur when fetching the reward balance of a wallet
data ErrFetchRewards
= ErrFetchRewardsNetworkUnreachable ErrNetworkUnavailable
| ErrFetchRewardsNoSuchWallet ErrNoSuchWallet
deriving (Generic, Eq, Show)
data ErrSelectForMigration
= ErrSelectForMigrationNoSuchWallet ErrNoSuchWallet
| ErrSelectForMigrationEmptyWallet WalletId
-- ^ User attempted to migrate an empty wallet
deriving (Eq, Show)
data ErrCheckWalletIntegrity
= ErrCheckWalletIntegrityNoSuchWallet ErrNoSuchWallet
| ErrCheckIntegrityDifferentGenesis (Hash "Genesis") (Hash "Genesis")
deriving (Eq, Show)
instance Exception ErrCheckWalletIntegrity
data ErrCannotJoin
= ErrAlreadyDelegating PoolId
| ErrNoSuchPool PoolId
deriving (Generic, Eq, Show)
data ErrCannotQuit
= ErrNotDelegatingOrAboutTo
| ErrNonNullRewards (Quantity "lovelace" Word64)
deriving (Generic, Eq, Show)
-- | Can't perform given operation because the wallet died.
newtype ErrWalletNotResponding
= ErrWalletNotResponding WalletId
deriving (Eq, Show)
data ErrCreateRandomAddress
= ErrIndexAlreadyExists (Index 'Hardened 'AddressK)
| ErrCreateAddrNoSuchWallet ErrNoSuchWallet
| ErrCreateAddrWithRootKey ErrWithRootKey
| ErrCreateAddressNotAByronWallet
deriving (Generic, Eq, Show)
data ErrImportRandomAddress
= ErrImportAddrNoSuchWallet ErrNoSuchWallet
| ErrImportAddrDoesNotBelong
| ErrImportAddressNotAByronWallet
deriving (Generic, Eq, Show)
data ErrNotASequentialWallet
= ErrNotASequentialWallet
deriving (Generic, Eq, Show)
{-------------------------------------------------------------------------------
Utils
-------------------------------------------------------------------------------}
withNoSuchWallet
:: Monad m
=> WalletId
-> m (Maybe a)
-> ExceptT ErrNoSuchWallet m a
withNoSuchWallet wid =
maybeToExceptT (ErrNoSuchWallet wid) . MaybeT
data PoolRetirementEpochInfo = PoolRetirementEpochInfo
{ currentEpoch
:: W.EpochNo
-- ^ The current epoch.
, retirementEpoch
:: W.EpochNo
-- ^ The retirement epoch of a pool.
}
deriving (Eq, Generic, Show)
guardJoin
:: [PoolId]
-> WalletDelegation
-> PoolId
-> Maybe PoolRetirementEpochInfo
-> Either ErrCannotJoin ()
guardJoin knownPools delegation pid mRetirementEpochInfo = do
when (pid `notElem` knownPools) $
Left (ErrNoSuchPool pid)
forM_ mRetirementEpochInfo $ \info ->
when (currentEpoch info >= retirementEpoch info) $
Left (ErrNoSuchPool pid)
when ((null next) && isDelegatingTo (== pid) active) $
Left (ErrAlreadyDelegating pid)
when (not (null next) && isDelegatingTo (== pid) (last next)) $
Left (ErrAlreadyDelegating pid)
where
WalletDelegation {active, next} = delegation
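-- Illustrative examples ('pidA' and 'pidB' are hypothetical pool ids):
--
-- >>> guardJoin [pidA] (WalletDelegation (Delegating pidA) []) pidA Nothing
-- Left (ErrAlreadyDelegating pidA)
--
-- >>> guardJoin [pidA] (WalletDelegation NotDelegating []) pidB Nothing
-- Left (ErrNoSuchPool pidB)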
guardQuit
:: WalletDelegation
-> Quantity "lovelace" Word64
-> Either ErrCannotQuit ()
guardQuit WalletDelegation{active,next} rewards = do
let last_ = maybe active (view #status) $ lastMay next
unless (isDelegatingTo anyone last_) $
Left ErrNotDelegatingOrAboutTo
unless (rewards == Quantity 0) $
Left $ ErrNonNullRewards rewards
where
anyone = const True
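-- Illustrative: quitting is rejected while rewards remain unclaimed
-- ('pidA' is a hypothetical pool id):
--
-- >>> guardQuit (WalletDelegation (Delegating pidA) []) (Quantity 42)
-- Left (ErrNonNullRewards (Quantity 42))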
guardCoinSelection
:: Coin
-> CoinSelection
-> Either ErrUTxOTooSmall ()
guardCoinSelection minUtxoValue cs@CoinSelection{outputs, change}
    | cs == mempty = Right ()
    | otherwise = do
let outputCoins = map (\(TxOut _ c) -> c) outputs
let invalidTxOuts =
filter (< minUtxoValue) (outputCoins ++ change)
unless (L.null invalidTxOuts) $
Left (ErrUTxOTooSmall (getCoin minUtxoValue) (getCoin <$> invalidTxOuts))
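-- Illustrative: with a minimum UTxO value of 1000000 lovelace, a selection
-- whose only change coin is 'Coin 1000' yields
-- @Left (ErrUTxOTooSmall 1000000 [1000])@, while an empty selection is
-- trivially valid.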
{-------------------------------------------------------------------------------
Logging
-------------------------------------------------------------------------------}
data WalletLog
= MsgTryingRollback SlotNo
| MsgRolledBack SlotNo
| MsgFollow FollowLog
| MsgDelegation SlotNo DelegationCertificate
| MsgCheckpoint BlockHeader
| MsgWalletMetadata WalletMetadata
| MsgSyncProgress SyncProgress
| MsgDiscoveredTxs [(Tx, TxMeta)]
| MsgDiscoveredTxsContent [(Tx, TxMeta)]
| MsgTip BlockHeader
| MsgBlocks (NonEmpty Block)
| MsgDelegationCoinSelection CoinSelection
| MsgIsStakeKeyRegistered Bool
| MsgPaymentCoinSelectionStart W.UTxO W.TxParameters (NonEmpty TxOut)
| MsgPaymentCoinSelection CoinSelection
| MsgPaymentCoinSelectionAdjusted CoinSelection
| MsgRewardBalanceQuery BlockHeader
| MsgRewardBalanceResult (Either ErrFetchRewards (Quantity "lovelace" Word64))
| MsgRewardBalanceNoSuchWallet ErrNoSuchWallet
| MsgRewardBalanceExited
deriving (Show, Eq)
instance ToText WalletLog where
toText = \case
MsgTryingRollback point ->
"Try rolling back to " <> pretty point
MsgRolledBack point ->
"Rolled back to " <> pretty point
MsgFollow msg ->
toText msg
MsgDelegation slotNo cert -> case cert of
CertDelegateNone{} -> mconcat
[ "Discovered end of delegation within slot "
, pretty slotNo
]
CertDelegateFull{} -> mconcat
[ "Discovered delegation to pool "
, pretty (dlgCertPoolId cert)
, " within slot "
, pretty slotNo
]
CertRegisterKey {} -> mconcat
    [ "Discovered stake key registration "
    , "within slot "
    , pretty slotNo
    ]
MsgCheckpoint checkpointTip ->
"Creating checkpoint at " <> pretty checkpointTip
MsgWalletMetadata meta ->
pretty meta
MsgSyncProgress progress ->
"syncProgress: " <> pretty progress
MsgDiscoveredTxs txs ->
"discovered " <> pretty (length txs) <> " new transaction(s)"
MsgDiscoveredTxsContent txs ->
"transactions: " <> pretty (blockListF (snd <$> txs))
MsgTip tip ->
"local tip: " <> pretty tip
MsgBlocks blocks ->
"blocks: " <> pretty (NE.toList blocks)
MsgDelegationCoinSelection sel ->
"Coins selected for delegation: \n" <> pretty sel
MsgIsStakeKeyRegistered True ->
"Wallet stake key is registered. Will not register it again."
MsgIsStakeKeyRegistered False ->
"Wallet stake key is not registered. Will register..."
MsgPaymentCoinSelectionStart utxo _txp recipients ->
"Starting coin selection " <>
"|utxo| = "+|Map.size (getUTxO utxo)|+" " <>
"#recipients = "+|NE.length recipients|+""
MsgPaymentCoinSelection sel ->
"Coins selected for payment: \n" <> pretty sel
MsgPaymentCoinSelectionAdjusted sel ->
"Coins after fee adjustment: \n" <> pretty sel
MsgRewardBalanceQuery bh ->
"Updating the reward balance for block " <> pretty bh
MsgRewardBalanceResult (Right amt) ->
"The reward balance is " <> pretty amt
MsgRewardBalanceNoSuchWallet err ->
"Trying to store a balance for a wallet that doesn't exist (yet?): " <>
T.pack (show err)
MsgRewardBalanceResult (Left err) ->
"Problem fetching reward balance. Will try again on next chain update. " <>
T.pack (show err)
MsgRewardBalanceExited ->
"Reward balance worker has exited."
instance HasPrivacyAnnotation WalletLog
instance HasSeverityAnnotation WalletLog where
getSeverityAnnotation = \case
MsgTryingRollback _ -> Info
MsgRolledBack _ -> Info
MsgFollow msg -> getSeverityAnnotation msg
MsgDelegation _ _ -> Info
MsgCheckpoint _ -> Info
MsgWalletMetadata _ -> Info
MsgSyncProgress _ -> Info
MsgDiscoveredTxs _ -> Info
MsgDiscoveredTxsContent _ -> Debug
MsgTip _ -> Info
MsgBlocks _ -> Debug
MsgDelegationCoinSelection _ -> Debug
MsgPaymentCoinSelectionStart{} -> Debug
MsgPaymentCoinSelection _ -> Debug
MsgPaymentCoinSelectionAdjusted _ -> Debug
MsgIsStakeKeyRegistered _ -> Info
MsgRewardBalanceQuery _ -> Debug
MsgRewardBalanceResult (Right _) -> Debug
MsgRewardBalanceResult (Left _) -> Notice
MsgRewardBalanceNoSuchWallet{} -> Warning
MsgRewardBalanceExited -> Notice
|
module Equis.Solver
import Matrix.Numeric
import Data.Matrix.Numeric
%access private
||| Solves a system of linear equations.
|||
||| @coeff the matrix of coefficients
||| @sols the vector of constant terms (right-hand side)
export
solve : Neg a => Fractional a => (coeff : Matrix (S n) (S n) a) -> (sols : Vect (S n) a) -> Vect (S n) a
solve coeff sols = (inverse coeff) </> sols
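-- Usage sketch (illustrative): for the system 2x + y = 5, x + 3y = 10,
-- `solve` multiplies the inverse of the coefficient matrix by the
-- right-hand side, yielding the solution vector [1, 3].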
|
We have been blessed by God to be a part of an organization of women who for 145 years have used their hands to turn their faith, hope and love into action on behalf of women, children and youth. Today I challenge you to examine what’s in your hands, reflect on United Methodist Women’s next 100 years of mission in Jesus’ name—then make it happen. Use your hands to invite women you know to expand their concepts of Christian mission at Mission u. Use your hands to push a United Methodist Women member to stir up her gifts at Leadership Development Days. Use your hands to support women, children and youth who face injustice.
I believe this applies to United Methodist Women today as well. So what’s in our hands?
I know what’s in our hands: faith for seeking spiritual growth personally and guidance for moving forward in mission. What’s in our hands is hope for making the world more just and peaceful for women, children and youth by training new leaders, expanding educational opportunities and working with United Methodist and other like-minded partners. What’s in our hands is love from God for reaching out to sisters and brothers.
We have an awesome opportunity to hone our skills and educate ourselves for informed action at our annual Mission u events. This year our spiritual growth study theme is “How Is It With Your Soul?” The text by Priscilla Pope-Levison and Jack Levison explores this question that is central to the history and development of The United Methodist Church. For the second and final year we will learn about the Roma of Europe in our geographic study. And this year marks the first of our two-year issue study on the church and people with disabilities.
I’m particularly looking forward to The Church and People With Disabilities. My niece has apraxia, a speech disorder, and she lives not letting the disability stop her from achieving any of her goals. She loves to read, so I’m always picking up Reading Program and other United Methodist Women resources for her to read. Also, one of the most awesome opportunities I’ve ever had was to serve as a Special Olympics cheerleading coach.
United Methodist Women, examine what’s in your hands. Then use your gifts, talents and giving to make United Methodist Women a stronger and more effective agent in God’s mission.
This entry was posted in From the President, Mission u. Bookmark the permalink. |
module NextGenSeqUtils
include("include_all.jl")
export
# align.jl
usearch_filter,
usearch_trim_fastq_with_phreds,
nw_align,
banded_nw_align,
triplet_nw_align,
local_align,
kmer_seeded_align,
triplet_kmer_seeded_align,
loc_kmer_seeded_align,
local_kmer_seeded_align,
kmer_seeded_edit_dist,
resolve_alignments,
align_reference_frames,
local_edit_dist,
# hmm.jl
viterbi_logs,
trans_mat,
obs_mat,
initial_dist,
get_obs_given_state,
homopolymer_filter,
markov_filter,
forward_logs,
backward_logs,
forward_backward_logs,
logsum,
gen_seq_with_model,
viterbiprint,
# io.jl
read_fasta,
read_fasta_with_names,
read_fasta_with_names_in_other_order,
write_fasta,
read_fastq,
write_fastq,
# kmers.jl
KmerType,
kmer_count,
sparse_aa_kmer_count,
corrected_kmer_dist,
corrected_kmer_dist_full,
# orient.jl
orient_strands,
orient_to_refs,
orient_to_refs_file,
# paths.jl
Paths,
PATHS,
# phreds.jl
Phred,
Prob,
LogProb,
MIN_PHRED,
MAX_PHRED,
phred_to_log_p,
phred_to_p,
p_to_phred,
error_probs_to_phreds,
quality_filter,
length_vs_qual,
qual_hist,
# simulation.jl
simple_gen_seq,
simple_evolve,
fixed_diff_evolve,
pb_seq_sim,
env_pb_seq_sim,
# utils.jl
print_fasta,
degap,
dash_count,
single_gap,
single_mod_three_gap,
seq_details,
print_rgb,
reverse_complement,
print_diffs,
trim_ends_indices,
translate_to_aa,
generate_aa_seqs,
filter_by_length,
length_filter,
concat_fastas,
maxfreq,
freq,
sorted_freqs,
freq_dict_print,
nl43env,
dist_matrix,
# demux.jl
demux_fastx,
IUPAC_equals,
toIUPACnum,
#FAD.jl
FAD,
#evodist
estimate_distance
end # module
|
If $f$ is analytic on $S$, then $f^{(n)}$ is analytic on $S$. |
// Copyright Oliver Kowalke 2009.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_CONTEXT_ALL_H
#define BOOST_CONTEXT_ALL_H
#include <boost/context/fcontext.hpp>
#endif // BOOST_CONTEXT_ALL_H
|
(** * Autosubst Header for Scoped Syntax
Our development utilises well-scoped de Bruijn syntax. This means that the de Bruijn indices are taken from finite types. As a consequence, any kind of substitution or environment used in conjunction with well-scoped syntax takes the form of a mapping from some finite type _I^n_. In particular, _renamings_ are mappings _I^n -> I^m_. Here we develop the theory of how these parts interact.
Version: December 11, 2019.
*)
From cbpv Require Export axioms.
Set Implicit Arguments.
Unset Strict Implicit.
Definition ap {X Y} (f : X -> Y) {x y : X} (p : x = y) : f x = f y :=
match p with eq_refl => eq_refl end.
Definition apc {X Y} {f g : X -> Y} {x y : X} (p : f = g) (q : x = y) : f x = g y :=
match q with eq_refl => match p with eq_refl => eq_refl end end.
(** ** Primitives of the Sigma Calculus
We implement the finite type with _n_ elements, _I^n_, as the _n_-fold iteration of the Option Type. _I^0_ is implemented as the empty type.
*)
Fixpoint fin (n : nat) : Type :=
match n with
| 0 => False
| S m => option (fin m)
end.
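(** For instance, [fin 2] unfolds to [option (option False)]; its two
    inhabitants are [None] and [Some None]. *)
Example fin_two_distinct : (None : fin 2) <> Some None.
Proof. discriminate. Qed.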
(** Renamings and Injective Renamings
_Renamings_ are mappings between finite types.
*)
Definition ren (m n : nat) : Type := fin m -> fin n.
Definition id {X} (x : X) := x.
Definition idren {k: nat} : ren k k :=
fun x => x.
(** We give a special name to the newest element in a non-empty finite type, as it usually corresponds to a freshly bound variable. *)
Definition var_zero {n : nat} : fin (S n) := None.
Definition null {T} (i : fin 0) : T := match i with end.
Definition shift {n : nat} : ren n (S n) :=
Some.
Definition comp := @funcomp.
(** Extension of Finite Mappings
Assume we are given a mapping _f_ from _I^n_ to some type _X_; then we can _extend_ this mapping with a new value _x : X_ to obtain a mapping from _I^n+1_ to _X_. We denote this operation by _x . f_ and define it as follows:
*)
Definition scons {X : Type} {n : nat} (x : X) (f : fin n -> X) (m : fin (S n)) : X :=
match m with
| None => x
| Some i => f i
end.
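(** For example, [scons x f var_zero] computes to [x], and
    [scons x f (shift i)] computes to [f i]. *)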
(** ** Type Class Instances for Notation *)
(** *** Type classes for renamings. *)
Class Ren1 (X1 : Type) (Y Z : Type) :=
ren1 : X1 -> Y -> Z.
Class Ren2 (X1 X2 : Type) (Y Z : Type) :=
ren2 : X1 -> X2 -> Y -> Z.
Class Ren3 (X1 X2 X3 : Type) (Y Z : Type) :=
ren3 : X1 -> X2 -> X3 -> Y -> Z.
Class Ren4 (X1 X2 X3 X4 : Type) (Y Z : Type) :=
ren4 : X1 -> X2 -> X3 -> X4 -> Y -> Z.
Class Ren5 (X1 X2 X3 X4 X5 : Type) (Y Z : Type) :=
ren5 : X1 -> X2 -> X3 -> X4 -> X5 -> Y -> Z.
Notation "s ⟨ xi1 ⟩" := (ren1 xi1 s) (at level 7, left associativity, format "s ⟨ xi1 ⟩") : subst_scope.
Notation "s ⟨ xi1 ; xi2 ⟩" := (ren2 xi1 xi2 s) (at level 7, left associativity, format "s ⟨ xi1 ; xi2 ⟩") : subst_scope.
Notation "s ⟨ xi1 ; xi2 ; xi3 ⟩" := (ren3 xi1 xi2 xi3 s) (at level 7, left associativity, format "s ⟨ xi1 ; xi2 ; xi3 ⟩") : subst_scope.
Notation "s ⟨ xi1 ; xi2 ; xi3 ; xi4 ⟩" := (ren4 xi1 xi2 xi3 xi4 s) (at level 7, left associativity, format "s ⟨ xi1 ; xi2 ; xi3 ; xi4 ⟩") : subst_scope.
Notation "s ⟨ xi1 ; xi2 ; xi3 ; xi4 ; xi5 ⟩" := (ren5 xi1 xi2 xi3 xi4 xi5 s) (at level 7, left associativity, format "s ⟨ xi1 ; xi2 ; xi3 ; xi4 ; xi5 ⟩") : subst_scope.
Notation "⟨ xi ⟩" := (ren1 xi) (at level 1, left associativity, format "⟨ xi ⟩") : fscope.
Notation "⟨ xi1 ; xi2 ⟩" := (ren2 xi1 xi2) (at level 1, left associativity, format "⟨ xi1 ; xi2 ⟩") : fscope.
(** *** Type Classes for Substitution *)
Class Subst1 (X1 : Type) (Y Z: Type) :=
subst1 : X1 -> Y -> Z.
Class Subst2 (X1 X2 : Type) (Y Z: Type) :=
subst2 : X1 -> X2 -> Y -> Z.
Class Subst3 (X1 X2 X3 : Type) (Y Z: Type) :=
subst3 : X1 -> X2 -> X3 -> Y -> Z.
Class Subst4 (X1 X2 X3 X4: Type) (Y Z: Type) :=
subst4 : X1 -> X2 -> X3 -> X4 -> Y -> Z.
Class Subst5 (X1 X2 X3 X4 X5 : Type) (Y Z: Type) :=
subst5 : X1 -> X2 -> X3 -> X4 -> X5 -> Y -> Z.
Notation "s [ sigma ]" := (subst1 sigma s) (at level 7, left associativity, format "s '/' [ sigma ]") : subst_scope.
Notation "s [ sigma ; tau ]" := (subst2 sigma tau s) (at level 7, left associativity, format "s '/' [ sigma ; '/' tau ]") : subst_scope.
(** ** Type Class for Variables *)
Class Var X Y :=
ids : X -> Y.
(** ** Proofs for substitution primitives *)
(** Forward Function Composition
Substitutions represented as functions are ubiquitous in this development and we often have to compose them, without talking about their pointwise behaviour.
That is, we are interested in the forward composition of functions, _f o g_, for which we introduce a convenient notation, "f >> g". The direction of the arrow serves as a reminder of the _forward_ nature of this composition, that is first apply _f_, then _g_. *)
Arguments funcomp {X Y Z} (g)%fscope (f)%fscope.
Notation "f >> g" := (funcomp g f) (at level 50).
Open Scope subst_scope.
Notation "x .: f" := (@scons _ _ x f) (at level 55) : subst_scope.
(** Generic lifting operation for renamings *)
Definition up_ren m n (xi : ren m n) : ren (S m) (S n) :=
var_zero .: xi >> shift.
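(** [up_ren xi] leaves the fresh variable [var_zero] fixed and shifts the
    image of every other variable: [up_ren xi (shift x) = shift (xi x)]. *)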
(** Generic proof that lifting of renamings composes. *)
Lemma up_ren_ren k l m (xi: ren k l) (zeta : ren l m) (rho: ren k m) (E: forall x, (xi >> zeta) x = rho x) :
forall x, (up_ren xi >> up_ren zeta) x = up_ren rho x.
Proof.
intros [x|].
- simpl. unfold funcomp. now rewrite <- E.
- reflexivity.
Qed.
Arguments up_ren_ren {k l m} xi zeta rho E.
Lemma scons_eta {T} {n : nat} (f : fin (S n) -> T) :
f var_zero .: shift >> f = f.
Proof. fext. intros [x|]; reflexivity. Qed.
Lemma scons_eta_id {n : nat} : var_zero .: shift = id :> (fin (S n) -> fin (S n)).
Proof. fext. intros [x|]; reflexivity. Qed.
Lemma scons_comp (T: Type) U {m} (s: T) (sigma: fin m -> T) (tau: T -> U ) :
(s .: sigma) >> tau = (tau s) .: (sigma >> tau) .
Proof.
fext. intros [x|]. reflexivity. simpl. reflexivity.
Qed.
Lemma fin_eta {X} (f g : fin 0 -> X) :
forall x, f x = g x.
Proof. intros []. Qed.
(** ** Variadic Substitution Primitives *)
Fixpoint shift_p (p : nat) {n} : ren n (p + n) :=
fun n => match p with
| 0 => n
| S p => Some (shift_p p n)
end.
Fixpoint scons_p {X: Type} {m : nat} : forall {n} (f : fin m -> X) (g : fin n -> X), fin (m + n) -> X.
Proof.
destruct m.
- intros n f g. exact g.
- intros n f g. cbn. apply scons.
+ exact (f var_zero).
+ apply scons_p.
* intros z. exact (f (Some z)).
* exact g.
Defined.
Definition zero_p {m : nat} {n} : fin m -> fin (m + n).
Proof.
induction m.
- intros [].
- intros [x|].
+ exact (shift_p 1 (IHm x)).
+ exact var_zero.
Defined.
Lemma scons_p_head' {X} {m n} (f : fin m -> X) (g : fin n -> X) z:
(scons_p f g) (zero_p z) = f z.
Proof.
induction m.
- inversion z.
- destruct z.
+ simpl. now rewrite IHm.
+ reflexivity.
Qed.
Lemma scons_p_head X m n (f : fin m -> X) (g : fin n -> X) :
(zero_p >> scons_p f g) = f.
Proof. fext. intros z. unfold funcomp. apply scons_p_head'. Qed.
Lemma scons_p_tail' X m n (f : fin m -> X) (g : fin n -> X) z :
scons_p f g (shift_p m z) = g z.
Proof. induction m; cbn; eauto. Qed.
Lemma scons_p_tail X m n (f : fin m -> X) (g : fin n -> X) :
shift_p m >> scons_p f g = g.
Proof. fext. intros z. unfold funcomp. apply scons_p_tail'. Qed.
Lemma destruct_fin {m n} (x : fin (m + n)):
(exists x', x = zero_p x') \/ exists x', x = shift_p m x'.
Proof.
induction m; simpl in *.
- right. eauto.
- destruct x as [x|].
+ destruct (IHm x) as [[x' ->] |[x' ->]].
* left. now exists (Some x').
* right. eauto.
+ left. exists None. eauto.
Qed.
Lemma scons_p_comp' X Y m n (f : fin m -> X) (g : fin n -> X) (h : X -> Y) x:
h (scons_p f g x) = scons_p (f >> h) (g >> h) x.
Proof.
destruct (destruct_fin x) as [[x' ->]|[x' ->]].
- now rewrite !scons_p_head'.
- now rewrite !scons_p_tail'.
Qed.
Lemma scons_p_comp {X Y m n} {f : fin m -> X} {g : fin n -> X} {h : X -> Y} :
(scons_p f g) >> h = scons_p (f >> h) (g >> h).
Proof. fext. intros z. unfold funcomp. apply scons_p_comp'. Qed.
Lemma scons_p_congr {X} {m n} (f f' : fin m -> X) (g g': fin n -> X) z:
(forall x, f x = f' x) -> (forall x, g x = g' x) -> scons_p f g z = scons_p f' g' z.
Proof. intros H1 H2. induction m; eauto. cbn. destruct z; eauto. Qed.
(** Generic n-ary lifting operation. *)
Definition upRen_p p { m : nat } { n : nat } (xi : (fin) (m) -> (fin) (n)) : fin (p + m) -> fin (p + n) :=
scons_p (zero_p ) (xi >> shift_p _).
Arguments upRen_p p {m n} xi.
(** Generic proof for composition of n-ary lifting. *)
Lemma up_ren_ren_p p k l m (xi: ren k l) (zeta : ren l m) (rho: ren k m) (E: forall x, (xi >> zeta) x = rho x) :
forall x, (upRen_p p xi >> upRen_p p zeta) x = upRen_p p rho x.
Proof.
intros x. destruct (destruct_fin x) as [[? ->]|[? ->]].
- unfold upRen_p. unfold funcomp. now repeat rewrite scons_p_head'.
- unfold upRen_p. unfold funcomp. repeat rewrite scons_p_tail'.
now rewrite <- E.
Qed.
Arguments zero_p m {n}.
Arguments scons_p {X} m {n} f g.
Lemma scons_p_eta {X} {m n} {f : fin m -> X}
{g : fin n -> X} (h: fin (m + n) -> X) {z: fin (m + n)}:
(forall x, g x = h (shift_p m x)) -> (forall x, f x = h (zero_p m x)) -> scons_p m f g z = h z.
Proof.
intros H1 H2. destruct (destruct_fin z) as [[? ->] |[? ->]].
- rewrite scons_p_head'. eauto.
- rewrite scons_p_tail'. eauto.
Qed.
Arguments scons_p_eta {X} {m n} {f g} h {z}.
Arguments scons_p_congr {X} {m n} {f f'} {g g'} {z}.
Opaque scons.
Opaque var_zero.
Opaque null.
Opaque shift.
Opaque up_ren.
Opaque var_zero.
Opaque idren.
Opaque comp.
Opaque funcomp.
Opaque id.
(** ** Notations for Scoped Syntax *)
Module CommaNotation.
Notation "s , sigma" := (scons s sigma) (at level 60, format "s , sigma", right associativity) : subst_scope.
End CommaNotation.
Notation "s '..'" := (scons s ids) (at level 1, format "s ..") : subst_scope.
Notation "↑" := (shift) : subst_scope.
Ltac unfold_funcomp := match goal with
| |- context[(?f >> ?g) ?s] => change ((f >> g) s) with (g (f s))
end.
(** ** Tactics for Scoped Syntax *)
(** Generic fsimpl tactic: simplifies the above primitives in a goal. *)
Ltac fsimpl :=
repeat match goal with
| [|- context[id >> ?f]] => change (id >> f) with f (* AsimplCompIdL *)
| [|- context[?f >> id]] => change (f >> id) with f (* AsimplCompIdR *)
| [|- context [id ?s]] => change (id s) with s
| [|- context[comp ?f ?g]] => change (comp f g) with (g >> f) (* AsimplCompIdL *)
| [|- context[(?f >> ?g) >> ?h]] =>
change ((f >> g) >> h) with (f >> (g >> h)) (* AsimplComp *)
| [|- zero_p >> scons_p ?f ?g] => rewrite scons_p_head
| [|- context[(?s.:?sigma) var_zero]] => change ((s.:sigma) var_zero) with s
| [|- context[(?s.:?sigma) (shift ?m)]] => change ((s.:sigma) (shift m)) with (sigma m)
| [|- context[idren >> ?f]] => change (idren >> f) with f
| [|- context[?f >> idren]] => change (f >> idren) with f
| [|- context[?f >> (?x .: ?g)]] => change (f >> (x .: g)) with g
| [|- context[?x2 .: shift >> ?f]] => change x2 with (f var_zero); rewrite (@scons_eta _ _ f)
| [|- context[?f var_zero .: ?g]] => change g with (shift >> f); rewrite scons_eta
|[|- _ = ?h (?f ?s)] => change (h (f s)) with ((f >> h) s)
|[|- ?h (?f ?s) = _] => change (h (f s)) with ((f >> h) s)
| _ => first [progress (rewrite scons_comp) | progress (rewrite scons_eta_id) | progress (autorewrite with FunctorInstances)]
end.
(** Generic fsimpl tactic: simplifies the above primitives in the context *)
Ltac fsimplc :=
repeat match goal with
| [H: context[id >> ?f] |- _] => change (id >> f) with f in H(* AsimplCompIdL *)
| [H: context[?f >> id]|- _] => change (f >> id) with f in H(* AsimplCompIdR *)
| [H: context [id ?s]|- _] => change (id s) with s in H
| [H: context[comp ?f ?g]|- _] => change (comp f g) with (g >> f) in H (* AsimplCompIdL *)
| [H: context[(?f >> ?g) >> ?h]|- _] =>
change ((f >> g) >> h) with (f >> (g >> h)) in H (* AsimplComp *)
| [H: context[(?s.:?sigma) var_zero]|- _] => change ((s.:sigma) var_zero) with s in H
| [H: context[(?s.:?sigma) var_zero]|- _] => change ((s.:sigma) var_zero) with s in H
| [H: context[(?s.:?sigma) (shift ?m)]|- _] => change ((s.:sigma) (shift m)) with (sigma m) in H
|[H : context[ _ = ?h (?f ?s)]|- _] => change (h (f s)) with ((f >> h) s) in H
|[H: context[?h (?f ?s) = _]|- _] => change (h (f s)) with ((f >> h) s) in H
| [H: context[idren >> ?f]|- _] => change (idren >> f) with f in H
| [H: context[?f >> idren]|- _] => change (f >> idren) with f in H
| [H: context[?f >> (?x .: ?g)]|- _] =>
change (f >> (x .: g)) with g in H
| [H: context[?x2 .: shift >> ?f]|- _] =>
change x2 with (f var_zero) in H; rewrite (@scons_eta _ _ f) in H
| [H: context[?f var_zero .: ?g]|- _] =>
change g with (shift >> f) in H; rewrite scons_eta in H
| _ => first [progress (rewrite scons_comp in *) | progress (rewrite scons_eta_id in *) | progress (autorewrite with FunctorInstances in *)]
end.
(** Simplification in both the goal and the context *)
Tactic Notation "fsimpl" "in" "*" :=
fsimpl; fsimplc.
Tactic Notation "auto_case" tactic(t) := (match goal with
| [|- forall (i : fin 0), _] => intros []; t
| [|- forall (i : fin (S (S (S (S _))))), _] => intros [[[[|]|]|]|]; t
| [|- forall (i : fin (S (S (S _)))), _] => intros [[[|]|]|]; t
| [|- forall (i : fin (S (S _))), _] => intros [[?|]|]; t
| [|- forall (i : fin (S _)), _] => intros [?|]; t
end).
(** Functor instances which can be added later on. *)
Hint Rewrite @scons_p_comp scons_p_head scons_p_tail @scons_p_head' @scons_p_tail': FunctorInstances.
|
[STATEMENT]
lemma sams_suma__out546:
assumes "A B C D E F SumA A B C" and
"SAMS A B C D E F"
shows "E Out D F"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. E Out D F
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. E Out D F
[PROOF STEP]
have "D E F A B C SumA A B C"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. D E F A B C SumA A B C
[PROOF STEP]
using assms(1) suma_sym
[PROOF STATE]
proof (prove)
using this:
A B C D E F SumA A B C
?A ?B ?C ?D ?E ?F SumA ?G ?H ?I \<Longrightarrow> ?D ?E ?F ?A ?B ?C SumA ?G ?H ?I
goal (1 subgoal):
1. D E F A B C SumA A B C
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
D E F A B C SumA A B C
goal (1 subgoal):
1. E Out D F
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
D E F A B C SumA A B C
goal (1 subgoal):
1. E Out D F
[PROOF STEP]
have "SAMS D E F A B C"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. SAMS D E F A B C
[PROOF STEP]
using assms(2) sams_sym
[PROOF STATE]
proof (prove)
using this:
SAMS A B C D E F
SAMS ?A ?B ?C ?D ?E ?F \<Longrightarrow> SAMS ?D ?E ?F ?A ?B ?C
goal (1 subgoal):
1. SAMS D E F A B C
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
SAMS D E F A B C
goal (1 subgoal):
1. E Out D F
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
D E F A B C SumA A B C
SAMS D E F A B C
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
D E F A B C SumA A B C
SAMS D E F A B C
goal (1 subgoal):
1. E Out D F
[PROOF STEP]
using sams_suma__out213
[PROOF STATE]
proof (prove)
using this:
D E F A B C SumA A B C
SAMS D E F A B C
\<lbrakk>?A ?B ?C ?D ?E ?F SumA ?D ?E ?F; SAMS ?A ?B ?C ?D ?E ?F\<rbrakk> \<Longrightarrow> ?B Out ?A ?C
goal (1 subgoal):
1. E Out D F
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
E Out D F
goal:
No subgoals!
[PROOF STEP]
qed |
Content under Creative Commons BY 4.0 license and code under MIT license. © Juan Gómez and Nicolás Guarín-Zapata 2020. This material is part of the course Modelación Computacional in the Civil Engineering program at Universidad EAFIT.
# Plane frames
## Introduction
In this notebook we add one more element to the structural analysis program that started with the simple problem of one-dimensional springs and was later extended to handle plane trusses. In particular, we now add beam-type elements to tackle problems in which the members work mainly in bending. Additionally, in an independent implementation, it is possible to couple the axial-load behavior of the truss element with the bending behavior of the beam.
The beam model discussed in this notebook is an Euler-Bernoulli model, in which shear deformations are neglected; it is therefore valid for slender elements, whose cross-sectional dimensions are small compared with their length.
**After completing this notebook you should be able to:**
* Recognize the modifications needed to turn a basic spring-assembly program into one for structures made of beams.
* Solve simple problems of beam structures subjected to point loads.
### Assembly of beam-type elements
Consider a beam element in its local reference system, as shown in the figure. In the local system the element has 2 degrees of freedom per node, corresponding to the transverse displacement and a rotation about an axis perpendicular to the plane of the image.
<center>
</center>
The vector of degrees of freedom (or generalized displacements) of the element is:
$$
u^T=\begin{bmatrix}v_1 &\theta_1 &v_2 &\theta_2\end{bmatrix}
$$
while the force vector (shears and moments) is given by:
$$
f^T=\begin{bmatrix}f_1 &m_1 &f_2 &m_2\end{bmatrix}
$$
In the local reference system, the stiffness matrix relating forces to displacements is:
$$
\begin{Bmatrix} f_1\\ m_1\\ f_2\\ m_2\end{Bmatrix} =
\begin{bmatrix}
12\frac{EI}{\mathcal l^3} &6\frac{EI}{\mathcal l^2} &-12\frac{EI}{\mathcal l^3} &6\frac{EI}{\mathcal l^2}\\
6\frac{EI}{\mathcal l^2}&4\frac{EI}{\mathcal l}&-6\frac{EI}{\mathcal l^2}&2\frac{EI}{\mathcal l}\\
-12\frac{EI}{\mathcal l^3}&-6\frac{EI}{\mathcal l^2}&12\frac{EI}{\mathcal l^3}&-6\frac{EI}{\mathcal l^2}\\
6\frac{EI}{\mathcal l^2}&2\frac{EI}{\mathcal l}&-6\frac{EI}{\mathcal l^2}&4\frac{EI}{\mathcal l}
\end{bmatrix}
\begin{Bmatrix} v_1\\ \theta_1\\ v_2\\ \theta_2\end{Bmatrix}
$$
<div class="alert alert-warning">
**Note:** The stiffness matrix can be derived by several methods that will be covered in the Structural Analysis course.
</div>
To obtain the global stiffness matrix of the structure it is necessary to consider once again the contribution of every element in the **global** reference system. To that end we proceed as in the truss problem. Using the rotation transformation matrix $\lambda$ we have:
$$K = \lambda^T k\lambda$$
where $K$ is the stiffness matrix of the beam element in the global reference system. Note that in this system the element now has 3 degrees of freedom per node.
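A minimal sketch of this operation in NumPy (here `lam` and `k_local` are placeholder names for the rotation transformation matrix and the local stiffness matrix; both are assumed to have compatible sizes):
```python
import numpy as np

def to_global(k_local, lam):
    """Transform a local stiffness matrix to the global system: K = lam^T k lam."""
    return lam.T @ k_local @ lam
```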
### A simple frame structure
Consider the following simple model made of an assembly of 3 elements. (The input data files are available in the `files` folder.)
<center>
</center>
We want to determine the lateral displacement of the structure when it is subjected to the lateral load $P$.
<div class="alert alert-warning">
Find the rotation transformation matrix $\lambda$ required to formulate the stiffness matrix in the global reference system.
</div>
The modifications that must be applied to the spring code (or to the truss code) are only related to the fact that there are now 3 degrees of freedom per node.
```python
import matplotlib.pyplot as plt
import numpy as np
import sympy as sym
```
```python
%matplotlib inline
```
Read the input files from the `files` folder.
```python
def readin():
nodes = np.loadtxt('files/Fnodes.txt', ndmin=2)
mats = np.loadtxt('files/Fmater.txt', ndmin=2)
elements = np.loadtxt('files/Feles.txt', ndmin=2)
loads = np.loadtxt('files/Floads.txt', ndmin=2)
return nodes, mats, elements, loads
```
The function `eqcounter` counts the equations and builds the boundary-conditions array.
```python
def eqcounter(nodes):
nnodes = nodes.shape[0]
    IBC = np.zeros([nnodes, 3], dtype=int)
for i in range(nnodes):
for k in range(3):
IBC[i, k] = int(nodes[i, k+3])
neq = 0
for i in range(nnodes):
for j in range(3):
if IBC[i, j] == 0:
IBC[i, j] = neq
neq = neq + 1
return neq, IBC
```
Next, the function `DME` computes the equation-assembly matrix.
```python
def DME(nodes, elements):
nels = elements.shape[0]
    IELCON = np.zeros([nels, 2], dtype=int)
    DME_mat = np.zeros([nels, 6], dtype=int)
neq, IBC = eqcounter(nodes)
ndof = 6
nnodes = 2
for i in range(nels):
for j in range(nnodes):
IELCON[i, j] = elements[i, j+3]
kk = IELCON[i, j]
for l in range(3):
DME_mat[i, 3*j+l] = IBC[kk, l]
return DME_mat, IBC, neq
```
The function `assembly` uses the model and the `DME_mat` matrix to compute the global stiffness matrix.
```python
def assembly(elements, mats, nodes, neq, DME_mat):
    IELCON = np.zeros([2], dtype=int)
KG = np.zeros((neq, neq))
nels = elements.shape[0]
nnodes = 2
ndof = 6
for el in range(nels):
elcoor = np.zeros([nnodes, 2])
        im = int(elements[el, 2])
par0 = mats[im, 0]
par1 = mats[im, 1]
for j in range(nnodes):
IELCON[j] = elements[el, j+3]
elcoor[j, 0] = nodes[IELCON[j], 1]
elcoor[j, 1] = nodes[IELCON[j], 2]
kloc = uelbeam2DU(elcoor, par0, par1)
dme = DME_mat[el, :ndof]
for row in range(ndof):
glob_row = dme[row]
if glob_row != -1:
for col in range(ndof):
glob_col = dme[col]
if glob_col != -1:
KG[glob_row, glob_col] = KG[glob_row, glob_col] +\
kloc[row, col]
return KG
```
The function `uelbeam2DU` uses the nodal coordinates and the material parameters to compute the local stiffness matrix already transformed to the global reference system.
<div class="alert alert-warning">
Add a comment to each relevant line of the following functions and use them to write the corresponding pseudocode. In particular, identify the computation of the rotation transformation matrix $\lambda$.
</div>
```python
def uelbeam2DU(coord, I, Emod):
"""2D-2-noded beam element
without axial deformation
Parametros
----------
coord : ndarray
Cordenadas nodales (2, 2).
A : float
Area de la seccion transversal.
Emod : float
Modulo de elasticidad (>0).
Returna
-------
kl : ndarray
Matriz de rigidez local para el elemento (4, 4).
"""
vec = coord[1, :] - coord[0, :]
nx = vec[0]/np.linalg.norm(vec)
ny = vec[1]/np.linalg.norm(vec)
L = np.linalg.norm(vec)
Q = np.array([
[-ny, nx, 0, 0, 0, 0],
[0, 0, 1.0, 0, 0, 0],
[0, 0, 0, -ny, nx, 0],
[0, 0, 0, 0, 0, 1.0]])
    kl = (I*Emod/(L*L*L)) * np.array([
        [12.0, 6*L, -12.0, 6*L],
        [6*L, 4*L*L, -6*L, 2*L*L],
        [-12.0, -6*L, 12.0, -6*L],
        [6*L, 2*L*L, -6*L, 4*L*L]])
kG = np.dot(np.dot(Q.T, kl), Q)
return kG
```
The routine `loadasem` assembles the nodal load vector.
```python
def loadasem(loads, IBC, neq, nl):
RHSG = np.zeros([neq])
for i in range(nl):
il = int(loads[i, 0])
ilx = IBC[il, 0]
ily = IBC[il, 1]
ilT = IBC[il, 2]
if ilx != -1:
RHSG[ilx] = loads[i, 1]
if ily != -1:
RHSG[ily] = loads[i, 2]
if ilT != -1:
RHSG[ilT] = loads[i, 3]
return RHSG
```
The main program keeps the same structure as the spring program, that is, it performs the following steps:
* Reads the model.
* Computes the assembly matrix `DME_mat`.
* Assembles the global system of equations.
* Computes the global displacements `UG` by solving the global system.
```python
nodes, mats, elements, loads = readin()
DME_mat, IBC, neq = DME(nodes, elements)
KG = assembly(elements, mats, nodes, neq, DME_mat)
RHSG = loadasem(loads, IBC, neq, 1)
UG = np.linalg.solve(KG, RHSG)
print(UG)
```
[ 2.25000000e+01 2.00000000e+01 1.27557539e-16 -1.25918779e-15
2.00000000e+01 4.88970566e-16]
<div class="alert alert-warning">
### Proposed problems
#### Problem 1
Implement a function that computes the nodal forces in each element and verifies the equilibrium of the system.
#### Problem 2
Determine the lateral stiffness of the structure using the following expression:
$$k = \frac{P}{\delta}\, .$$
#### Problem 3
Retrofit the structure so that its lateral stiffness increases by a factor of 2.0.
#### Problem 4
Repair the frame shown in the figure by adding elements and/or imposing appropriate displacement restraints. (Create a new set of data files.)
#### Problem 5
For the frame in the figure, assume that the connection at node 5 is pinned and indicate how this condition changes the results.
<center>
</center>
</div>
## References
* Bathe, Klaus-Jürgen. (2006) Finite element procedures. Klaus-Jurgen Bathe. Prentice Hall International.
* Juan Gómez, Nicolás Guarín-Zapata (2018). SolidsPy: 2D-Finite Element Analysis with Python, <https://github.com/AppliedMechanics-EAFIT/SolidsPy>.
## Notebook format
The following cell changes the format of the notebook.
```python
from IPython.core.display import HTML
def css_styling():
styles = open('./nb_style.css', 'r').read()
return HTML(styles)
css_styling()
```
<link href='http://fonts.googleapis.com/css?family=Fenix' rel='stylesheet' type='text/css'>
<link href='http://fonts.googleapis.com/css?family=Alegreya+Sans:100,300,400,500,700,800,900,100italic,300italic,400italic,500italic,700italic,800italic,900italic' rel='stylesheet' type='text/css'>
<link href='http://fonts.googleapis.com/css?family=Source+Code+Pro:300,400' rel='stylesheet' type='text/css'>
<style>
/*
Template for Notebooks for Modelación computacional.
Based on Lorena Barba template available at:
https://github.com/barbagroup/AeroPython/blob/master/styles/custom.css
*/
/* Fonts */
@font-face {
font-family: "Computer Modern";
src: url('http://mirrors.ctan.org/fonts/cm-unicode/fonts/otf/cmunss.otf');
}
/* Text */
div.cell{
width:800px;
margin-left:16% !important;
margin-right:auto;
}
h1 {
font-family: 'Alegreya Sans', sans-serif;
}
h2 {
font-family: 'Fenix', serif;
}
h3{
font-family: 'Fenix', serif;
margin-top:12px;
margin-bottom: 3px;
}
h4{
font-family: 'Fenix', serif;
}
h5 {
font-family: 'Alegreya Sans', sans-serif;
}
div.text_cell_render{
font-family: 'Alegreya Sans',Computer Modern, "Helvetica Neue", Arial, Helvetica, Geneva, sans-serif;
line-height: 135%;
font-size: 120%;
width:600px;
margin-left:auto;
margin-right:auto;
}
.CodeMirror{
font-family: "Source Code Pro";
font-size: 90%;
}
/* .prompt{
display: None;
}*/
.text_cell_render h1 {
font-weight: 200;
font-size: 50pt;
line-height: 100%;
color:#CD2305;
margin-bottom: 0.5em;
margin-top: 0.5em;
display: block;
}
.text_cell_render h5 {
font-weight: 300;
font-size: 16pt;
color: #CD2305;
font-style: italic;
margin-bottom: .5em;
margin-top: 0.5em;
display: block;
}
.warning{
color: rgb( 240, 20, 20 )
}
</style>
|
= = = Types of ylides = = =
|
''' Functions for use with 1d and 2D spectra.
Due to laziness, documentation is brief, incomplete, and poorly formatted.'''
import numpy as np
from astropy.io import fits
#________________________________________________________________________________
def errorSpectrum(bigspec):
'''Calculates 1D error spectrum from 2D spectrum as standard deviation'''
return np.std(bigspec, axis=0, ddof=1)
# _______________________________________________________________________________
def writeToFits(oldname, data, header, filenumber, quad, spectype):
    '''Takes an array and writes it to a fits file. Returns the name of the file
Inputs
oldname = full name of the file containing multiple spectra
data = the spectrum
header = header of the extension from which data comes; also new header
filenumber = number of the spectrum
quad = quadrant number
spectype = type of spectrum'''
#New file name
newname = oldname[19:]
newname = 'Q' + str(quad) + '_' + str(filenumber) + '_' + newname
#Type specific changes
if spectype.lower() == '2d':
#Name
newname = '2D_' + newname
elif spectype.lower() == 'error':
#Name
newname = newname[:-27]
newname = newname + 'ERROR.fits'
#Write to file
hdu = fits.PrimaryHDU(data, header=header)
hdu.writeto(newname, overwrite=True)
return newname
#________________________________________________________________________________
def mapWavelength(header, length, top, bottom):
'''The following function takes as input the header and length of a spectrum fits
    file, as well as the points where the spectrum was cut to ignore the pillarboxing.
It maps the dispersion axis from pixels to wavelength.
Note: the last pixel is not mapped, but it gets cut anyways, so it doesn't matter.
Data Dictionary:
beginning = beginning of pillarboxing
end = end of pillarboxing
stepsize = number of Angstroms per pixel
top/bottom = indices of the ends of the spectrum, without pillarboxing '''
#Map whole strip, including pillarboxing
beginning = header['CRVAL1']
stepsize = header['CD1_1']
end = length*stepsize + beginning
mappedvalues = np.arange(beginning, end, stepsize)
#Cut the strip to line up with just the spectrum
return mappedvalues[top: -bottom]
#_______________________________________________________________________________
def cutPillars(olddata):
'''The following function takes the spectral data as input and outputs the
data with the "pillarboxing" on the sides cut off as well as the indices where
it was cut off. These "pillarboxing" pixels all have a value of -1.
Input
olddata = 1D array containing spectrum
**Note: the end_index is the index for the first pixel on the right pillar.'''
#Remove left pillar
leftcut = np.where(olddata > -1)[0] #the data has 1 dimension
#Remove right pillar
reversedata = np.flip(olddata, 0) #the data has 1 dimension
rightcut = np.where(reversedata > -1)[0] #this is still the flipped array
#Get endpoints of the data
beginning_index = leftcut[0] #these indices are for the element, not dim
end_index = rightcut[0] #although not 0th indexed when coming from end,
#scope operator excludes endpoint anyways
#Extract spectrum
newdata = olddata
newdata = newdata[beginning_index: -end_index]
return newdata, beginning_index, end_index
#_______________________________________________________________________________
def binSpectrum(xfull, yfull, isError, dx=2):
'''The following functions bins spectra into bins of size dx pixels.
Requires x-axis data so that the spectrum y-axis values are lined up with the
beginning of the appropriate wavelength bin.
**Note: 3rd argument is boolean telling the function if the data is error data.
If so, we bin the data via addition in quadrature.'''
#Constants
full_length = xfull.size #length of the arrays containing unbinned data
bin_index = 0 #index for binned arrays (as opposed to full_index)
#Initialize binned arrays
if full_length%dx == 0:
xbin = np.zeros(int(full_length/dx))
ybin = np.zeros(int(full_length/dx))
else:
xbin = np.zeros(full_length//dx + 1)
ybin = np.zeros(full_length//dx + 1)
#Bin the data
for full_index in range(0, full_length, dx):
#Create bins, or "buckets"
if bin_index == (ybin.size-1): #last bin
bucket = yfull[full_index:]
else: #normal bins
bucket = yfull[full_index : full_index+dx]
#Add up the spectrum values in bucket
if isError: #if error spectrum
ybin[bin_index] = np.sqrt(np.sum(bucket**2))
else: #if science spectrum
ybin[bin_index] = np.sum(bucket)
#Set binned x-axis values
xbin[bin_index] = xfull[full_index]
bin_index += 1
return xbin, ybin
# _______________________________________________________________________________
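#________________________________________________________________________________
# Minimal self-contained check of the routines above, using synthetic data
# (no FITS file required). The bin size dx=2 and the sample values are
# arbitrary choices for illustration.
if __name__ == '__main__':
    wave = np.arange(4000.0, 4010.0, 1.0)   # fake wavelength axis, 10 pixels
    flux = np.ones_like(wave)               # flat spectrum
    xbin, ybin = binSpectrum(wave, flux, False, dx=2)
    print(xbin, ybin)                       # each bin sums two pixels -> 2.0
    big2d = np.tile(flux, (5, 1))           # fake 2D spectrum with 5 rows
    print(errorSpectrum(big2d))             # identical rows -> zero error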
|
module IdrisJvm.IR.Exports
import IdrisJvm.IR.Types
import IdrisJvm.IO
%access public export
funknown : FDesc
fcon : String -> FDesc
fstr : String -> FDesc
fio : FDesc -> FDesc
fapp : String -> List FDesc -> FDesc
consFDesc : FDesc -> List FDesc -> List FDesc
emptyFDesc : List FDesc
consSDecl : SDecl -> List SDecl -> List SDecl
consSDecl = (::)
emptySDecl : List SDecl
emptySDecl = []
consExportIFace : ExportIFace -> List ExportIFace -> List ExportIFace
consExportIFace = (::)
emptyExportIFace : List ExportIFace
emptyExportIFace = []
consString : String -> List String -> List String
emptyListString : List String
consExport : Export -> List Export -> List Export
emptyExport : List Export
loc : Int -> LVar
glob : String -> LVar
it8 : NativeTy
it16 : NativeTy
it32 : NativeTy
it64 : NativeTy
itFixed : NativeTy -> IntTy
itNative : IntTy
itBig : IntTy
itChar : IntTy
atInt : IntTy -> ArithTy
atFloat : ArithTy
lPlus : ArithTy -> PrimFn
lMinus : ArithTy -> PrimFn
lTimes : ArithTy -> PrimFn
lSDiv : ArithTy -> PrimFn
lSRem : ArithTy -> PrimFn
lEq : ArithTy -> PrimFn
lSLt : ArithTy -> PrimFn
lSLe : ArithTy -> PrimFn
lSGt : ArithTy -> PrimFn
lSGe : ArithTy -> PrimFn
lUDiv : IntTy -> PrimFn
lURem : IntTy -> PrimFn
lAnd : IntTy -> PrimFn
lOr : IntTy -> PrimFn
lXOr : IntTy -> PrimFn
lCompl : IntTy -> PrimFn
lSHL : IntTy -> PrimFn
lLSHR : IntTy -> PrimFn
lASHR : IntTy -> PrimFn
lLt : IntTy -> PrimFn
lLe : IntTy -> PrimFn
lGt : IntTy -> PrimFn
lGe : IntTy -> PrimFn
lIntFloat : IntTy -> PrimFn
lFloatInt : IntTy -> PrimFn
lIntStr : IntTy -> PrimFn
lStrInt : IntTy -> PrimFn
lChInt : IntTy -> PrimFn
lIntCh : IntTy -> PrimFn
lFExp : PrimFn
lFLog : PrimFn
lFSin : PrimFn
lFCos : PrimFn
lFTan : PrimFn
lFASin : PrimFn
lFACos : PrimFn
lFATan : PrimFn
lFSqrt : PrimFn
lFFloor : PrimFn
lFCeil : PrimFn
lFNegate : PrimFn
lStrHead : PrimFn
lStrTail : PrimFn
lStrCons : PrimFn
lStrIndex : PrimFn
lStrRev : PrimFn
lStrSubstr : PrimFn
lReadStr : PrimFn
lWriteStr : PrimFn
lSystemInfo : PrimFn
lFork : PrimFn
lPar : PrimFn
lCrash : PrimFn
lNoOp : PrimFn
lStrConcat : PrimFn
lStrLt : PrimFn
lStrEq : PrimFn
lStrLen : PrimFn
lFloatStr : PrimFn
lStrFloat : PrimFn
lSExt : IntTy -> IntTy -> PrimFn
lZExt : IntTy -> IntTy -> PrimFn
lTrunc : IntTy -> IntTy -> PrimFn
lBitCast : ArithTy -> ArithTy -> PrimFn
lExternal : String -> PrimFn
exportData : FDesc -> Export
exportFun : String -> FDesc -> FDesc -> List FDesc -> Export
mkExportIFace : String -> String -> List Export -> ExportIFace
sV : LVar -> SExp
sApp : Bool -> String -> List LVar -> SExp
sLet : LVar -> SExp -> SExp -> SExp
sUpdate : LVar -> SExp -> SExp
sCon : (Maybe LVar) -> Int -> String -> List LVar -> SExp
sCase : CaseType -> LVar -> List SAlt -> SExp
sChkCase : LVar -> List SAlt -> SExp
sProj : LVar -> Int -> SExp
sConst : Const -> SExp
sForeign : FDesc -> FDesc -> List (FDesc, LVar) -> SExp
sOp : PrimFn -> List LVar -> SExp
sNothing : SExp
sError : String -> SExp
mkSForeignArg : FDesc -> LVar -> (FDesc, LVar)
consSAlt : SAlt -> List SAlt -> List SAlt
emptySAlt : List SAlt
consSForeignArg : (FDesc, LVar) -> List (FDesc, LVar) -> List (FDesc, LVar)
emptySForeignArg : List (FDesc, LVar)
consLVar : LVar -> List LVar -> List LVar
emptyLVar : List LVar
constI : Int -> Const
constBI : String -> Const
constFl : Double -> Const
constCh : Char -> Const
constStr : String -> Const
constB8 : Bits8 -> Const
constB16 : Bits16 -> Const
constB32 : Int -> Const
constB64 : Bits64 -> Const
aType : ArithTy -> Const
strType : Const
worldType : Const
theWorld : Const
voidType : Const
forgot : Const
sFun : String -> List String -> Int -> SExp -> SDecl
sConCase : Int -> Int -> String -> List String -> SExp -> SAlt
sConstCase : Const -> SExp -> SAlt
sDefaultCase : SExp -> SAlt
nothingLVar : Maybe LVar
justLVar : LVar -> Maybe LVar
updatable : CaseType
shared : CaseType
funknown = FUnknown
fcon = FCon
fstr = FStr
fio = FIO
fapp = FApp
consFDesc = (::)
emptyFDesc = []
consString = (::)
emptyListString = []
consExport = (::)
emptyExport = []
loc = Loc
glob = Glob
it8 = IT8
it16 = IT16
it32 = IT32
it64 = IT64
itFixed = ITFixed
itNative = ITNative
itBig = ITBig
itChar = ITChar
atInt = ATInt
atFloat = ATFloat
{- PrimFn exports -}
lPlus = LPlus
lMinus = LMinus
lTimes = LTimes
lSDiv = LSDiv
lSRem = LSRem
lEq = LEq
lSLt = LSLt
lSLe = LSLe
lSGt = LSGt
lSGe = LSGe
lUDiv = LUDiv
lURem = LURem
lAnd = LAnd
lOr = LOr
lXOr = LXOr
lCompl = LCompl
lSHL = LSHL
lLSHR = LLSHR
lASHR = LASHR
lLt = LLt
lLe = LLe
lGt = LGt
lGe = LGe
lIntFloat = LIntFloat
lFloatInt = LFloatInt
lIntStr = LIntStr
lStrInt = LStrInt
lChInt = LChInt
lIntCh = LIntCh
lFExp = LFExp
lFLog = LFLog
lFSin = LFSin
lFCos = LFCos
lFTan = LFTan
lFASin = LFASin
lFACos = LFACos
lFATan = LFATan
lFSqrt = LFSqrt
lFFloor = LFFloor
lFCeil = LFCeil
lFNegate = LFNegate
lStrHead = LStrHead
lStrTail = LStrTail
lStrCons = LStrCons
lStrIndex = LStrIndex
lStrRev = LStrRev
lStrSubstr = LStrSubstr
lReadStr = LReadStr
lWriteStr = LWriteStr
lSystemInfo = LSystemInfo
lFork = LFork
lPar = LPar
lCrash = LCrash
lNoOp = LNoOp
lStrConcat = LStrConcat
lStrLt = LStrLt
lStrEq = LStrEq
lStrLen = LStrLen
lFloatStr = LFloatStr
lStrFloat = LStrFloat
lSExt = LSExt
lZExt = LZExt
lTrunc = LTrunc
lBitCast = LBitCast
lExternal = LExternal
exportData = ExportData
exportFun = ExportFun
mkExportIFace = MkExportIFace
sV = SV
sApp = SApp
sLet = SLet
sUpdate = SUpdate
sCon = SCon
sCase = SCase
sChkCase = SChkCase
sProj = SProj
sConst = SConst
sForeign = SForeign
sOp = SOp
sNothing = SNothing
sError = SError
mkSForeignArg fdesc lvar = (fdesc, lvar)
consSAlt = (::)
emptySAlt = []
consSForeignArg = (::)
emptySForeignArg = []
consLVar = (::)
emptyLVar = []
constI = I
constBI = BI
constFl = Fl
constCh = Ch
constStr = Str
constB8 = B8
constB16 = B16
constB32 = B32
constB64 = B64
aType = AType
strType = StrType
worldType = WorldType
theWorld = TheWorld
voidType = VoidType
forgot = Forgot
sFun n args locs exp = SFun n args locs exp
sConCase = SConCase
sConstCase = SConstCase
sDefaultCase = SDefaultCase
nothingLVar = Nothing
justLVar = Just
updatable = Updatable
shared = Shared
|
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE TypeFamilies #-}
module Marvin.API.Algorithms.KMeans (
KMeans (..)
, CentroidModel
, distancesFromNearestCentroids
, centroids
) where
import Control.Monad.Reader
import Data.Foldable
import Data.Array.IArray
import Data.Ord
import Control.Arrow
import Control.Monad.Except
import Marvin.API.Table.DataType
import Marvin.API.Fallible
import Marvin.API.Meta.Model
import Marvin.API.Table.Internal
import Data.Vector.Unboxed (Vector)
import qualified Data.Vector.Unboxed as UVec
import qualified Data.Vector as Vec
import qualified Numeric.LinearAlgebra as LA
-- * Parameters
-- | Parameters.
data KMeans = KMeans {
k :: Int -- ^ Number of clusters (centroids).
, numberOfIterations :: Int -- ^ Number of iterations.
}
-- | Default parameters.
defaultKMeans :: KMeans
defaultKMeans = KMeans {
k = 5
, numberOfIterations = 10
}
-- * Model
-- | Resulting model.
data CentroidModel = CentroidModel {
centroids_ :: Array Label Centroid
, dimension :: Int
} deriving Show
-- | Retrieves a list of the centroids.
centroids :: CentroidModel -> [[Double]]
centroids model = fmap LA.toList $ fmap snd $ assocs $ centroids_ model
-- | Retrieves the distances from the nearest centroids for some points
distancesFromNearestCentroids :: CentroidModel -> NumericTable -> Fallible NumericColumn
distancesFromNearestCentroids model tab = do
if dimension model /= numberOfColumns tab
    then throwError $ RowLengthMismatch $ "While trying to calculate nearest distances " ++
"with CentroidModel."
else return ()
let ps = tableToRows tab
let distances = fmap (distanceFromNearest model) ps
fromList $ Vec.toList $ distances
-- | Type aliases.
type Point = LA.Vector Double
type Centroid = LA.Vector Double
type Label = Int
-- | Distance from nearest centroid.
distanceFromNearest :: CentroidModel -> Point -> Double
distanceFromNearest model p = minimum $ fmap (dist p) cs
where
cs = fmap snd $ assocs $ centroids_ model
instance Estimator KMeans NumericTable where
type ResultingModel KMeans NumericTable = CentroidModel
fit = fit'
fit' :: KMeans -> NumericTable -> Fallible CentroidModel
fit' kMeans table = do
ensurePositive
(InvalidAlgorithmParameter "KMeans" "numberOfIterations must be positive") numIter
ensurePositive
(InvalidAlgorithmParameter "KMeans" "number of centroids (k) must be positive") numCentroids
ensureNonEmptyTable "While fitting KMeans." table
return $ CentroidModel {
centroids_ = centroidArray
, dimension = numberOfColumns table
}
where
centroidArray = fitKMeans params numIter
numIter = numberOfIterations kMeans
numCentroids = k kMeans
pts = tableToRows table
params = KMeansParams {
numOfCents = numCentroids
, points = Vec.toList pts
}
instance Predictor CentroidModel where
type Testing CentroidModel = NumericTable
type Prediction CentroidModel = NaturalColumn
predict model numericTable = if numberOfColumns numericTable /= dimension model
then throwError $ RowLengthMismatch "When predicting with a CentroidModel."
else fromList $ Vec.toList labels
where
pts = tableToRows numericTable
labels = fmap (nearestLabel (centroids_ model)) pts
tableToRows =
Vec.map (LA.fromList . UVec.toList) . transposeVectorVectorU . Vec.map values . columnsVec
data InitStrategy = TakeFirstK
data KMeansParams = KMeansParams {
points :: [Point]
, numOfCents :: Int
}
dimensions :: KMeansParams -> Int
dimensions params = LA.size $ head $ points params
initCents :: Reader KMeansParams (Array Label Centroid)
initCents = do
ps <- asks points
k <- asks numOfCents
return $ listArray (1,k) (take k ps)
dist :: LA.Vector Double -> LA.Vector Double -> Double
dist xs ys = LA.norm_2 $ xs - ys
nearestLabels :: Array Label Centroid -> Reader KMeansParams [(Label, Point)]
nearestLabels cs = do
ps <- asks points
return $
fmap (\p -> (nearestLabel cs p, p)) ps
nearestLabel :: Array Label Centroid -> Point -> Label
nearestLabel cs p = fst $ minimumBy (comparing (dist p . snd)) (assocs cs)
mean :: [Vector Double] -> Vector Double
mean xs = UVec.map (\x -> x / n) $ sumVecs xs
where
n = fromIntegral (length xs)
sumVecs = foldr1 addVecs
addVecs x y = UVec.zipWith (+) x y
accumArrWithLabs :: Ix i => Array i (Point, Int) -> [(i, Point)] -> Array i (Point, Int)
accumArrWithLabs = accum meanAccum
meanAccum :: (Point, Int) -> Point -> (Point, Int)
meanAccum (sum, cnt) p = (sum + p, cnt + 1)
nextCentroids :: Array Label Centroid -> Reader KMeansParams (Array Label Centroid)
nextCentroids cs = do
ps <- asks points
k <- asks numOfCents
n <- asks dimensions
let initArr = listArray (1,k) (repeat (n LA.|> [0,0..], 0))
labeledPoints <- nearestLabels cs
let means = accum meanAccum initArr labeledPoints
return $ amap (\(sum, cnt) -> LA.scale (1 / fromIntegral cnt) sum) means
fitKMeans :: KMeansParams -> Int
-> Array Label Centroid
fitKMeans params numIter = runReader readerIter params
where
readerIter = do
init <- initCents
runIterationM numIter nextCentroids init
runIterationM :: Monad m => Int -> (a -> m a) -> a -> m a
runIterationM n f init = iterate (>>= f) (return init) !! n
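-- A minimal usage sketch (illustrative only; how a 'NumericTable' is built
-- from raw data is assumed to be provided elsewhere in the Marvin API):
--
-- > clusterDistances :: NumericTable -> Fallible NumericColumn
-- > clusterDistances tab = do
-- >   model <- fit (defaultKMeans { k = 3 }) tab
-- >   distancesFromNearestCentroids model tab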
|
(* Title: HOL/Auth/n_mutualExSimp_lemma_inv__3_on_rules.thy
Author: Yongjian Li and Kaiqiang Duan, State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
Copyright 2016 State Key Lab of Computer Science, Institute of Software, Chinese Academy of Sciences
*)
header{*The n_mutualExSimp Protocol Case Study*}
theory n_mutualExSimp_lemma_inv__3_on_rules imports n_mutualExSimp_lemma_on_inv__3
begin
section{*All lemmas on causal relation between inv__3*}
lemma lemma_inv__3_on_rules:
assumes b1: "r \<in> rules N" and b2: "(\<exists> p__Inv3 p__Inv4. p__Inv3\<le>N\<and>p__Inv4\<le>N\<and>p__Inv3~=p__Inv4\<and>f=inv__3 p__Inv3 p__Inv4)"
shows "invHoldForRule s f r (invariants N)"
proof -
have c1: "(\<exists> i. i\<le>N\<and>r=n_Crit i)\<or>
(\<exists> i. i\<le>N\<and>r=n_Exit i)\<or>
(\<exists> i. i\<le>N\<and>r=n_Idle i)"
apply (cut_tac b1, auto) done
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_Crit i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_CritVsinv__3) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_Exit i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_ExitVsinv__3) done
}
moreover {
assume d1: "(\<exists> i. i\<le>N\<and>r=n_Idle i)"
have "invHoldForRule s f r (invariants N)"
apply (cut_tac b2 d1, metis n_IdleVsinv__3) done
}
ultimately show "invHoldForRule s f r (invariants N)"
by satx
qed
end
|
{-# OPTIONS --omega-in-omega --no-termination-check --overlapping-instances #-}
module Light.Variable.Levels where
open import Light.Level using (Level)
variable ℓ aℓ bℓ cℓ dℓ eℓ fℓ gℓ hℓ : Level
|
\documentclass{mcmthesis}
\usepackage{palatino}
\usepackage{lipsum}
\usepackage{booktabs}
\usepackage{tabu}%table
\usepackage{colortbl}
\usepackage{indentfirst}%indent first paragraph
\usepackage{geometry}%page layout
\usepackage{graphics}%graphics
\usepackage{caption}%captions
\usepackage{graphicx}
\usepackage{subfigure}
\usepackage{palatino}
\usepackage{tikz}
\usepackage{enumerate}
\usepackage{longtable}
\usepackage{wrapfig}
\usetikzlibrary{shapes.geometric, arrows,chains}
% color aliases
\colorlet{lcfree}{green}
\colorlet{lcnorm}{blue}
\colorlet{lccong}{red}
% -------------------------------------------------
% debug marker layer
\pgfdeclarelayer{marx}
\pgfsetlayers{main,marx}
% Macro for marking coordinate points. Swap the two definitions below to toggle it off.
\providecommand{\cmark}[2][]{%
\begin{pgfonlayer}{marx}
\node [nmark] at (c#2#1) {#2};
\end{pgfonlayer}{marx}
}
\providecommand{\cmark}[2][]{\relax}
\newcommand{\upcite}[1]{\textsuperscript{\textsuperscript{\cite{#1}}}}
%%----------------------paper------------------start------------------------------------%%
\mcmsetup{
CTeX = false,
tcn = 69377,
problem = C,
sheet = true,
titleinsheet = true,
keywordsinsheet = true,
titlepage = false,
abstract = false
}
\title{Charitable Funds Boost University Education}
% rename the abstract (body) and the summary (control sheet)
\def\abstractname{Abstract}
\def\sheetsummaryname{Summary}
\begin{document}
% summary for the control sheet
\begin{sheetsummary}
\par In order to find the optimal strategy of investments, we define return on investment (ROI) based on an evaluation standard of universities' performance, introduce investment risk to restrict the investment targets, and establish a Single-target Mixed Integer Linear Model with comprehensive ROI as the goal.
\par Firstly, we analyze the data for integrity and redundancy of information; after discarding the tags and universities that lack data, we fill in the remaining missing data using the linear trend method.
\par Secondly, by correlating the remaining indicators with the Pearson correlation coefficient, we categorize the indicators and select the four main ones that we believe are most relevant to evaluating school performance: graduation rate, the working ability of graduates, retention rate, and education improvement rate. We then determine the contribution of the different indicators to performance through the entropy method and calculate the performance of the candidate schools, based on which we obtain the likely performance values for the next four years through GM(1,1).
\par Finally, we use the performance fluctuation (the ratio of the variance to the mean of performance) as the investment risk, define the ROI by the performance and the annual total investment, and develop a Single-target Mixed Integer Linear Programming model. The optimal model is solved subject to the investment risk, the total investment, and the number of investment objectives.
\par We obtain an optimal strategy and a candidate school list from this Model. Taking Trinity Baptist College as an example, the investment amounts over the following five years are 2683937 \$, 2683937 \$, 0 \$, 2340678 \$, and 2474686 \$.
\end{sheetsummary}
% abstract for the body
\begin{abstract}
\end{abstract}
% keywords
\begin{keywords}
GM(1,1); Correlation Analysis; Portfolio Theory; Single-target Mixed Integer Linear Programming
\end{keywords}
\maketitle
% Generate the Table of Contents, if it's needed.
\tableofcontents
\newpage
%%---------------------------introduction--------------------------------
\section{Introduction}
\subsection{Background}
\par In the United States, foundations are an important force in philanthropy. To promote the development of education, many foundations donate large sums of money to schools in order to improve their level of education. This kind of investment does not require monetary returns but is based on the performance of the schools. As a result, before investing, many foundations assess the comprehensive strength of each school, collect the data available from the school to analyze its performance, and use the return on investment as a measure of the overall strength of schools. Based on this background, this thesis designs an optimal investment strategy for the Goodgrant Foundation and gives a 1 to N optimized and prioritized candidate list of schools derived from the best strategy.
\subsection{Our Works}
\begin{itemize}
\item \textbf{Construct ROI evaluation criteria}
\par In investment economics, the main factor in measuring the quality of an investment is the rate of return on investment. The return considered for investment in a charitable institution is not an ordinary monetary reward; rather, the performance of a school is treated as the investment return. Therefore, the ratio between school performance and investment amount is the return on investment. School performance is mainly determined by indicators such as graduation rate, working ability of graduates, improvement rate of school education, and retention rate, which reflect the comprehensive strength of a school. The overall comprehensive strength of a school can thus be judged by the level of its return on investment, and on that basis we decide whether to invest.
\item \textbf{Using Grey Prediction to determine the long-term investment strategy}
\par As the indicators describing school performance are sparsely populated, a curve fit of performance as a function of investment can easily be off by 50\%, greatly lowering the accuracy of the results. Grey prediction is a good remedy for this small-data shortcoming: predicting the long-term ROI from the previous data ensures, to a certain extent, data integrity and accuracy, and provides analyzable data for solving the single-objective optimization equations later.
\item \textbf{Identified the risk of investment in education}
\par Because investment carries risk, in order to avoid it and keep potential losses within a controllable range, the ratio of the variance to the mean of each school's performance index over the past four years is defined as the investment risk coefficient. The size of this coefficient indicates how stable a school's comprehensive ability is, which allows the least risky investments to be made.
\end{itemize}
%%--------------------------assumptions-------------------------------
\section{Assumptions}
\subsection{About the Given Data}
\begin{itemize}
\item The data given is true and reliable, and the deletion rate is limited.
\item Filled data can reflect the real situation.
\end{itemize}
\subsection{About Our Model}
\begin{itemize}
\item Abandon schools with data loss rates in excess of 50\%.
\item The local optimal solution approximates the global optimal solution.
\end{itemize}
%%--------------------------Analysis problem-----------------------------
\section{Analysis of the Problem}
The best investment strategies for the Goodgrant Foundation are designed to meet the requirements of the problem. Among the investment strategies, several factors need to be considered: the choice of schools, the investment amount for each school, the return on investment, and the investment timing to motivate student performance. The goal is to obtain the highest comprehensive return on investment and to give a 1 to N optimized and prioritized candidate list of schools from the best strategy.
\subsection{Basic data preprocessing}
\par We found a large amount of missing data by examining the given data and the databases from both sites. The data preprocessing plan is as follows:
\begin{enumerate}[step 1:]
\item Discard data from schools that have data loss rates in excess of 50\%.
\item For the remaining school data, use the linear trend method to fill.
\item Normalize the target variable (see the formula below).
\end{enumerate}
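\par The normalization in step 3 is not spelled out in the data description; a natural reading, consistent with the dimensionless indicators used later, is standard min-max scaling of each indicator $q$:
\begin{equation}
q' = \frac{q - q_{min}}{q_{max} - q_{min}}
\end{equation}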
\subsection{Solution Steps}
\par We accomplish the task in several steps:
\begin{enumerate}[step 1:]
\item Determine the data items included in the school's performance index through relevancy analysis and interviews.
\item Normalize the data items and use the entropy method to calculate the weight of each data item.
\item Determine the ROI calculation method.
\item Define the volatility of the school performance index as a funding risk.
\item Identify the schools invested and the corresponding amounts of funding through a Single- target Mixed Integer Linear Programming.
\item Calculate funding plans for the next five years by forecasting the data for those years with GM(1,1).
\end{enumerate}
\section{Symbol Description}
\begin{table}[h]
\centering
\caption{Symbol Description In This Paper}
\label{tab:Symbol Description}
\begin{tabular}{cc}
\toprule
Symbols & Description\\
\midrule
$ROI$ & return on investment\\
$X_i$ & $i$th school indicator\\
$GR$ & Graduation rate\\
$GWA$ & Graduates working ability\\
$EIR$ & Education improvement rate\\
$R$ & Average retention rate of a student\\
$SAT$ & Average admission score\\
$R1$, $R2$, $R3$, $R4$& The retention rate of students of different years in a year\\
$IM$ & Indicator matrix\\
$Q_m$ & investment risk factor for the m th school\\
$Q_{max}$ & Acceptable maximum investment risk factor\\
$TROI$ & Composite return on investment\\
$Q$ & The fluctuation for School Performance Index\\
\bottomrule
\end{tabular}
\end{table}
%%---------------------model-----------------------------------
\section{Establishment of the Model}
\subsection{Define ROI indicator}
Since investing needs to take into account a school's potential to capitalize on its resources and its return on investment, the school's existing rate of return on investment is used to assess its potential for capital utilization and to anticipate the ROI, which serves as a reference standard for charitable investment. The ROI (Return on Investment) is given as follows:
\begin{equation}
\label{ROI Def}
ROI = \frac{School \quad Performance}{School \quad Income}
\end{equation}
\subsubsection{School performance index}
\par Performance represents the achievements a school obtains with its funds for school construction. The available information shows that the relevant outcomes include the school graduation rate, the working ability of graduates, the education improvement rate, and the retention rate of students. Since 140 school indicators are given, we need to screen out the main ones, so we use the bivariate Pearson correlation to analyze the correlations among the indicators given in the school data.
\subsubsection{Pearson correlation coefficient}
\par Using the Pearson correlation coefficient that reflects the correlation between the two variables to measure the correlation between the indicators, the Pearson correlation coefficient between every two indicators of the school is:
\begin{equation}
\label{eq2}
\rho_{x_{n-1},x_{n}}=corr(x_{n-1},x_n)=\frac{cov(x_{n-1},x_n)}{\sigma_{x_{n-1}}\sigma_{x_n}}
\end{equation}
Covariance($cov(x_{n-1},x_n)$) is :
\begin{equation}
\label{eq4}
cov(x_{n-1},x_n)=\frac{\sum\limits^m_{i=1}\left(x_{n-1,i}-\overline{x}_{n-1}\right)\left(x_{n,i}-\overline{x}_{n}\right)}{m-1}
\end{equation}
$m$ stands for the number of schools; $\sigma_{x_{n-1}}$ and $\sigma_{x_{n}}$ are the standard deviations of the $(n-1)$th and $n$th indicators.
Combined with Pearson correlation coefficient and SPSS statistics, the correlation between each index is as following table \ref{tab:Correlation analysis of variables in a given data table}:
\begin{table}[h]
\centering
\caption{Correlation analysis of variables in a given data table}
\label{tab:Correlation analysis of variables in a given data table}
\begin{tabular}{cccccc}
\toprule
& SAT\_AVG\_ALL & RET\_FT4 & gt\_25k\_p6 & PCTPELL\\
\midrule
SAT\_AVG\_ALL & - &0.155 & 0.418 & -0.549\\
RET\_FT4 & 0.155 & - & 0.148 & -0.128\\
gt\_25k\_p6 & 0.418 & 0.148 & - & -0.582\\
PCTPELL & -0.549 & -0.128 & -0.582 & - \\
\bottomrule
\end{tabular}
\end{table}
This table shows the correlations among the performance-related indicators that we determined based on the relevant information. Since indicators with very little mutual correlation can reflect different aspects of a school's performance, the paper chooses as the main indicators the set of indicators that have the lowest mutual correlation while remaining closely related to school performance. We have chosen the following indicators.
\subsubsection{Select indicators}
We have chosen the following indicators according to above method.
\begin{enumerate}
\item \textbf{graduation rate (GR)}:
Graduation rate includes 3 variables: GBA4RTT, GBA5RTT, and GBA6RTT. We use the final 6-year graduation rate (GBA6RTT) as GR.
\item \textbf{Graduates' ability to work}:
Students' ability to work (GWA) is determined by the ratio of student award winners (CSTOTLT in the Access database) to total school attendance (UGDS in the Access database).
\begin{equation}
\label{GWA}
GWA \quad = \frac{CSTOTLT}{UGDS}
\end{equation}
\item \textbf{The rate of increase in education(EIR)}:
The ratio of post-graduate ability to pre-admission ability. The improvement of education is reflected in the improvement of students' abilities. Therefore, the average admission score (SAT) of all students before enrollment is selected as the pre-admission ability, and the award rate is used as a reflection of GWA after graduation.
\begin{equation}
\label{EIR}
EIR = \frac{GWA}{SAT}
\end{equation}
\item \textbf{retention rate (R)}:
The ratio of the actual number of students enrolled in a class to the theoretical number. The retention rates R1, R2, R3, R4 (RET\_FT4, RET\_FTL4, RET\_PT4, RET\_PTL4 in the Access database) for the four different types of students given in the school data are averaged to give a retention rate R.
\begin{equation}
\label{R}
R = \frac{R1+R2+R3+R4}{4}
\end{equation}
\end{enumerate}
\subsubsection{Entropy Method to Determine Performance}
\par Principle of the Entropy Method: Entropy is a concept derived from thermodynamics. In 1948, Shannon first introduced information entropy to describe the uncertainty of a signal source; it is a measure of the order of a system. The smaller the entropy of an evaluation index, the greater the degree of variation of the index value, the greater the amount of information provided, and hence the greater the weight. Because of the subjectivity of AHP in determining index weights, this model uses information entropy to determine the objective weight of each index according to the degree of variation of its values.
\begin{itemize}
\item Citing the values of the four main indicators (GR, GWA, EIR, R) in the data processing, the indicator matrix (IM):
\begin{equation}
\label{eq6}
IM=(q_{im})_{4\times m}=\left(\begin{array}{cccc}
q_{1,1} & q_{1,2} & \ldots & q_{1,m}\\
q_{2,1} & q_{2,2} & \ldots & q_{2,m}\\
q_{3,1} & q_{3,2} & \ldots & q_{3,m}\\
q_{4,1} & q_{4,2} & \ldots & q_{4,m}\\
\end{array} \right)
\end{equation}
$m$ represents the number of schools; $q_{i,m}$ $(i = 1,2,3,4)$ corresponds in turn to the four indicators (GR, GWA, EIR, R).
\item Give the entropy definition. The four indicators (GR, GWA, EIR, R) in the indicator matrix correspond in turn to their entropy $e_i \quad (i=1,2,3,4)$.
\begin{equation}
\label{eq8}
\left\{
\begin{aligned}
&e_i & = \quad & -k\sum^m_{j=1}Z_{ij}ln(Z_{ij}) \\
&z_{i,j} & = \quad & \frac{q_{ij}}{\sum\limits ^m_{j=1}q_{ij}} \\
&k & = \quad & \frac {1}{ln(m)}
\end{aligned}
\right.
\end{equation}
$k$ and $z_{i,j}$ are intermediate variables. When $z_{i,j}=0$, the logarithm is undefined, so we set $z_{i,j}\cdot \ln z_{i,j}=0$ in that case.
\item Define entropy weights. According to the definition of entropy, the four indicators (GR, GWA, EIR, R) in the indicator matrix correspond in turn to their entropy weights $\omega_i \quad (i = 1,2,3,4)$.
\begin{equation}
\label{eq10}
\left\{
\begin{split}
&\omega_i & = \quad & \frac{1-e_i}{4-\sum\limits^4_{i=1}e_i} \\
&\sum^4_{i=1}\omega_i & = \quad & 1
\end{split}
\right.
\end{equation}
Combined with equation \ref{eq6},\ref{eq8},\ref{eq10}, we can find that the weights of the four indicators (GR, GWA, EIR, R) as follow.
\begin{equation}
\label{eq11}
\omega_1=0.2501 \quad \omega_2=0.2501 \quad \omega_3=0.2496 \quad \omega_4=0.2501
\end{equation}
\end{itemize}
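\par With these weights, each school's performance index (the $SP_m$ used in the ROI formula below) follows as the weighted sum of its four indicator values:
\begin{equation}
SP_m = \sum^4_{i=1}\omega_i\, q_{i,m} = \omega_1\cdot GR + \omega_2\cdot GWA + \omega_3\cdot EIR + \omega_4\cdot R
\end{equation}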
\subsubsection{School income index}
We choose the school's total annual income as the school income index. For three kinds of schools, we choose the following data for School income index:
\begin{itemize}
\item \textbf{Public institutions}: Total operating and nonoperating revenues
\item \textbf{Private not-for-profit institutions}: Total revenues and investment return
\item \textbf{Private for-profit institutions}: Total revenues and investment return
\end{itemize}
\subsubsection{ROI formula}
According to the definition of $ROI_m$, its specific formula is given as follows.
\begin{equation}
\label{eq17}
ROI_m = \frac{SP_m}{SI_m} = \frac{0.2501\cdot GR + 0.2501\cdot GWA + 0.2496\cdot EIR +0.2501\cdot R}{Total\quad operating\quad and\quad nonoperating\quad revenues}
\end{equation}
$SI_m$ is the $m$th school's income, and $SP_m$ is the $m$th school's performance.
\subsection{ROI forecast for the next five years}
\par Based on each school's ROI data for the previous four years, GM(1,1) forecasts the ROI of each school for the next five years.
\subsubsection{GM(1,1)}
\par The traditional GM(1,1) model\upcite{gm} consists of a single-variable differential equation.
\par Let $X^{(0)}=[x^{(0)}(1), x^{(0)}(2),\cdots ,x^{(0)}(n)]$ , $X^{(1)}=[x^{(1)}(1), x^{(1)}(2),\cdots ,x^{(1)}(n)]$ , $Z^{(1)}=[z^{(1)}(2), z^{(1)}(3),\cdots ,z^{(1)}(n)]$ , where $x^{(1)}(k) = \sum\limits_{i=1}^k x^{(0)}(i)$ and $z^{(1)}(k) = \frac{x^{(1)}(k) + x^{(1)}(k-1)}{2}$ ; then $x^{(0)}(k) + a z^{(1)}(k) = b$ is called the GM(1,1) model. The parameter $a$ is called the development coefficient, and the parameter $b$ is called the grey action quantity.
Then, we can get the time response function of the GM(1,1) model.
\begin{equation}
\hat{x}^{(1)}(k+1) = (x^{(0)}(1) - \frac{b}{a})e^{-a k} + \frac{b}{a} \qquad k = 1, 2, \cdots, n
\end{equation}
and the restored function of $x^{(0)}(k + 1)$ can be given by
\begin{equation}
\hat{x}^{(0)}(k+1) = (1 - e^a)(x^{(0)}(1) - \frac{b}{a})e^{-a k} \qquad k = 1, 2, \cdots, n
\end{equation}
\par In these formulas, $\hat{x}^{(1)}(k)$ is the simulative value of $x^{(1)}(k)$, and $\hat{x}^{(0)}(k)$ is the simulative value of $x^{(0)}(k)$.
\par Without external interference, we know that population and GDP grow exponentially. However, this model produces a persistent simulative deviation when simulating a homogeneous-exponent sequence, because of the unequal conversion between the discrete difference equation and the continuous differential equation. So we follow C.I.Chen\upcite{}. The Discrete GM(1,1) Model is as follows:
\begin{equation}
\hat{x}^{(1)}(k+1) = (x^{(0)}(1) - \frac{b}{a})(\frac{2 - a}{2 + a})^k+ \frac{b}{a} \qquad k = 1, 2, \cdots, n
\end{equation}
\begin{equation}
\hat{x}^{(0)}(k+1) = (1 - \frac{2 + a}{2 - a})(x^{(0)}(1) - \frac{b}{a})(\frac{2 - a}{2 + a})^k \qquad k = 1, 2, \cdots, n
\end{equation}
\par Use least squares to solve for $a$ and $b$, and substitute them into the prediction formula. We then obtain the predictive values of $X^{(1)}$ and the simulated values of $X^{(0)}$.
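\par For reference, the least-squares step takes the standard GM(1,1) form (this construction is implied above but not written out): stacking the equations $x^{(0)}(k) + a z^{(1)}(k) = b$ for $k = 2,\dots,n$ gives
\begin{equation}
\begin{pmatrix} a \\ b \end{pmatrix} = (B^{T}B)^{-1}B^{T}Y, \qquad
B = \begin{pmatrix} -z^{(1)}(2) & 1 \\ \vdots & \vdots \\ -z^{(1)}(n) & 1 \end{pmatrix}, \qquad
Y = \begin{pmatrix} x^{(0)}(2) \\ \vdots \\ x^{(0)}(n) \end{pmatrix}
\end{equation}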
\subsection{Definition of Risk}
\par In the financial sector, Modern Portfolio Theory is used to measure risk and benefits, drawing the Efficient Frontier of all risky assets and finding the Tangency Portfolio\upcite{PT}. Charitable investment also carries risk, which can prevent the investment from reaching the optimal point even though it brings no direct loss. Here we define the ``risk'' of our investment.
\begin{equation}
\label{risk}
Q = \frac{S^2_e}{\mu_e}
\end{equation}
\par In the formula, we use the concept of the coefficient of variation: $S^2_e$ indicates the variance of the School Performance Index, and $\mu_e$ indicates its mean.
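\par Concretely, if $e_1,\dots,e_4$ denote a school's performance index over the four observed years, the risk coefficient is computed as follows (we state the population form; the original does not specify the variance convention):
\begin{equation}
\mu_e = \frac{1}{4}\sum^4_{t=1}e_t, \qquad S^2_e = \frac{1}{4}\sum^4_{t=1}(e_t-\mu_e)^2, \qquad Q = \frac{S^2_e}{\mu_e}
\end{equation}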
\subsection{Establish a single objective optimization equation\upcite{single}}
\par Philanthropic investment focuses on the strength of schools and their potential for capital utilization, so as to maximize the comprehensive return on investment. The standard for quantifying a school's strength and capital-utilization potential is its return on investment (ROI) over the next four years. We therefore establish a single-objective optimization equation that takes the total return on investment (TROI) as the target and the investment amounts $\omega_m$ as the variables.
\begin{equation}
\label{eq20}
TROI=\sum\limits^{n}_{m=1}ROI_m\cdot \omega_m
\end{equation}
\par $m$ represents the $m$th school, and the total number is $n$; $ROI_m$ is the $m$th school's return on investment, which has been predicted by GM(1,1). In order to solve this equation, the following constraints are given:
\begin{enumerate}[(1)]
\item To not exceed the total annual investment amount A, give the investment amount constraint:
\begin{equation}
\label{eq22}
\left\{
\begin{split}
&A & = \quad & 100000000\$ \\
&\sum\limits^{n}_{m=1}\omega_m & = \quad & A \\
&\omega_m &\leqslant \quad & A
\end{split}
\right.
\end{equation}
\par $A$ is the total investment of 100 million US dollars.
\item As the number of schools invested in by a charitable organization cannot be unlimited, and in order to maximize the return on investment, the number of selected schools needs to be restricted so that the best investment schools are chosen. The following restriction on the number of schools is therefore given.
\begin{equation}
\label{eq29}
10\leqslant m \leqslant 50
\end{equation}
\par The final single-objective optimization equation for total ROI is as follows:
\begin{equation}
\label{eq30a}
TROI = \sum\limits^{n}_{m=1}ROI_m\cdot\omega_m
\end{equation}
\begin{equation}
\label{eq30b}
\left\{
\begin{split}
&\sum\limits^{n}_{m=1}\omega_m & = \quad &A\\
&\omega_m & \leqslant \quad & A\\
&\sum\limits^{n}_{m=1}Q_m\cdot\omega_m &\leqslant \quad &A\cdot Q_{max}\\
&10 & \leqslant \quad & m \leqslant 50
\end{split}
\right.
\end{equation}
\par $m$ represents the $m$th school, and the total number is $n$; TROI is the comprehensive return on investment; $ROI_m$ is the $m$th school's return on investment; $\omega_m$ is the $m$th school's investment amount; $A$ is the total amount of 100 million dollars; $Q_m$ is the ratio describing the volatility of the investment risk; and $Q_{max}$ is the maximum risk factor the organization can bear.
\end{enumerate}
\section{Calculation and Analysis of the Model}
\subsection{Data preprocessing\upcite{data1,data2}}
\begin{enumerate}
\item Eliminate schools with data loss rates above 50\%, given the missing information on the various indicators given for each school.
\item Exclude schools that are under the supervision of the Ministry of Education or that cannot award a bachelor's degree.
\item Use a linear-trend regression equation to fill in the missing values.
\item Since each index unit is not uniform, its data are normalized so as to obtain complete and analyzable data with a total of 572 schools' data.
\end{enumerate}
\subsection{Basic ROI Calculation}
This gives the return on investment for the 572 schools. Specific results are shown in the following table \ref{tab:Past 4 Years ROI Calculation Results}:
\begin{table}[h]
\centering
\caption{Past 4 Years ROI Calculation Results}
\label{tab:Past 4 Years ROI Calculation Results}
\begin{tabular}{cccccc}
\toprule
unit ID & Institution Name & 2013 & 2014 & 2015 & 2016 \\
\midrule
448840 &University of South Florida-St Petersburg &35.81 &43.71 &41.11 &45.28\\
433660 &Florida Gulf Coast University & 14.85 & 16.34 & 16.52 & 14.80\\
392840 &Watkins College of Art Design Film & 240.20 & 371.51 & 332.42 & 478.11\\
366711 &California State University-San Marcos & 19.32 & 19.76 & 20.99 & 16.11\\
243780 &Purdue University-Main Campus & 2.12 & 2.06 &2.37 & 2.41\\
240727 &University of Wyoming & 5.73 & 6.13 & 7.39 & 6.07\\
240480 &University of Wisconsin-Stevens Point &17.64 &18.40 &22.74 &20.94\\
240471 &University of Wisconsin-River Falls & 29.98 & 32.10 & 40.12 & 33.90\\
240462 &University of Wisconsin-Platteville & 24.44 & 24.87 & 25.55 & 22.40\\
$\cdots$&$\cdots$&$\cdots$&$\cdots$&$\cdots$&$\cdots$\\
\bottomrule
\end{tabular}
\end{table}
\newpage
\par The ROI values in the above table \ref{tab:Past 4 Years ROI Calculation Results} are accurate to two decimal places, sorted from high to low, giving the ROI of some schools. According to the ROI of each school, the potential and the investment value of the school can be judged.
\par The estimated ROI of 572 schools for the next five years is shown in the following table \ref{tab:Next 5 Years ROI Predict Results}.
\begin{table}[h]
\centering
\caption{Next 5 Years ROI Predict Results}
\label{tab:Next 5 Years ROI Predict Results}
\begin{tabular}{cccccc}
\toprule
Institution Name & 2017 & 2018 & 2019 & 2020 & 2021\\
\midrule
University of South Florida-St Petersburg &45.00 &45.85 &46.71 &47.58 &48.48\\
Florida Gulf Coast University &14.43 &13.76 &13.12 &12.51 &11.93\\
Watkins College of Art Design \& Film &522.31 &604.57 &699.78 &809.99 &937.56\\
California State University-San Marcos &15.74 &14.37 &13.12 &11.98 &10.93\\
Purdue University-Main Campus &2.64 &2.85 &3.07 &3.30 &3.56\\
University of Wyoming &6.48 &6.46 &6.43 &6.41 &6.38\\
University of Wisconsin-Stevens Point &23.22 &24.62 &26.10 &27.66 &29.33\\
University of Wisconsin-River Falls &37.09 &37.98 &38.89 &39.83 &40.78\\
University of Wisconsin-Platteville &21.96 &20.90 &19.89 &18.93 &18.02\\
$\cdots$&$\cdots$&$\cdots$&$\cdots$&$\cdots$&$\cdots$\\
\bottomrule
\end{tabular}
\end{table}
\par Due to the large number of schools, this table displays only a selection of them. From the above table, it can be seen that the ROI of some schools increases while that of others declines. Therefore, when re-investing, we not only need to use the annual ROI as a reference standard, but also need to pay attention to the volatility of the ROI to reduce the investment risk.
\subsection{Solve Single-target Mixed Integer Linear Programming}
\subsubsection{Solve Best Investment Strategy for First Year}
\par Given the single-objective optimization equations \ref{eq30a} and \ref{eq30b}, we solve them with the fmincon function to predict the schools to be invested in during the first year, the amounts of investment, and the maximum ROI. We use the median of all school risks as the risk threshold, which is $Q_{max} = 0.00064011$. The results are shown in table \ref{tab:First Best Investment Advice Form} (in order to show more data in this table, we scale the risk $Q$ by a factor of 1000).
\begin{table}[h]
\centering
\caption{First Best Investment Advice Form}
\label{tab:First Best Investment Advice Form}
\begin{tabular}{cccccc}
\toprule
unit ID & Institution Name & ROI & Risk & Investment\\
\midrule
176789 & Calvary Bible College and Theological Seminary &1061.75& 3.18 &806756\\
137953& Trinity Baptist College &644.85 &0.39 &2683937\\
392840& Watkins College of Art Design \& Film &451.24& 7.47 &0\\
224244& Dallas Christian College &412.83 &3.38 &3113685\\
169327& Cleary University &408.07 &0.11 &3645894\\
102669& Alaska Pacific University &306.54 &11.76 &3231178\\
204176& Mount Carmel College of Nursing & 289.44 &8.25 &2521678\\
219198& Mount Marty College &277.79 &1.68 &2409603\\
117104& Life Pacific College& 266.21& 14.23& 2340678\\
$\cdots$&$\cdots$&$\cdots$&$\cdots$&$\cdots$\\
\bottomrule
\end{tabular}
\end{table}
\par In the above table \ref{tab:First Best Investment Advice Form}, the ROI is accurate to two decimal places, Risk is the investment risk coefficient, and Investment is the investment amount. Judging from the risk coefficients, most invested schools have a low investment risk coefficient; a few schools with high risk factors are also invested in because of their high rate of return on investment. Therefore, investment risk and return on investment are the two major criteria for investment.
\par The proportion of investment in each school is now shown in figure \ref{50 Schools Funded by the Amount Allocate}.
\begin{figure}[!h]
\small
\centering
\includegraphics[width=14cm]{year1_part.png}
\caption{50 Schools Funded by the Amount Allocate} \label{50 Schools Funded by the Amount Allocate}
\end{figure}
\par In the picture above, each bar represents the investment amount of one school; there are 50 schools and a total investment of 100 million US dollars. As can be seen from the figure, the distribution of funds is relatively even. Considering the investment objectives of charitable organizations, in order to improve the education level of more schools, resources need to be allocated fairly evenly, so the results are consistent with their purpose.
\subsubsection{The Optimal Investment Strategy for the Next Four Years}
\par Assuming that the funds invested by the institution do not change each year, the same method can be used to predict the annual investment plan for each of the next four years. Therefore, the investment plans for the next five years are given in tables \ref{tab:Second Year's Best Investment Advice Form}, \ref{tab:Third Year's Best Investment Advice Form}, \ref{tab:Fourth Year's Best Investment Advice Form}, and \ref{tab:Fifth Year's Best Investment Advice Form} (in order to show more data in these tables, we scale the risk $Q$ by a factor of 1000).
\begin{table}[h]
\centering
\caption{Second Year's Best Investment Advice Form}
\label{tab:Second Year's Best Investment Advice Form}
\begin{tabular}{cccccc}
\toprule
unit ID & Institution Name & ROI & Risk & Investment\\
\midrule
176789& Calvary Bible College and Theological Seminary& 1747.04 &3.18 &806756\\
137953& Trinity Baptist College &556.30 &0.39 &2683937\\
392840& Watkins College of Art Design \& Film& 522.31& 7.47& 0\\
224244& Dallas Christian College & 433.89 &3.38& 3113685\\
169327& Cleary University &429.16 &0.11 &3645894\\
204176& Mount Carmel College of Nursing &340.02& 8.25& 3231178\\
102669& Alaska Pacific University& 333.30& 11.76& 2521678\\
219198& Mount Marty College &322.17& 1.68 &2409603\\
106713& Central Baptist College& 310.48& 2.23& 2340678\\
$\cdots$&$\cdots$&$\cdots$&$\cdots$&$\cdots$\\
\bottomrule
\end{tabular}
\end{table}
\begin{table}[h]
\centering
\caption{Third Year's Best Investment Advice Form}
\label{tab:Third Year's Best Investment Advice Form}
\begin{tabular}{cccccc}
\toprule
unit ID & Institution Name & ROI & Risk & Investment\\
\midrule
176789& Calvary Bible College and Theological Seminary& 2874.66& 3.18& 806756\\
137953& Trinity Baptist College& 479.91& 0.39& 0\\
392840& Watkins College of Art Design \& Film& 604.57& 7.47& 2683937\\
224244& Dallas Christian College& 456.03& 3.38& 3113685\\
169327& Cleary University& 451.35& 0.11& 3645894\\
204176& Mount Carmel College of Nursing & 399.43 & 8.25& 3231178\\
102669& Alaska Pacific University& 362.41& 11.76& 2340678\\
219198& Mount Marty College& 373.64& 1.68& 2409603\\
106713& Central Baptist College& 389.80& 2.23& 2521678\\
$\cdots$&$\cdots$&$\cdots$&$\cdots$&$\cdots$\\
\bottomrule
\end{tabular}
\end{table}
\begin{table}[h]
\centering
\caption{Fourth Year's Best Investment Advice Form}
\label{tab:Fourth Year's Best Investment Advice Form}
\begin{tabular}{cccccc}
\toprule
unit ID & Institution Name & ROI & Risk & Investment\\
\midrule
176789& Calvary Bible College and Theological Seminary& 4730.08& 3.18& 806756\\
392840& Watkins College of Art Design \& Film& 699.78& 7.47& 2683937\\
106713& Central Baptist College& 489.37& 2.23& 0\\
224244& Dallas Christian College& 479.29& 3.38& 3113685\\
169327& Cleary University& 474.68& 0.11& 3645894\\
204176& Mount Carmel College of Nursing& 469.22& 8.25& 3231178\\
219198& Mount Marty College& 433.34& 1.68& 2521678\\
115728& Holy Names University& 421.05& 23.00& 2409603\\
137953& Trinity Baptist College& 414.00& 0.39& 2340678\\
$\cdots$&$\cdots$&$\cdots$&$\cdots$&$\cdots$\\
\bottomrule
\end{tabular}
\end{table}
\begin{table}[h]
\centering
\caption{Fifth Year's Best Investment Advice Form}
\label{tab:Fifth Year's Best Investment Advice Form}
\begin{tabular}{cccccc}
\toprule
unit ID & Institution Name & ROI & Risk & Investment\\
\midrule
176789& Calvary Bible College and Theological Seminary& 7783.07& 3.18& 806756\\
392840& Watkins College of Art Design \& Film& 809.99& 7.47& 2683937\\
106713& Central Baptist College& 614.39& 2.23& 0\\
115728& Holy Names University& 563.02& 23.00& 3113685\\
204176& Mount Carmel College of Nursing& 551.20& 8.25& 3645894\\
200156& University of Jamestown& 539.63& 17.60& 3231178\\
224244& Dallas Christian College& 503.74& 3.38& 2521678\\
219198& Mount Marty College& 502.58& 1.68& 2409603\\
169327& Cleary University& 499.21& 0.11& 2340678\\
$\cdots$&$\cdots$&$\cdots$&$\cdots$&$\cdots$\\
\bottomrule
\end{tabular}
\end{table}
\begin{figure}[htbp]
\begin{minipage}[t]{0.5\textwidth}
\centering
\includegraphics[width=\linewidth]{ROI_9_1.png} \\
X-axis = Years\quad Y-axis = ROI
\caption{Calvary Bible College and Theological Seminary 's ROI In 9 Years}
\label{Calvary Bible College and Theological Seminary 's ROI In 9 Years}
\end{minipage}
\hspace{1ex}
\begin{minipage}[t]{0.5\textwidth}
\centering
\includegraphics[width=\linewidth]{ROI_9_2.png}\\
X-axis = Years\quad Y-axis = ROI
\caption{Cleary University 's ROI In 9 Years} \label{Cleary University 's ROI In 9 Years}
\end{minipage}
\end{figure}
\newpage
\par The tables show the annual investment plans for the next four years. From them, we can see that most of the schools invested in each year do not change and that the risk coefficients do not fluctuate much. Therefore, the schools are constantly upgrading their education level during the continuous investment process. For a specific analysis, we choose Calvary Bible College and Theological Seminary and Cleary University.
\par As can be seen from figures \ref{Calvary Bible College and Theological Seminary 's ROI In 9 Years} and \ref{Cleary University 's ROI In 9 Years}, after the investment the return on investment of Calvary Bible College and Theological Seminary and of Cleary University continues to increase. Although the ROI of Cleary University rises slowly, it is still increasing. By analyzing the other schools, we also find that under this investment the ROI of most schools keeps improving. Therefore, this investment strategy has the ability to improve school education.
\par However, not all schools have improved levels of education, as can be seen in the table \ref{tab:Some Schools Five-year Subsidy} below.
\begin{table}[h]
\centering
\caption{Some Schools Five-year Subsidy}
\label{tab:Some Schools Five-year Subsidy}
\begin{tabular}{cccccc}
\toprule
INSTNM & Year 1 & Year 2 & Year 3 & Year 4 & Year 5\\
\midrule
Trinity Baptist College & 2683937 & 2683937 & 0 & 2340678 & 2474686\\
Watkins College of Art Design \& Film & 0 & 0 & 2683937 & 2683937 & 2683937\\
Dallas Christian College & 3113684 & 3113684 & 3113684 & 3113684 & 2521677\\
Cleary University & 3645894 & 3645894 & 3645894 & 3645894 & 2340678\\
Alaska Pacific University & 3231177 & 2521677 & 2340678 & 3741025 & 3741025\\
Mount Carmel College of Nursing & 2521677 & 3231177 & 3231177 & 3231177 & 3645894\\
Mount Marty College & 2409603 & 2409603 & 2409603 & 2521677 & 2409603\\
Life Pacific College & 2340678 & 2643213 & 2936629 & 0 & 0\\
Saint Mary-of-the-Woods College & 3299144 & 3299144 & 0 & 18348 & 2917307\\
Central Baptist College & 3741025 & 2340678 & 2521677 & 0 & 0\\
Southwestern Christian University & 0 & 3217725 & 2077921 & 0 & 0\\
Marygrove College & 2474686 & 3741025 & 2474686 & 2917307 & 2643213\\
\bottomrule
\end{tabular}
\end{table}
\newpage
This table shows only part of the schools' investment amounts; the complete table is in Appendix 1. As can be seen from the above table, schools such as Trinity Baptist College and Life Pacific College may stop receiving investment after several years. This may be due to an increase in the school's investment risk or to no obvious improvement in its education level, which no longer matches the charity's goal of maximizing return on investment. Conversely, schools like Watkins College of Art Design \& Film and Southwestern Christian University received no investment at the start but obtained one later. This shows that each year the model can be adjusted according to a school's overall strength, return on investment, and risk changes to give the best investment strategy.
\section{Strengths and Weaknesses}
\subsection{Strengths}
\begin{itemize}
\item Make full use of all the data given, we use linear trend method to fill in the missing value, to ensure the accuracy of the data.
\item After reading the information, we have customized the return on investment, investment risk coefficient, etc., with a high degree of innovation.
\end{itemize}
\subsection{Weaknesses}
\begin{itemize}
\item The direct deletion of schools with data loss rates up to 50\% may delete certain potential schools and result in some bias.
\item Our definition of ROI does not reflect well the impact of funding itself.
\end{itemize}
\section{A letter to the Goodgrant Foundation}
Dear Sir/Madam:
\par We are honored to help you choose the best donation target. Here we will give the definition of Return on investment(ROI), explain the investment strategy and analysis method that make the ROI highest, and finally give the ROI ranking of schools from high ROI to low ROI in accordance with the best investment strategy.
\par As we know, the Goodgrant Foundation's goal is to improve the education level of American colleges. Therefore, the return on investment can be measured by the education level of universities, and the level of a college can be measured by its performance. So, in order to obtain the maximum comprehensive return on investment, we first give a definition of school performance.
\par Since the performance of a school reflects its level of education, the rate of return on investment is defined as the ratio of school performance to school income, so we need to find indicators that reflect school performance. Students pay for college with the aim of enhancing their ability to work, so we choose the working ability of graduates as one of the main indicators. From a social perspective, if it is difficult for students to graduate from a school, the quality of its education may be poor, so the graduation rate and the retention rate of students are selected as two more main indicators. From the aspect of education, if the quality of education improves, the working ability of the school's teachers and students improves as well, so the education improvement rate is selected as the last main indicator. Thus the four major indicators closely related to school performance are determined: graduation rate, education improvement rate, working ability of graduates, and retention rate. When reading the existing indicator data that reflect school performance, we find that most of the indicator data are repetitive or uninformative. Therefore, we analyze the correlation between the indicators through correlation analysis, combine highly correlated indicators, and find indicators that reflect the four main indicators above. We use the entropy method to determine the weight of each indicator and compute each school's performance as the weighted sum of the indicators.
\par We choose the ratio between the schools' performance and the investment as the return on investment. Because not every school can effectively use the funds to improve its education level, it is necessary to choose carefully before investing. Because the data of some schools are incomplete, some schools are controlled by the Ministry of Education, and some schools do not grant undergraduate degrees, the missing data of schools within a 50\% data-loss rate are filled in with the linear trend method and the other schools are removed. Based on each school's return on investment over the past four years, we use the GM(1,1) method to predict the return on investment over the next five years. Every investment is risky, so in order to avoid investment risk, we use the ratio of the variance to the mean of the return on investment as the investment risk coefficient and analyze each school's four-year investment risk. We take the median of the investment risk coefficients as the maximum acceptable risk, impose a risk coefficient below this maximum as a constraint, take the comprehensive return on investment as the goal, and solve the single-objective optimization equation with the investment amounts as the variables in order to find the optimal investment strategy.
\par To sum up, we predict the optimal annual investment strategy for 5 years as in the following table \ref{tab:Letter Five-year Subsidy}:
\begin{table}[h]
\centering
\caption{Some Schools Five-year Subsidy}
\label{tab:Letter Five-year Subsidy}
\begin{tabular}{cccccc}
\toprule
INSTNM & Year 1 & Year 2 & Year 3 & Year 4 & Year 5\\
\midrule
Trinity Baptist College & 2683937 & 2683937 & 0 & 2340678 & 2474686\\
Watkins College of Art Design \& Film & 0 & 0 & 2683937 & 2683937 & 2683937\\
Dallas Christian College & 3113684 & 3113684 & 3113684 & 3113684 & 2521677\\
Cleary University & 3645894 & 3645894 & 3645894 & 3645894 & 2340678\\
Alaska Pacific University & 3231177 & 2521677 & 2340678 & 3741025 & 3741025\\
Mount Carmel College of Nursing & 2521677 & 3231177 & 3231177 & 3231177 & 3645894\\
Mount Marty College & 2409603 & 2409603 & 2409603 & 2521677 & 2409603\\
Life Pacific College & 2340678 & 2643213 & 2936629 & 0 & 0\\
Saint Mary-of-the-Woods College & 3299144 & 3299144 & 0 & 18348 & 2917307\\
Central Baptist College & 3741025 & 2340678 & 2521677 & 0 & 0\\
Southwestern Christian University & 0 & 3217725 & 2077921 & 0 & 0\\
Marygrove College & 2474686 & 3741025 & 2474686 & 2917307 & 2643213\\
$\cdots$&$\cdots$&$\cdots$&$\cdots$&$\cdots$&$\cdots$\\
\bottomrule
\end{tabular}
\end{table}
\newpage
\begin{thebibliography}{99}
\bibitem{gm} Tan G J. The Structure Method and Application of Background Value in Grey System GM(1,1) Model (Ⅰ)[J]. SYSTEMS ENGINEERING-THEORY \& PRACTICE, 2000, 20(5):125-127.
\bibitem{single} Richards A, Schouwenaars T, How J P, et al. Spacecraft Trajectory Planning with Avoidance Constraints Using Mixed-Integer Linear Programming[J]. Journal of Guidance Control \& Dynamics, 2002, 25(4):755-764.
\bibitem{corr} Hardoon D R, Szedmak S, Shawe-Taylor J. Canonical Correlation Analysis: An Overview with Application to Learning Methods[J]. Neural Computation, 2014, 16(12):2639-2664.
\bibitem{PT} Portfolio Theory. In: The Princeton Companion to Applied Mathematics. Princeton: Princeton University Press.
\bibitem{data1} \url{www.nces.ed.gov/ipeds}
\bibitem{data2} \url{https://collegescorecard.ed.gov}
\end{thebibliography}
\begin{appendices}
\par 50 schools in the next 5 years the amount of funding calculation results(table \ref{tab:50 Schools Five-year Subsidy}).
\begin{table}[h]
\centering
\caption{50 Schools Five-year Subsidy}
\label{tab:50 Schools Five-year Subsidy}
\begin{tabular}{cccccc}
\toprule
INSTNM & Year 1 & Year 2 & Year 3 & Year 4 & Year 5\\
\midrule
Calvary Bible College & 806756 & 806756 & 806756 & 806756 & 806756\\
Trinity Baptist College & 2683937 & 2683937 & 0 & 2340678 & 2474686\\
Watkins College of Art Design \& Film & 0 & 0 & 2683937 & 2683937 & 2683937\\
Dallas Christian College & 3113684 & 3113684 & 3113684 & 3113684 & 2521677\\
Cleary University & 3645894 & 3645894 & 3645894 & 3645894 & 2340678\\
Alaska Pacific University & 3231177 & 2521677 & 2340678 & 3741025 & 3741025\\
Mount Carmel College of Nursing & 2521677 & 3231177 & 3231177 & 3231177 & 3645894\\
Mount Marty College & 2409603 & 2409603 & 2409603 & 2521677 & 2409603\\
Life Pacific College & 2340678 & 2643213 & 2936629 & 0 & 0\\
Saint Mary-of-the-Woods College & 3299144 & 3299144 & 0 & 18348 & 2917307\\
Central Baptist College & 3741025 & 2340678 & 2521677 & 0 & 0\\
Southwestern Christian University & 0 & 3217725 & 2077921 & 0 & 0\\
Marygrove College & 2474686 & 3741025 & 2474686 & 2917307 & 2643213\\
Cincinnati Christian University & 18348 & 2474686 & 1761951 & 2839048 & 3217725\\
Rochester College & 2917307 & 1613080 & 392573 & 2077921 & 416098\\
University of Maine at Fort Kent & 2643213 & 1761951 & 2643213 & 1761951 & 1761951\\
Warner University & 1761951 & 2157836 & 3217725 & 1443964 & 0\\
Oakland City University & 2937459 & 1443964 & 0 & 0 & 826702\\
Blackburn College & 2839048 & 2839048 & 2937459 & 2937459 & 2937459\\
Holy Names University & 2157836 & 0 & 3299144 & 2409603 & 3113684\\
Friends University & 3217725 & 2917307 & 18348 & 0 & 3299144\\
Judson University & 0 & 3507028 & 1613080 & 826702 & 0\\
University of Sioux Falls & 1443964 & 0 & 2157836 & 2157836 & 2839048\\
Erskine College & 1613080 & 0 & 3507028 & 0 & 3314292\\
University of Jamestown & 0 & 18348 & 3741025 & 3299144 & 3231177\\
Coker College & 3507028 & 2937459 & 2917307 & 2474686 & 0\\
MidAmerica Nazarene University & 0 & 0 & 3314292 & 0 & 2172278\\
Olivet College & 392573 & 826702 & 0 & 3507028 & 3507028\\
Roberts Wesleyan College & 826702 & 392573 & 1443964 & 3217725 & 1443964\\
Truett-McConnell College & 2936629 & 2936629 & 0 & 392573 & 2936629\\
Lancaster Bible College & 0 & 0 & 0 & 2172278 & 0\\
Valley City State University & 1756329 & 0 & 2172278 & 2937359 & 0\\
Toccoa Falls College & 0 & 3314292 & 3102160 & 3250179 & 1621848\\
Union College & 0 & 217316 & 0 & 0 & 0\\
Bluefield State College & 2780508 & 0 & 2839048 & 2643213 & 18348\\
Malone University & 3314292 & 1756329 & 826702 & 2936629 & 392573\\
Bethel College-Indiana & 3553563 & 0 & 0 & 0 & 0\\
Spalding University & 0 & 2780508 & 2780508 & 0 & 2780508\\
Northwest University & 2077921 & 3102160 & 217316 & 217316 & 217316\\
Ohio Dominican University & 2689672 & 3553563 & 0 & 0 & 0\\
Wilmington College & 3102160 & 2172278 & 3250179 & 3516047 & 3552950\\
Missouri Baptist University & 217316 & 416098 & 0 & 0 & 0\\
Siena Heights University & 2172278 & 3560335 & 0 & 0 & 0\\
Concordia University-Texas & 3250179 & 3552950 & 0 & 0 & 0\\
Paine College & 3516047 & 2077921 & 1756329 & 1613080 & 0\\
Holy Family University & 3552950 & 3250179 & 2689672 & 2780508 & 3553563\\
Concordia University-Portland & 3560335 & 2689672 & 0 & 0 & 2157836\\
Lakeland College & 2937359 & 2937359 & 1621848 & 416098 & 3560335\\
Howard Payne University & 416098 & 0 & 0 & 0 & 0\\
Geneva College & 1621848 & 1621848 & 2937359 & 3552950 & 3102160\\
\bottomrule
\end{tabular}
\end{table}
\end{appendices}
\end{document}
|
Formal statement is: lemma small_subsetI [intro]: "f \<in> L F (g) \<Longrightarrow> l F (f) \<subseteq> l F (g)" Informal statement is: If $f$ belongs to the big Landau set of $g$ (e.g. $f \in O(g)$), then the small Landau set of $f$ is contained in that of $g$ (e.g. $o(f) \subseteq o(g)$). |
{-# OPTIONS --cubical --safe --postfix-projections #-}
module Data.Rational.Unnormalised where
open import Prelude
open import Data.Integer using (ℤ; ⁺)
import Data.Integer as ℤ
import Data.Nat as ℕ
open import Data.Nat.DivMod using (nonZero)
infixl 7 _/_ _/suc_
record ℚ : Type where
constructor _/suc_
field
num : ℤ
den-pred : ℕ
den : ℕ
den = suc den-pred
open ℚ public
_/_ : (n : ℤ) → (d : ℕ) → ⦃ d≢0 : T (nonZero d) ⦄ → ℚ
n / suc d = n /suc d
{-# DISPLAY _/suc_ n d = n / suc d #-}
infixl 6 _+_
_+_ : ℚ → ℚ → ℚ
(x + y) .num = num x ℤ.* ⁺ (den y) ℤ.+ num y ℤ.* ⁺ (den x)
(x + y) .den-pred = x .den-pred ℕ.+ y .den-pred ℕ.+ x .den-pred ℕ.* y .den-pred
infixl 7 _*_
_*_ : ℚ → ℚ → ℚ
(x * y) .num = x .num ℤ.* y .num
(x * y) .den-pred = x .den-pred ℕ.+ y .den-pred ℕ.+ x .den-pred ℕ.* y .den-pred
|
(* Author: Dmitriy Traytel *)
header {* Normalization of WS1S Formulas *}
(*<*)
theory WS1S_Normalization
imports WS1S
begin
(*>*)
fun nNot where
"nNot (FNot \<phi>) = \<phi>"
| "nNot (FAnd \<phi>1 \<phi>2) = FOr (nNot \<phi>1) (nNot \<phi>2)"
| "nNot (FOr \<phi>1 \<phi>2) = FAnd (nNot \<phi>1) (nNot \<phi>2)"
| "nNot \<phi> = FNot \<phi>"
primrec norm where
"norm (FQ a m) = FQ a m"
| "norm (FLess m n) = FLess m n"
| "norm (FIn m M) = FIn m M"
| "norm (FOr \<phi> \<psi>) = FOr (norm \<phi>) (norm \<psi>)"
| "norm (FAnd \<phi> \<psi>) = FAnd (norm \<phi>) (norm \<psi>)"
| "norm (FNot \<phi>) = nNot (norm \<phi>)"
| "norm (FExists \<phi>) = FExists (norm \<phi>)"
| "norm (FEXISTS \<phi>) = FEXISTS (norm \<phi>)"
context formula
begin
lemma satisfies_nNot[simp]: "(w, I) \<Turnstile> nNot \<phi> \<longleftrightarrow> (w, I) \<Turnstile> FNot \<phi>"
by (induct \<phi> rule: nNot.induct) auto
lemma FOV_nNot[simp]: "FOV (nNot \<phi>) = FOV (FNot \<phi>)"
by (induct \<phi> rule: nNot.induct) auto
lemma SOV_nNot[simp]: "SOV (nNot \<phi>) = SOV (FNot \<phi>)"
by (induct \<phi> rule: nNot.induct) auto
lemma pre_wf_formula_nNot[simp]: "pre_wf_formula n (nNot \<phi>) = pre_wf_formula n (FNot \<phi>)"
by (induct \<phi> rule: nNot.induct) auto
lemma FOV_norm[simp]: "FOV (norm \<phi>) = FOV \<phi>"
by (induct \<phi>) auto
lemma SOV_norm[simp]: "SOV (norm \<phi>) = SOV \<phi>"
by (induct \<phi>) auto
lemma pre_wf_formula_norm[simp]: "pre_wf_formula n (norm \<phi>) = pre_wf_formula n \<phi>"
by (induct \<phi> arbitrary: n) auto
lemma satisfies_norm[simp]: "wI \<Turnstile> norm \<phi> \<longleftrightarrow> wI \<Turnstile> \<phi>"
by (induct \<phi> arbitrary: wI) auto
lemma lang\<^sub>W\<^sub>S\<^sub>1\<^sub>S_norm[simp]: "lang\<^sub>W\<^sub>S\<^sub>1\<^sub>S n (norm \<phi>) = lang\<^sub>W\<^sub>S\<^sub>1\<^sub>S n \<phi>"
unfolding lang\<^sub>W\<^sub>S\<^sub>1\<^sub>S_def by auto
end
(*<*)
end
(*>*)
|
**Imports:**
```python
import numpy as np
from scipy import optimize
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
import ipywidgets as widgets
```
$$
\begin{align}
v(r,m,\Delta) &= \max_{c_{1},c_{2}\in\mathbb{R}_{++}^{2}}\ln(c_{1})+\beta\ln(c_{2}) \\
& \text{s.t.} \\
c_{1}+\frac{c_{2}}{1+r}&\leq\Delta m+\frac{(1-\Delta)m}{1+r} \\
\end{align}
$$
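With log utility the problem has a standard closed form, which is what `solve` below implements: substituting the budget constraint (binding at the optimum) and maximizing over $c_1$ gives

$$
c_1^{\ast} = \frac{w}{1+\beta}, \qquad c_2^{\ast} = \beta(1+r)\,c_1^{\ast}, \qquad w \equiv \Delta m + \frac{(1-\Delta)m}{1+r}
$$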
# Functions
```python
# solve for consumption
def solve(beta,m,r,Delta_vec):
w = Delta_vec*m + (1-Delta_vec)*m/(1+r)
fac = beta*(1+r)
c1 = w/(1+fac/(1+r))
c2 = fac*c1
return c1,c2
# evaluate utility
u_func = lambda c1,c2,beta: np.log(c1) + beta*np.log(c2)
```
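A quick sanity check: with $\beta = 1$ and $r = 0$, the timing of income is irrelevant and consumption is perfectly smoothed, so $c_1 = c_2 = m/2$ for every $\Delta$.

```python
c1, c2 = solve(beta=1.0, m=2.0, r=0.0, Delta_vec=np.linspace(0, 1, 5))
print(c1, c2)  # both should be arrays of ones (m/2 = 1.0)
```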
```python
beta = 1.0 # discount factor
m = 2.0    # total income
ro = 0.0   # interest rate in the saver case
rl = 1.0   # interest rate in the borrower case
```
```python
Ndelta = 1000
Delta_vec = np.linspace(1e-8,1.0-1e-8,Ndelta)
```
# Plot
```python
fig = plt.figure(figsize=(6,6/1.5),dpi=100)
ax = fig.add_subplot(1,1,1)
# saver
c1,c2 = solve(beta,m,ro,Delta_vec)
I = c1 <= Delta_vec*m
uo = u_func(c1[I],c2[I],beta)
ax.plot(Delta_vec[I],uo,label='utility for saver')
# hand-to-mouth
c1 = Delta_vec*m
c2 = (1-Delta_vec)*m
umid = u_func(c1,c2,beta)
ax.plot(Delta_vec,umid,label='utility for hand-to-mouth')
# borrower
c1,c2 = solve(beta,m,rl,Delta_vec)
I = c1 > Delta_vec*m
ul = u_func(c1[I],c2[I],beta)
ax.plot(Delta_vec[I],ul,label='utility for borrower')
# lines
ax.axvline(1/3,color='black',ls='--',label='$\Delta = 1/3$')
ax.axvline(1/2,color='black',ls=':',label='$\Delta = 1/2$')
ax.axhline(-np.log(2),color='black',ls='-.',label='utility = $-\ln2$')
# legend and details
lgd = ax.legend(frameon=True,ncol=1,bbox_to_anchor=(1.05, 1), loc='upper left',)
ax.set_xlim([0.0,1.0])
ax.set_ylim([-5.0,1.0])
ax.set_xlabel('$\Delta$')
ax.set_ylabel('utility');
```
```python
#fig.savefig(f'Lec4_Consumption.pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
```
|
{-# OPTIONS --erased-cubical --safe #-}
module Interval where
open import Cubical.Core.Everything using (_≡_; Level; Type; Σ; _,_; fst; snd; _≃_; ~_)
open import Cubical.Foundations.Prelude using (refl; sym; _∙_; cong; transport; subst; funExt; transp; I; i0; i1)
--open import Cubical.Foundations.Function using (_∘_)
open import Cubical.Foundations.Univalence using (ua)
open import Cubical.Foundations.Isomorphism using (iso; Iso; isoToPath; section; retract; isoToEquiv)
open import Data.Bool using (Bool; true; false; _∨_; _∧_; not; if_then_else_)
open import Data.Integer using (ℤ; +_; -[1+_]; _-_; ∣_∣; -_)
open import Data.Integer.DivMod using (_modℕ_)
open import Data.Fin using (Fin; toℕ; #_)
open import Data.List using (List; []; _∷_; foldl; map; reverse; length; _++_; take; drop)
open import Data.Nat using (ℕ; zero; suc; _⊓_; _∸_) renaming (_≡ᵇ_ to _==_)
open import Data.Nat.DivMod using (_mod_)
open import Data.Sign using (Sign)
open import Data.String using (String; intersperse) renaming (_++_ to _++s_)
open import Data.Product using (_×_; _,_; Σ; proj₁; proj₂)
open import Data.Vec using (Vec; []; _∷_; lookup; replicate; _[_]%=_; toList; updateAt) renaming (map to vmap)
open import Pitch
open import Util using (allPairs; ◯pairs; firstPairs; _∘_)
-- Number of distinct interval classes (0 to 6).
ic7 : ℕ
ic7 = 7
PitchPair : Type
PitchPair = Pitch × Pitch
PCPair : Type
PCPair = PC × PC
-- Unordered pitch interval
-- Absolute distance in semitones between two pitches.
Upi : Type
Upi = ℕ
-- Ordered pitch interval
-- Relative distance in semitones between two pitches.
Opi : Type
Opi = ℤ
-- Interval Class
-- Also known as unordered pitch-class interval (upci).
IC : Type
IC = Fin ic7
-- (Ordered) pitch-class interval (also abbreviated opci)
PCI : Type
PCI = Fin s12
intervalWithinOctave : Upi → Upi
intervalWithinOctave i = toℕ (i mod s12)
absoluteInterval : Opi → Upi
absoluteInterval i = ∣ i ∣
makeSigned : Sign → Upi → Opi
makeSigned Sign.- zero = + 0
makeSigned Sign.- (suc i) = -[1+ i ]
makeSigned Sign.+ i = + i
-- Names for intervals
per1 = 0
min2 = 1
maj2 = 2
min3 = 3
maj3 = 4
per4 = 5
aug4 = 6
per5 = 7
min6 = 8
maj6 = 9
min7 = 10
maj7 = 11
per8 = 12
min9 = 13
maj9 = 14
min10 = 15
maj10 = 16
per11 = 17
aug11 = 18
per12 = 19
isConsonant : Upi → Bool
isConsonant iv =
(i == per1) ∨
(i == min3) ∨
(i == maj3) ∨
(i == per5) ∨
(i == min6) ∨
(i == maj6) ∨
(i == per8)
where i = intervalWithinOctave iv
isDissonant : Upi → Bool
isDissonant = not ∘ isConsonant
isPerfect : Upi → Bool
isPerfect iv =
(i == per1) ∨
(i == per4) ∨
(i == per5) ∨
(i == per8)
where i = intervalWithinOctave iv
isUnison : Upi → Bool
isUnison i = i == per1
isThird : Upi → Bool
isThird i = (i == min3) ∨ (i == maj3)
-- Half or whole step.
isStep : Upi → Bool
isStep i =
(i == min2) ∨
(i == maj2)
PitchInterval : Type
PitchInterval = Pitch × Upi
pitchIntervalToPitchPair : PitchInterval → PitchPair
pitchIntervalToPitchPair (p , n) = (p , transposePitch (+ n) p)
secondPitch : PitchInterval → Pitch
secondPitch = proj₂ ∘ pitchIntervalToPitchPair
pitchPairToOpi : PitchPair → Opi
pitchPairToOpi (p , q) = (+ q) - (+ p)
toIC : PCPair → IC
toIC (p , q) =
let x = toℕ (∣ (+ (toℕ q)) - (+ (toℕ p)) ∣ mod s12)
in x ⊓ (s12 ∸ x) mod ic7
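-- For example, toIC (# 0 , # 9) is # 3: the pitch-class distance is
-- 9 mod 12 = 9, and min 9 (12 ∸ 9) = 3.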
toPCI : PCPair → PCI
toPCI (p , q) =
(((+ (toℕ q)) - (+ (toℕ p))) modℕ s12) mod s12
-- Assumes p ≤ q
toPitchInterval : PitchPair → PitchInterval
toPitchInterval pq = proj₁ pq , absoluteInterval (pitchPairToOpi pq)
-- DEPRECATED? Note that the first and last pitches are compared in normal order, not circular order.
◯pcIntervals : List PC → List PCI
◯pcIntervals = map toPCI ∘ ◯pairs
-- Note that the first and last pitches are compared in normal order, not circular order.
pcIntervals : List PC → List PCI
pcIntervals = map toPCI ∘ reverse ∘ firstPairs
stepUp : Pitch → Pitch → Bool
stepUp p q with pitchPairToOpi (p , q)
... | +_ n = isStep n
... | -[1+_] n = false
stepDown : Pitch → Pitch → Bool
stepDown p q with pitchPairToOpi (p , q)
... | +_ n = false
... | -[1+_] n = isStep n
-- Check if q is a passing tone between p and r
-- The interval between the end points needs to be a 3rd.
isPassingTone : Pitch → Pitch → Pitch → Bool
isPassingTone p q r =
(stepUp p q ∧ stepUp q r) ∨ (stepDown p q ∧ stepDown q r) ∨
(isThird (absoluteInterval (pitchPairToOpi (p , r))))
moveUp : Pitch → Pitch → Bool
moveUp p q with pitchPairToOpi (p , q)
... | +_ _ = true
... | -[1+_] _ = false
moveDown : Pitch → Pitch → Bool
moveDown p q = not (moveUp p q)
-- Check if q is left by step in the opposite direction from its approach
isOppositeStep : Pitch → Pitch → Pitch → Bool
isOppositeStep p q r = (moveUp p q ∧ stepDown q r) ∨ (moveDown p q ∧ stepUp q r)
transposePitchInterval : Opi → Pitch → Pitch
transposePitchInterval z p = transposePitch z p
-- transpose pitch class by pci
Tpci : PCI → PC → PC
Tpci n = Tp (toℕ n)
----------
-- Interval Class Vector
ICV : Type
ICV = Vec ℕ ic7
emptyICV : ICV
emptyICV = 0 ∷ 0 ∷ 0 ∷ 0 ∷ 0 ∷ 0 ∷ 0 ∷ []
icVector : List PC → ICV
icVector pcs =
foldl
(λ icv pc → updateAt (toIC pc) suc icv)
(updateAt (# 0) (λ _ → length pcs) emptyICV)
(allPairs pcs)
----------
--Construct matrix out of PC row
matrix : List PC → List (List PC)
matrix [] = []
matrix pcs@(pc ∷ _) =
let r0 = map ((Tpci ∘ Ip) pc) pcs -- start first row at 0
in map (λ p → map ((Tpci ∘ Ip) p) r0) r0
showMatrix : List (List PC) → String
showMatrix m = intersperse "\n" (map showPCs m)
{-
rr : List PC
rr = # 10 ∷ # 9 ∷ # 7 ∷ # 0 ∷ []
rp = rr ++ map (Tp 4) rr ++ map (Tp 8) rr
-- Belle's matrix
aa = showMatrix (matrix rp)
0 b 9 2 4 3 1 6 8 7 5 a
1 0 a 3 5 4 2 7 9 8 6 b
3 2 0 5 7 6 4 9 b a 8 1
a 9 7 0 2 1 b 4 6 5 3 8
8 7 5 a 0 b 9 2 4 3 1 6
9 8 6 b 1 0 a 3 5 4 2 7
b a 8 1 3 2 0 5 7 6 4 9
6 5 3 8 a 9 7 0 2 1 b 4
4 3 1 6 8 7 5 a 0 b 9 2
5 4 2 7 9 8 6 b 1 0 a 3
7 6 4 9 b a 8 1 3 2 0 5
2 1 b 4 6 5 3 8 a 9 7 0
rd : List PC
rd2 = reverse (map (Tp 4) rr)
rd3' = reverse (map (Tp 8) rr)
rd3 = reverse (take 2 rd3') ++ reverse (drop 2 rd3')
rd = rr ++ rd2 ++ rd3
-- Dan's matrix
ad = showMatrix (matrix rd)
0 b 9 2 6 1 3 4 5 a 8 7
1 0 a 3 7 2 4 5 6 b 9 8
3 2 0 5 9 4 6 7 8 1 b a
a 9 7 0 4 b 1 2 3 8 6 5
6 5 3 8 0 7 9 a b 4 2 1
b a 8 1 5 0 2 3 4 9 7 6
9 8 6 b 3 a 0 1 2 7 5 4
8 7 5 a 2 9 b 0 1 6 4 3
7 6 4 9 1 8 a b 0 5 3 2
2 1 b 4 8 3 5 6 7 0 a 9
4 3 1 6 a 5 7 8 9 2 0 b
5 4 2 7 b 6 8 9 a 3 1 0
-}
|
lemma continuous_transform_within: fixes f g :: "'a::metric_space \<Rightarrow> 'b::topological_space" assumes "continuous (at x within s) f" and "0 < d" and "x \<in> s" and "\<And>x'. \<lbrakk>x' \<in> s; dist x' x < d\<rbrakk> \<Longrightarrow> f x' = g x'" shows "continuous (at x within s) g" |
# Printf supplies @printf/@sprintf below; UnionFinder, CompressedFinder and
# union! are assumed to come from the enclosing package (e.g. UnionFinders.jl).
using Printf

function time_unit(t)
units = ["hr", "min", "s", "ms", "us", "ns", "ps"]
sizes = [60.0 * 60.0, 60.0, 1.0, 1e-3, 1e-6, 1e-9, 1e-12]
for (unit, size) in zip(units, sizes)
if t / 10 > size
return unit, size
end
end
return units[end], sizes[end]
end
function fmt_time(ts)
avg = sum(ts) / length(ts)
unit, size = time_unit(avg)
return @sprintf "%d %s" (avg / size) unit
end
function bench_graph_union(nodes :: Int, edges :: Int)
us = [convert(Int, floor(nodes * rand()) + 1) for e in 1:edges]
vs = [convert(Int, floor(nodes * rand()) + 1) for e in 1:edges]
uf = UnionFinder(nodes)
t = @elapsed union!(uf, us, vs)
return uf, t
end
function bench_grid_union(nodes :: Int, edges :: Int)
width = convert(Int, ceil(sqrt(nodes)))
us = [convert(Int, floor(nodes * rand()) + 1) for e in 1:edges]
dirs = [1, nodes - 1, width, nodes - width]
vs = [(dirs[convert(Int, floor(4 * rand()) + 1)] + us[e]) % nodes + 1
for e in 1:edges]
uf = UnionFinder(nodes)
t = @elapsed union!(uf, us, vs)
return uf, t
end
function bench_avg(nodes :: Int, frac :: Float64, sweeps :: Int, f :: Function)
uts = Vector{Float64}(undef, sweeps)
cts = Vector{Float64}(undef, sweeps)
edges = convert(Int, ceil(nodes * frac))
for i in 1:sweeps
uf, uts[i] = f(nodes, edges)
cts[i] = @elapsed CompressedFinder(uf)
uts[i] = edges == 0 ? uts[i] : uts[i] / edges
cts[i] = cts[i] / nodes
end
@printf "%12s/edge %12s/node\n" fmt_time(uts) fmt_time(cts)
end
function benchmark_main()
@printf "%30s%17s %17s\n" " " "UnionFinder" "CompressedFinder"
@printf "%30s" "Sparse 1,000 node graph"
bench_avg(1000, 0.1, 1000, bench_graph_union)
@printf "%30s" "Dense 1,000 node graph"
bench_avg(1000, 0.8, 1000, bench_graph_union)
@printf "%30s" "Sparse 1,000,000 node graph"
bench_avg(1000 * 1000, 0.1, 10, bench_graph_union)
@printf "%30s" "Dense 1,000,000 node graph"
bench_avg(1000 * 1000, 0.8, 10, bench_graph_union)
println()
@printf "%30s" "Sparse 1,000 node grid"
bench_avg(1000, 0.1, 1000, bench_grid_union)
@printf "%30s" "Dense 1,000 node grid"
bench_avg(1000, 0.8, 1000, bench_grid_union)
@printf "%30s" "Sparse 1,000,000 node grid"
bench_avg(1000 * 1000, 0.1, 10, bench_grid_union)
@printf "%30s" "Dense 1,000,000 node grid"
bench_avg(1000 * 1000, 0.8, 10, bench_grid_union)
end
benchmark_main()
|
module Control.Comonad.Traced.Interface
import Data.Morphisms
import Control.Comonad
import Control.Comonad.Env.Env
import Control.Comonad.Store.Store
import Control.Comonad.Traced.Traced
import Control.Comonad.Trans
%default total
public export
interface Comonad w => ComonadTraced m w | w where
trace : m -> w a -> a
--------------------------------------------------------------------------------
-- Utilities
--------------------------------------------------------------------------------
public export %inline
traces : ComonadTraced m w => (a -> m) -> w a -> a
traces f wa = trace (f $ extract wa) wa
public export %inline
lowerTrace : (ComonadTrans t, ComonadTraced m w) => m -> t w a -> a
lowerTrace m = trace m . lower
--------------------------------------------------------------------------------
-- Implementations
--------------------------------------------------------------------------------
public export %inline
Monoid m => ComonadTraced m (Morphism m) where
trace m (Mor f) = f m
public export %inline
ComonadTraced m w => ComonadTraced m (EnvT e w) where
trace = lowerTrace
public export %inline
ComonadTraced m w => ComonadTraced m (StoreT s w) where
trace = lowerTrace
public export %inline
(Comonad w, Monoid m) => ComonadTraced m (TracedT m w) where
trace m (MkTracedT wf) = extract wf m
|
function J=polarU2DCrossGrad(uList,systemType)
%%POLARU2DCROSSGRAD Given the direction cosine value u in 2D, obtain the
% derivative of the polar azimuth angle with respect to u.
%
%INPUTS: uList A 1XnumPoints (for only u) or a 2XnumPoints (if full unit
% vectors are given) set of direction cosines in 2D.
% systemType An optional parameter specifying the axis from which the
% angles are measured. Possible values are
% 0 (The default if omitted or an empty matrix is passed) The
% azimuth angle is counterclockwise from the x axis.
% 1 The azimuth angle is measured clockwise from the y axis.
%
%OUTPUTS: J A 1XnumPoints set of derivatives of the azimuth angle with
% respect to u evaluated at the given u values.
%
%EXAMPLE:
%Here, we verify that the derivatives returned by this function are about
%equal to those returned via numeric differentiation (forward
%differencing).
% points=[0.1,0.2,-0.2,0,-0.9];%u points
% systemType=1;
% epsVal=1e-9;
%
% az=u2PolAng2D(points,systemType);
% az1=u2PolAng2D(points+epsVal,systemType);
% JNumDiff=(az1-az)/epsVal;
% J=polarU2DCrossGrad(points,systemType);
%
% max(abs(JNumDiff-J))
%One will see that the difference is on the order of 3e-8, which is a good
%agreement.
%
%June 2017 David F. Crouse, Naval Research Laboratory, Washington D.C.
%(UNCLASSIFIED) DISTRIBUTION STATEMENT A. Approved for public release.
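%A short derivation (added here for clarity): for systemType 0 the azimuth
%satisfies u=cos(az), so du=-sin(az)*daz=-v*daz and daz/du=-1/v. For
%systemType 1 (clockwise from the y axis) u=sin(az), so du=cos(az)*daz and
%daz/du=1/v, matching the two cases below.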
if(nargin<2||isempty(systemType))
systemType=0;
end
hasV=size(uList,1)>1;
N=size(uList,2);
J=zeros(1,N);
for curPoint=1:N
u=uList(1,curPoint);
if(hasV)
v=uList(2,curPoint);
else
v=sqrt(1-u^2);
end
switch(systemType)
case 0
J(curPoint)=-1/v;
case 1
J(curPoint)=1/v;
otherwise
error('Invalid system type specified.')
end
end
end
%LICENSE:
%
%The source code is in the public domain and not licensed or under
%copyright. The information and software may be used freely by the public.
%As required by 17 U.S.C. 403, third parties producing copyrighted works
%consisting predominantly of the material produced by U.S. government
%agencies must provide notice with such work(s) identifying the U.S.
%Government material incorporated and stating that such material is not
%subject to copyright protection.
%
%Derived works shall not identify themselves in a manner that implies an
%endorsement by or an affiliation with the Naval Research Laboratory.
%
%RECIPIENT BEARS ALL RISK RELATING TO QUALITY AND PERFORMANCE OF THE
%SOFTWARE AND ANY RELATED MATERIALS, AND AGREES TO INDEMNIFY THE NAVAL
%RESEARCH LABORATORY FOR ALL THIRD-PARTY CLAIMS RESULTING FROM THE ACTIONS
%OF RECIPIENT IN THE USE OF THE SOFTWARE.
|
Require Import Wf_nat PeanoNat Psatz. (* Wf_nat: lt_wf, PeanoNat: =?, Psatz: lia *)
Check lt_wf.
Definition dec : forall (b:bool), {b = true} + {b = false} :=
fun (b:bool) =>
match b as b' return {b' = true} + {b' = false} with
| true => left (eq_refl true)
| false => right (eq_refl false)
end.
Definition fac : nat -> nat.
Proof.
refine (Fix lt_wf (fun _ => nat)
(fun (n:nat) =>
fun (fac : forall (y:nat), y < n -> nat) =>
if dec (n =? 0)
then 1
else n * (fac (n - 1) _)
)).
clear fac.
destruct n as [|n].
- inversion e.
- lia.
Defined.
Compute fac 8.
(* The key step: Fix_eq unfolds Fix by one iteration, provided the functional
   respects extensional equality of its recursive-call argument; the last line
   of the proof discharges exactly that obligation. *)
Lemma fac_S (n:nat) : fac (S n) = (S n) * fac n.
Proof.
unfold fac at 1; rewrite Fix_eq; fold fac.
now replace (S n - 1) with n by lia.
now intros x f g H; case dec; intros; rewrite ?H.
Qed.
|
[STATEMENT]
lemma word_le_not_less:
"((b::'a::len word) \<le> a) = (\<not>(a < b))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (b \<le> a) = (\<not> a < b)
[PROOF STEP]
by fastforce |
[STATEMENT]
lemma approx_minus_iff2: "x \<approx> y \<longleftrightarrow> - y + x \<approx> 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (x \<approx> y) = (- y + x \<approx> 0)
[PROOF STEP]
by (simp add: approx_def add.commute) |
"""
Defines the type RlxMPCC:
min f(x)
s.t. l <= x <= u
lcon(tb) <= cnl(x) <= ucon
with
cnl(x) := c(x),G(x),H(x),Phi(G(x),H(x),t)
Some functions are compatible with slack variables:
min f(x)
s.t. 0 = G(x)-yg
0 = H(x)-yh
l <= x <= u
lcon(tb) <= cnl_slack(x) <= ucon
with
cnl_slack(x) := c(x),yg,yh,Phi(yg,yh,t)
"""
mutable struct RlxMPCC <: AbstractNLPModel
meta :: NLPModelMeta
counters :: Counters
x0 :: Vector
mod :: AbstractMPCCModel
r :: Float64
s :: Float64
t :: Float64
tb :: Float64
n :: Int64 #better use meta.nvar
ncc :: Int64
function RlxMPCC(mod :: AbstractMPCCModel,
r :: Float64,
s :: Float64,
t :: Float64,
tb :: Float64;
meta :: NLPModelMeta = mod.mp.meta,
x :: Vector = mod.meta.x0)
#meta is given without slacks
ncc = mod.meta.ncc
n = mod.meta.nvar
ncon = mod.meta.ncon + 3*ncc
new_lcon = vcat(mod.meta.lcon,
mod.meta.lccG+tb*ones(ncc),
mod.meta.lccH+tb*ones(ncc),
-Inf*ones(ncc))
new_ucon = vcat(mod.meta.ucon,
Inf*ones(2*ncc),
zeros(ncc))
meta = NLPModelMeta(n, x0 = x, lvar = mod.meta.lvar, uvar = mod.meta.uvar,
ncon = ncon,
lcon = new_lcon, ucon = new_ucon)
if tb > 0.0 throw(error("Domain error: tb must be non-positive")) end
if minimum([r,s,t])<0.0
throw(error("Domain error: (r,s,t) must be non-negative"))
end
return new(meta,Counters(),x,mod,r,s,t,tb,n,ncc)
end
end
#return the number of "classical" non-linear constraints
function get_ncon(rlxmpcc :: RlxMPCC) return rlxmpcc.meta.ncon - 3*rlxmpcc.ncc end
function update_rlx!(rlx, r, s, t, tb)
rlx.r = r
rlx.s = s
rlx.t = t
rlx.tb = tb
return rlx
end
############################################################################
#
# Classical NLP functions on RlxMPCC
# obj, grad, grad!, hess, cons, cons!
#
############################################################################
function obj(rlxmpcc :: RlxMPCC, x :: Vector)
return obj(rlxmpcc.mod, x)
end
function grad(rlxmpcc :: RlxMPCC, x :: Vector)
return grad(rlxmpcc.mod, x)
end
function grad!(rlxmpcc :: RlxMPCC, x :: Vector, gx :: Vector)
return grad!(rlxmpcc.mod, x, gx)
end
function objgrad(rlxmpcc :: RlxMPCC, x :: Vector)
return obj(rlxmpcc, x), grad(rlxmpcc, x)
end
function objgrad!(rlxmpcc :: RlxMPCC, x :: Vector, gx :: Vector)
return obj(rlxmpcc, x), grad!(rlxmpcc, x, gx)
end
function hess(rlxmpcc :: RlxMPCC, x :: Vector; obj_weight = 1.0, y = zeros) #the function `zeros` serves as a "no multipliers" sentinel
if y != zeros
return hess(rlxmpcc, x, obj_weight, y)
else
return hess(rlxmpcc.mod, x)
end
end
function hess(rlxmpcc :: RlxMPCC, x :: Vector, obj_weight :: Float64, y :: Vector)
ncc = rlxmpcc.ncc
nl = get_ncon(rlxmpcc)
y_nl, y_G, y_H, y_Phi = y[1:nl], y[nl+1:nl+ncc], y[nl+ncc+1:nl+2*ncc], y[nl+2*ncc+1:nl+3*ncc]
hess_mp = hess(rlxmpcc.mod, x, obj_weight = obj_weight, y = y_nl)
if ncc > 0
G, H = consG(rlxmpcc.mod, x), consH(rlxmpcc.mod, x)
dgg, dgh, dhh = ddphi(G, H, rlxmpcc.r, rlxmpcc.s, rlxmpcc.t)
dph = dphi(G, H, rlxmpcc.r, rlxmpcc.s, rlxmpcc.t)
y_G += dph[1:ncc]
hess_G = hessG(rlxmpcc.mod, x, obj_weight = obj_weight, y = y_G)
y_H += dph[ncc+1:2*ncc]
hess_H = hessH(rlxmpcc.mod, x, obj_weight = obj_weight, y = y_H)
jG, jH = jacG(rlxmpcc.mod, x), jacH(rlxmpcc.mod, x)
  # the H-H block of the Phi Hessian uses dhh (previously computed but unused)
  hess_P = jG' * (diagm(0 => dgg) * jG) + jH' * (diagm(0 => dhh) * jH) + jG' * (diagm(0 => dgh) * jH) + jH' * (diagm(0 => dgh) * jG)
else
hess_G, hess_H, hess_P = Float64[], Float64[], Float64[]
end
return tril(hess_mp + hess_G + hess_H + hess_P)
end
function hprod(rlxmpcc :: RlxMPCC, x :: AbstractVector, v :: AbstractVector;
               obj_weight = 1.0, y :: AbstractVector = zeros(rlxmpcc.meta.ncon))
 NLPModels.increment!(rlxmpcc, :neval_hprod)
 H = hess(rlxmpcc, x, obj_weight = obj_weight, y = y)
 return (H + H' - Diagonal(diag(H))) * v #hess returns only the lower triangle
end
function hprod!(rlxmpcc :: RlxMPCC, x :: AbstractVector, v :: AbstractVector,
Hv :: AbstractVector;
obj_weight = 1.0, y :: AbstractVector = zeros(rlxmpcc.meta.ncon))
#NLPModels.increment!(nlp, :neval_hprod)
Hv = hprod(rlxmpcc, x, v; obj_weight = obj_weight, y=y)
return Hv
end
function hess_nlslack(rlxmpcc :: RlxMPCC, x :: Vector, v :: Vector, objw :: Float64)
n, ncc, ncon = rlxmpcc.meta.nvar, rlxmpcc.ncc, rlxmpcc.mod.meta.ncon
xn = x[1:n]
if ncc > 0
test = (hessnl(rlxmpcc.mod,xn,y=v[2*ncc+1:2*ncc+2*n+2*ncon], obj_weight = objw)
+ hessG(rlxmpcc.mod,xn,y=v[1:ncc], obj_weight = 0.0)
+ hessH(rlxmpcc.mod,xn,y=v[1+ncc:2*ncc], obj_weight = 0.0))
else
test = hessnl(rlxmpcc.mod,xn,y=v[2*ncc+1:2*ncc+2*n+2*ncon], obj_weight = objw)
end
return test
end
function hess_nlwthslack(rlxmpcc :: RlxMPCC, x :: Vector, v :: Vector, objw :: Float64)
n, ncc, ncon = rlxmpcc.meta.nvar, rlxmpcc.ncc, rlxmpcc.mod.meta.ncon
xn = x[1:n]
if ncc > 0
test = (hessnl(rlxmpcc.mod,xn,y=v[2*ncc+1:2*ncc+2*ncon], obj_weight = objw)
+ hessG(rlxmpcc.mod,xn,y=v[1:ncc], obj_weight = 0.0)
+ hessH(rlxmpcc.mod,xn,y=v[1+ncc:2*ncc], obj_weight = 0.0))
else
test = hessnl(rlxmpcc.mod,xn,y=v[2*ncc+1:2*ncc+2*ncon], obj_weight = objw)
end
return test
end
function hess_relax(rlxmpcc :: RlxMPCC, yg :: Vector, yh :: Vector, y :: Vector)
ncc = rlxmpcc.ncc
if length(y) == 3*ncc
y_Phi = y[2*ncc+1:3*ncc]
elseif length(y) == ncc
y_Phi = y
else
throw(error("Dimension error: y"))
end
if ncc > 0
dgg, dgh, dhh = ddphi(yg, yh, rlxmpcc.r, rlxmpcc.s, rlxmpcc.t)
hess_P = vcat(hcat(diagm(0 => y_Phi.*dgg),diagm(0 => y_Phi.*dgh)),
hcat(diagm(0 => y_Phi.*dgh),diagm(0 => y_Phi.*dhh)))
else
hess_P = zeros(0,0)
end
return hess_P #matrix of size 2*ncc x 2*ncc
end
function hess_coord(rlxmpcc :: RlxMPCC,
x :: Vector;
obj_weight :: Float64 = 1.0,
y :: Vector = zeros(rlxmpcc.meta.ncon))
# return findnz(hess(rlxmpcc, x, obj_weight = obj_weight, y = y))
A = hess(rlxmpcc, x, obj_weight = obj_weight, y = y)
I = findall(!iszero, A)
return (getindex.(I, 1), getindex.(I, 2), A[I])
end
#########################################################
#
# Return the vector of the constraints
# |yg - G(x)|, |yh-H(x)|, c(x), yG, yH, Phi(yg,yh,t)
#
#########################################################
function cons(rlxmpcc :: RlxMPCC, x :: Vector)
n, ncc = rlxmpcc.meta.nvar, rlxmpcc.ncc
if length(x) == n
c = cons_nl(rlxmpcc.mod, x) #c(x)
G = consG(rlxmpcc.mod, x)
H = consH(rlxmpcc.mod, x)
sc = vcat(G,H)
cc = phi(G, H, rlxmpcc.r, rlxmpcc.s, rlxmpcc.t)
elseif length(x) == n + 2*ncc
xn, yg, yh = x[1:n], x[n+1:n+ncc], x[n+ncc+1:n+2*ncc]
G = consG(rlxmpcc.mod, xn)
H = consH(rlxmpcc.mod, xn)
c = vcat(G-yg,H-yh,cons_nl(rlxmpcc.mod, xn))
sc = vcat(yg, yh)
cc = phi(yg,yh,rlxmpcc.r,rlxmpcc.s,rlxmpcc.t)
else
throw(error("Domain error"))
end
return vcat(c,sc,cc)
end
function cons_cc(rlxmpcc :: RlxMPCC, x :: Vector)
n, ncc = rlxmpcc.meta.nvar, rlxmpcc.ncc
if length(x) == n
G = consG(rlxmpcc.mod, x)
H = consH(rlxmpcc.mod, x)
cc = phi(G, H, rlxmpcc.r, rlxmpcc.s, rlxmpcc.t)
elseif length(x) == n + 2*ncc
xn, yg, yh = x[1:n], x[n+1:n+ncc], x[n+ncc+1:n+2*ncc]
cc = phi(yg,yh,rlxmpcc.r,rlxmpcc.s,rlxmpcc.t)
else
throw(error("Domain error"))
end
return cc
end
#########################################################
#
# Return the violation of the constraints
# |yg - G(x)|, |yh-H(x)|
# lx <= x <= ux
# lc <= c(x) <= uc
# 0 <= yg, 0 <= yh
# Phi(yg,yh,t) <= 0
#
#########################################################
function viol(rlxmpcc :: RlxMPCC, x :: Vector)
return viol(rlxmpcc, x, cons(rlxmpcc, x))
end
#same function but avoid:
# c = cons(rlxmpcc, x)
function viol(rlxmpcc :: RlxMPCC, x :: Vector, c :: Vector)
n, ncc = rlxmpcc.meta.nvar, rlxmpcc.ncc
if length(x) == n
viol_slack = Float64[]
viol_x = max.(rlxmpcc.meta.lvar-x, 0)+max.(x-rlxmpcc.meta.uvar, 0)
viol_c = max.(rlxmpcc.meta.lcon-c, 0)+max.(c-rlxmpcc.meta.ucon, 0)
elseif length(x) == n + 2*ncc
c_slack = c[1:2*ncc]
viol_slack = abs.(c_slack)
xn = x[1:n]
viol_x = max.(rlxmpcc.meta.lvar-xn, 0)+max.(xn-rlxmpcc.meta.uvar, 0)
c_nl = c[2*ncc+1:length(c)]
viol_c = max.(rlxmpcc.meta.lcon-c_nl, 0)+max.(c_nl-rlxmpcc.meta.ucon, 0)
else
throw(error("Domain error"))
end
return vcat(viol_slack,viol_x, viol_c)
end
#########################################################
#
# Return the violation of the "classical" constraints
# |yg - G(x)|, |yh-H(x)|, l <= x <= u, lc <= c(x) <= uc
#
#########################################################
function viol_cons_nl(rlxmpcc :: RlxMPCC, x :: Vector)
return viol_cons_nl(rlxmpcc, x, viol_mp(rlxmpcc.mod, x))
end
function viol_cons_nl(rlxmpcc :: RlxMPCC, x :: Vector, c :: Vector)
n, ncc = rlxmpcc.meta.nvar, rlxmpcc.ncc
if length(x) == n
#c = viol_mp(rlxmpcc.mod, x)
elseif length(x) == n+2*ncc
xn, yg, yh = x[1:n], x[n+1:n+ncc], x[n+ncc+1:n+2*ncc]
#c = viol_mp(rlxmpcc.mod, xn)
G = consG(rlxmpcc.mod, xn)
H = consH(rlxmpcc.mod, xn)
c = vcat(G-yg,H-yh,c)
else
throw(error("error wrong dimension"))
end
return c
end
#########################################################
#
# Return the violation of the "classical" constraints
# |yg - G(x)|, |yh-H(x)|, -c(x) <= -lc, c(x) <= uc
#
# use for the penalization
#
#########################################################
function viol_cons_nlwth(rlxmpcc :: RlxMPCC, x :: Vector)
n, ncc = rlxmpcc.meta.nvar, rlxmpcc.ncc
if length(x) == n
c = viol_mp(rlxmpcc.mod, x)
elseif length(x) == n+2*ncc
xn, yg, yh = x[1:n], x[n+1:n+ncc], x[n+ncc+1:n+2*ncc]
c = viol_nl(rlxmpcc.mod, xn)
G = consG(rlxmpcc.mod, xn)
H = consH(rlxmpcc.mod, xn)
c = vcat(G-yg,H-yh,c)
else
throw(error("error wrong dimension"))
end
return c
end
function viol_cons_nlwth(rlxmpcc :: RlxMPCC, x :: Vector, c :: Vector)
n, ncc = rlxmpcc.meta.nvar, rlxmpcc.ncc
if length(x) == n
#c = viol_mp(rlxmpcc.mod, x)
elseif length(x) == n+2*ncc
xn, yg, yh = x[1:n], x[n+1:n+ncc], x[n+ncc+1:n+2*ncc]
#c = viol_nl(rlxmpcc.mod, xn)
G = consG(rlxmpcc.mod, xn)
H = consH(rlxmpcc.mod, xn)
c = vcat(G-yg,H-yh,c)
else
throw(error("error wrong dimension"))
end
return c
end
function jacl(rlxmpcc :: RlxMPCC, x :: Vector)
A, Il,Iu,Ig,Ih,IG,IH,IPHI = jac_actif(rlxmpcc, x, 0.0)
return A
end
function jac(rlxmpcc :: RlxMPCC, x :: Vector)
A = jac_actif2(rlxmpcc, x, 0.0)
return A
end
function jprod(rlxmpcc :: RlxMPCC, x :: AbstractVector, v :: AbstractVector)
#NLPModels.increment!(nlp, :neval_jprod)
return jac(rlxmpcc, x)*v
end
function jprod!(rlxmpcc :: RlxMPCC, x :: AbstractVector, v :: AbstractVector, Jv :: AbstractVector)
#NLPModels.increment!(nlp, :neval_jprod)
Jv = jprod(rlxmpcc,x,v)
return Jv
end
function jtprod(rlxmpcc :: RlxMPCC, x :: AbstractVector, v :: AbstractVector)
#NLPModels.increment!(nlp, :neval_jtprod)
return jac(rlxmpcc, x)'*v
end
function jtprod!(rlxmpcc :: RlxMPCC, x :: AbstractVector, v :: AbstractVector, Jtv :: AbstractVector)
#NLPModels.increment!(nlp, :neval_jtprod)
Jtv = jtprod(rlxmpcc, x, v)
return Jtv
end
function jac_coord(rlxmpcc :: RlxMPCC, x :: Vector)
 # findnz only applies to sparse matrices; mirror hess_coord above for dense A
 A = jac(rlxmpcc, x)
 I = findall(!iszero, A)
 return (getindex.(I, 1), getindex.(I, 2), A[I])
end
function jac_nl(rlxmpcc :: RlxMPCC, x :: Vector)
n = rlxmpcc.meta.nvar
ncon = get_ncon(rlxmpcc)
 Jl, Ju = Matrix(1.0I, n, n), Matrix(1.0I, n, n) #eye was removed in Julia 1.0
if ncon > 0
Jc = jac_nl(rlxmpcc.mod, x)
A = vcat(-Jl,Ju,-Jc,Jc)
else
A = vcat(-Jl,Ju)
end
return A
end
function jac_slack(rlxmpcc :: RlxMPCC, x :: Vector)
JG = jacG(rlxmpcc.mod,x)
JH = jacH(rlxmpcc.mod,x)
A = vcat(JG,JH)
return A
end
function jac_nlslack(rlxmpcc :: RlxMPCC, x :: Vector)
if rlxmpcc.ncc > 0
rslt = vcat(jac_slack(rlxmpcc, x), jac_nl(rlxmpcc, x))
else
rslt = jac_nl(rlxmpcc, x)
end
return rslt
end
function jac_nlwthslack(rlxmpcc :: RlxMPCC, x :: Vector)
if rlxmpcc.ncc > 0
Jc = jac_nl(rlxmpcc.mod, x)
rslt = vcat(jac_slack(rlxmpcc, x), -Jc,Jc)
else
Jc = jac_nl(rlxmpcc.mod, x)
rslt = vcat(-Jc,Jc)
end
return rslt # 2ncc+2ncon x n
end
function jtprod_nlslack(rlxmpcc :: RlxMPCC, x :: Vector, v :: Vector)
#v of size 2*ncc + 2*n + 2*ncon
n, ncc, ncon = rlxmpcc.meta.nvar, rlxmpcc.ncc, rlxmpcc.mod.meta.ncon
xn = x[1:n]
vbl, vbu = v[2*ncc+1:2*ncc+n], v[2*ncc+n+1:2*ncc+2*n]
vlc, vuc = v[2*ncc+2*n+1:2*ncc+2*n+ncon], v[2*ncc+2*n+ncon+1:length(v)]
rslt = - vbl + vbu
if ncon > 0
rslt += - jtprodnl(rlxmpcc.mod, xn, vlc) + jtprodnl(rlxmpcc.mod, xn, vuc)
end
if ncc > 0
rslt += jtprodG(rlxmpcc.mod,xn,v[1:ncc]) + jtprodH(rlxmpcc.mod,xn,v[ncc+1:2*ncc])
end
return rslt
end
function jtprod_nlwthslack(rlxmpcc :: RlxMPCC, x :: Vector, v :: Vector)
#v of size 2*ncc + 2*ncon
n, ncc, ncon = rlxmpcc.meta.nvar, rlxmpcc.ncc, rlxmpcc.mod.meta.ncon
xn = x[1:n]
vlc, vuc = v[2*ncc+1:2*ncc+ncon], v[2*ncc+ncon+1:length(v)]
rslt = zeros(n)
if ncon > 0
rslt += - jtprodnl(rlxmpcc.mod, xn, vlc) .+ jtprodnl(rlxmpcc.mod, xn, vuc)
end
if ncc > 0
rslt += jtprodG(rlxmpcc.mod,xn,v[1:ncc]) + jtprodH(rlxmpcc.mod,xn,v[ncc+1:2*ncc])
end
return rslt
end
function jac_rlx(rlxmpcc :: RlxMPCC, x :: Vector)
 G, H = consG(rlxmpcc.mod, x), consH(rlxmpcc.mod, x) #G, H were undefined here
 JPHI = dphi(G, H, rlxmpcc.r, rlxmpcc.s, rlxmpcc.t)
JPHIG, JPHIH = JPHI[1:rlxmpcc.ncc], JPHI[rlxmpcc.ncc+1:2*rlxmpcc.ncc]
JG = jacG(rlxmpcc.mod,x)
JH = jacH(rlxmpcc.mod,x)
A = vcat(-JG, -JH, diagm(0 => JPHIG)*JG + diagm(0 => JPHIH)*JH)
return A
end
function jac_cc(rlxmpcc :: RlxMPCC, x :: Vector)
n, ncc, ncon = rlxmpcc.meta.nvar, rlxmpcc.ncc, rlxmpcc.mod.meta.ncon
if length(x) != n+2*ncc
throw(error("error wrong dimension"))
end
xn, yg, yh = x[1:n], x[n+1:n+ncc], x[n+ncc+1:n+2*ncc]
JPHI = dphi(yg, yh, rlxmpcc.r, rlxmpcc.s, rlxmpcc.t) #vector of size 2ncc
JPHIG, JPHIH = JPHI[1:rlxmpcc.ncc], JPHI[rlxmpcc.ncc+1:2*rlxmpcc.ncc]
A = hcat(zeros(ncc,n), diagm(0 => JPHIG), diagm(0 => JPHIH))
 return A #array of size ncc x (n+2ncc)
end
###########################################################################
#
# jac_actif(rlxmpcc :: RlxMPCC, x :: Vector, prec :: Float64)
# return A, Il,Iu,Ig,Ih,IG,IH,IPHI
#
###########################################################################
include("rlxmpcc_jacactif.jl")
|
# Copyright (c) 2018-2021, Carnegie Mellon University
# See LICENSE for details
_symGreater1 := o -> IsSymbolic(o) or o > 1;
Class(RulesRankedIndf, RuleSet);
RewriteRules(RulesRankedIndf, rec(
fCompose_flatten := ARule(fCompose, [@(1,fCompose)], e->@(1).val.children()),
# Degenerate HH rules (HH -> fAdd)
#HH_to_fAdd := Rule([HH, @, @, @(1).cond(e->e[1]=1 and Sum(e)=1)], e -> fAdd(e.N, e.n, 0)),
fAddElim := ARule(fCompose,
[@(1), @(2, fAdd, e -> e.params[3] = 0 and range(e) = range(@(1).val) and domain(e)=domain(@(1).val))],
e -> [ @(1).val ]),
HHZ_fAdd := ARule(fCompose, [@(1, HHZ), @(2, [fAdd])], e -> When(@(2).val.params[3]=0, [ @(1).val ],
[ HHZ(@(1).val.params[1], @(2).val.params[2], @(1).val.params[3] + @(2).val.params[3] * @(1).val.params[4][1], @(1).val.params[4])])),
HH_fAdd := ARule(fCompose, [@(1, HH), @(2, fAdd)], e ->
[ HH(@(1).val.params[1], @(2).val.params[2], @(1).val.params[3] + @(2).val.params[3] * @(1).val.params[4][1],
@(1).val.params[4]) ]),
fAdd_HH := ARule(fCompose, [@(2, fAdd), @(1, HH)], e ->
[ HH(range(@(2).val), domain(@(1).val), @(1).val.params[3] + @(2).val.params[3], @(1).val.params[4]) ]),
# Maude:
# var b1 b2 m n MM NN : NatExp . var d : Diagf . var i : Indf .
# var w ww : NatExpList . var v vv : NeNatExpList . vars gg gf : Genf .
# eq H ( b1, n w) o H ( b2, m ww) = H ( b1 + n * b2, (n * m) (w + lmul(n, ww))) .
# eq HZ(NN, b1, n w) o H ( b2, m ww) = HZ(NN, b1 + n * b2, (n * m) (w + lmul(n, ww))) .
# eq HZ(NN, b1, n w) o HZ(MM, b2, m ww) = HZ(NN, b1 + n * b2, (n * m) (w + lmul(n, ww))) .
#
HHoHH := ARule(fCompose, [@(1, HH), @(2, HH)], e -> [ let(
s1 := @(1).val.params[4], s2 := @(2).val.params[4],
b1 := @(1).val.params[3], b2 := @(2).val.params[3],
n := s1[1], m := s2[1], w := Drop(s1, 1), ww := List(Drop(s2, 1), x -> n*x),
HH(@(1).val.params[1], @(2).val.params[2], b1 + n*b2, Concatenation([n*m], ListAddZP(w, ww)))) ]),
# This rule is invalid when HH stride is odd, so in general it should not be used
    # NOTE: this rule is needed for DCTs; it seems that in that case the stride is always even, so it's ok to use it
# how to guard this for libraries??
BHHoHH := ARule(fCompose, [@(1, BHH), @(2, HH, e->_symGreater1(e.params[4][1]))], e -> [ let(
s1 := @(1).val.params[4], s2 := @(2).val.params[4],
b1 := @(1).val.params[3], b2 := @(2).val.params[3],
n := s1[1], m := s2[1], w := Drop(s1, 1), ww := List(Drop(s2, 1), x -> n*x),
BHH(@(1).val.params[1], @(2).val.params[2], b1 + n*b2, Concatenation([n*m], ListAddZP(w, ww)),
@(1).val.params[5])) ]),
#BHH_pull_out_base := Rule([BHH, @, @, @.cond(e->e<>0), @, @], e ->
# fCompose(HH(e.params[1], e.params[1], e.params[3], [1]), BHH(e.params[1], e.params[2], 0, e.params[4], e.params[5]-2*e.params[3]))),
HH_1_ftensor_pull_out := Rule([fTensor, [@(1,fCompose), [@(2,HH), @, @, @, [ListClass, _1]], ...], @(3, fId)], e->
let(h := @(2).val, id := @(3).val.params[1],
fCompose(HH(id*h.params[1], id*h.params[2], id*h.params[3], [1]),
fTensor(fCompose(Drop(@(1).val.rChildren(), 1)), @(3).val)))),
# HHxI2_base_pull_out is for closure reduction in autolib RDFT.
# Commented out to get smaller closure in DFT and it conflicts with HHxfId_base_pull_in rule below
#HHxI2_base_pull_out := Rule([fTensor, [@(2,HH), @, @, @.cond(e->e<>0), @], @(3, fId)], e->
# let(hp := @(2).val.params, id := @(3).val.params[1],
# fCompose(HH(id*hp[1], id*hp[1], id*hp[3], [1]),
# fTensor(HH(hp[1], hp[2], 0, hp[4]), @(3).val)))),
# Pull in HH into fTensor.
#
HHxfId_base_pull_in := ARule(fCompose, [[HH, @(1), @(2), @(3), [ListClass, _1]],
[fTensor, [@(4,HH), @, @, @, @], @(5, fId, x -> ForAll([@(1).val, @(2).val, @(3).val], e -> _divides(x.params[1], e)))]],
e -> let( t := @(5).val.params[1], hh := @(4).val.params,
[fTensor( HH( div(@(1).val, t), hh[2], hh[3] + div(@(3).val, t), hh[4] ), @(5).val )])),
HHoBHH := ARule(fCompose, [@(1, HH, e->e.params[3]=0 and _symGreater1(e.params[4][1])), @(2, BHH)], e -> [ let(
s1 := @(1).val.params[4], s2 := @(2).val.params[4],
b1 := @(1).val.params[3], b2 := @(2).val.params[3],
n := s1[1], m := s2[1], w := Drop(s1, 1), ww := List(Drop(s2, 1), x -> n*x),
BHH(@(1).val.params[1], @(2).val.params[2], b1 + n*b2, Concatenation([n*m], ListAddZP(w, ww)),
n*@(2).val.params[5])) ]),
HHZoHH := ARule(fCompose, [@(1, HHZ), @(2, HH)], e -> [ let(
s1 := @(1).val.params[4],
s2 := @(2).val.params[4],
b := @(1).val.params[3], bb := @(2).val.params[3],
n := s1[1], m := s2[1], w := Drop(s1, 1), ww := List(Drop(s2, 1), x->n*x),
HHZ(@(1).val.params[1], @(2).val.params[2], b + n*bb, Concatenation([n*m], ListAddZP(w, ww)))) ]),
HHZoHHZ := ARule(fCompose, [@(1, HHZ), @(2, HHZ)], e -> [ let(
s1 := @(1).val.params[4],
s2 := @(2).val.params[4],
b := @(1).val.params[3], bb := @(2).val.params[3],
n := s1[1], m := s2[1], w := Drop(s1, 1), ww := List(Drop(s2, 1), x->n*x),
HHZ(@(1).val.params[1], @(2).val.params[2], b + n*bb, Concatenation([n*m], ListAddZP(w, ww)))) ]),
KHoKH := ARule(fCompose, [@(1, KH),
[@(2, KH), @, @, _0, [ListClass, @, _1], [ListClass, _0, _0]]],
e -> [ let(
s1 := @(1).val.params[4],
s2 := @(2).val.params[4],
corr := @(1).val.params[5],
b := @(1).val.params[3], bb := @(2).val.params[3],
n := s1[1], m := s2[1], w := Drop(s1, 1), ww := List(Drop(s2, 1), x->n*x),
KH(@(1).val.params[1], @(2).val.params[2], b,
Concatenation([n*m], ListAddZP(w, ww)), corr)) ]),
# HHxIoHH assumes @(3) cannot be pulled into fTensor
HHxIoHH := ARule(fCompose, [[fTensor, [@(1, HH), @, @, @, [ListClass, _1]], @(2, fId)], @(3, HH)],
e -> let( p := @(1).val.params, n := @(2).val.params[1],
[ HH(n*p[1], n*p[2], n*p[3], [1]), @(3).val ] )),
# eq tr(n, m) o H(0, 1 n) = H(0, m 1) .
# H.domain must be less or equal to n
TroHH := ARule(fCompose,
[@(1, Tr), @(2, HH, e -> let(n := @(1).val.params[1], Cond(AnySyms(n, e.params[2]), n=e.params[2], n>=e.params[2]) and e.params[4][1]=1 and ForAll([e.params[3]]::Drop(e.params[4],1), x -> _divides(n, x))))],
e -> [ let(
n := @(1).val.params[1], m := @(1).val.params[2], b := @(2).val.params[3], str := Drop(@(2).val.params[4], 1),
HH(@(2).val.params[1], @(2).val.params[2], b/n, [m] :: List(str, x -> x/n))) ]),
Refl0_u_HH0 := ARule(fCompose, [@(1, Refl0_u), @(2, HH, e -> e.params[3]=0 and e.params[4]=[1])], e -> [ let(
k := @(1).val.params[1],
HH(@(1).val.range(), @(2).val.domain(), 0, [k])) ]),
Refl0_u_HHrest := ARule(fCompose, [@(1, Refl0_u), @(2, HH, e -> e.params[3]=e.params[2] and e.params[4]=[1, @(1).val.params[2]])], e -> [ let(
k := @(1).val.params[1],
BHH(@(1).val.range(), @(2).val.domain(), 1, [2*k,1], 2*@(1).val.range())) ]),
Refl0_odd_HH0 := ARule(fCompose, [@(1, Refl0_odd), @(2, HH, e -> e.params[3]=0 and e.params[4]=[1])], e -> [ let(
k := @(1).val.params[1],
HH(@(1).val.range(), @(2).val.domain(), 0, [2*k+1])) ]),
Refl0_odd_HHrest := ARule(fCompose, [@(1, Refl0_odd), @(2, HH, e -> e.params[4]=[1, @(1).val.params[2]])], e -> [ let(
k := @(1).val.params[1],
BHH(@(1).val.range(), @(2).val.domain(), 1, [2*k+1,1], 2*@(1).val.range())) ]),
Refl1_HH := ARule(fCompose, [@(1, Refl1), @(2, HH, e -> e.params[3]=0 and e.params[4]=[1, @(1).val.params[2]])], e -> [ let(
k := @(1).val.params[1],
BHH(@(1).val.range(), @(2).val.domain(), 0, [2*k,1], 2*@(1).val.range() - 1)) ]),
MMoHH := ARule(fCompose, [@(1, MM), @(2, HH, e -> e.params[3]=0 and e.params[4]=[1, @(1).val.params[1]])], e -> [ let(
m := @(1).val.params[2],
KH(@(2).val.params[1], @(2).val.params[2], 0, [m, 1], [0,0])) ]),
fTensor_AxI_HH_stride_1 := ARule(fCompose, [[@(1, fTensor), ..., [fId, @(2)]], # NOTE: assumes d | base
[@(3,HH), @,
@.cond(e->_dividesUnsafe(@(2).val,e)),
@.cond(e->_dividesUnsafe(@(2).val,e)),
[ListClass, _1, ...]]], e ->
let(p := @(3).val.params, d := @(2).val, vs := Drop(p[4],1), base := p[3],
[ fTensor( fCompose( fTensor(DropLast(@(1).val.children(), 1)), HH(div(p[1],d), div(p[2],d), div(base,d),
Concatenation([1], List(vs, v -> div(v,d))))), fId(d))])),
fTensor_AxI_HH_stride_n := ARule(fCompose, # NOTE: check assumptions
[[@(1,fTensor), ..., [fId, @(2)]],
[@(3,HH), @, @, _0, [ListClass, @.cond(e->e=@(2).val), _0, _1]]],
e -> let(
d := @(2).val,
p := @(3).val.params,
ch := @(1).val.children(),
A := fTensor(DropLast(ch, 1)),
[ fCompose(HH(range(A)*d, range(A), 0, [d, 0, 1]), A) ])
),
fTensor_HH_domain1_fId := ARule(fTensor, [ [@(1, HH), @, _1, @, @], @(2, fId) ],
e -> let(
p := @(1).val.params,
n := @(2).val.params[1],
[ HH(n*p[1], n, n*p[3], [1] :: List(Drop(p[4], 1), x->n*x)) ])
),
HH_domain1_s1_to_fId1 := Rule( [HH, _1, _1, _0, [ListClass, _1]], e -> fId(1) ),
fTensor_fId_X_HH := ARule(fCompose, [[@(4,fTensor), @(1, fId), @(2)],
[@(3,HH), @, @.cond(e->e=@(2).val.domain()), _0, [ListClass, _1, @.cond(e->e=@(2).val.domain())]]],
e ->let(p := @(3).val.params, vs := p[4][2],
[ HH(@(4).val.range(), @(2).val.domain(), 0, [1, vs]), @(2).val ])),
    # This rule is a hack because it changes the advdims of the SPL. The range of the outer
    # loop is also assumed to be less than @(5).val (though if it's not, then there is
    # overlap in the inner loop).
f2DTensor_HH_HH_HH :=ARule( fCompose, [
[f2DTensor, [HH, @(1), @(2), @(3), [ListClass, _1]],
[HH, @(4), @(5), @(6), [ListClass, _1]]],
[HH, @(7).cond(x->x=@(2).val*@(5).val or x=@(5).val*@(2).val),
@(8).cond(x->x=@(2).val),
_0,
[ListClass, @(9).cond(x->x=@(5).val), _1]]],
e -> [HH( @(1).val*@(4).val, @(2).val, @(6).val+@(4).val*@(3).val, [@(4).val, 1] )]),
# simplified merge_tensors rule
fTensor_fTensor := ARule(fCompose,
[[fTensor, @(1), @(2)], [fTensor, @(3), @(4).cond(
e->e.range()=@(2).val.domain() or @(3).val.range() = @(1).val.domain())]],
e -> [ fTensor(fCompose(@(1).val, @(3).val), fCompose(@(2).val, @(4).val)) ]),
# eq crt(n, m) o H(0, 1 m) = HZ(m * n, 0, m n) .
# eq crt(n, m) o H(0, m 1) = HZ(m * n, 0, n m) .
CRToHH := ARule(fCompose, [ [CRT, @(1), @(2), 1, 1],
@(3,HH,e -> e.params[3]=0 and e.params[4] in [[1,@(2).val], [@(2).val, 1]]) ], e -> [ let(
n := @(1).val,
m := @(2).val,
s1 := @(3).val.params[4][1],
HHZ(@(3).val.params[1], @(3).val.params[2], 0, When(s1=1, [m, n], [n,m]))) ]),
## Scat * ISumn, ISumn * Gath
Scat_ISumn := ARule(Compose, [ @(1, Scat), @(2, ISumn) ],
e -> [ ISumn(@(2).val.domain,
Scat(@(1).val.func.upRank()) * @(2).val.child(1)) ]),
ISumn_Gath := ARule(Compose, [ @(1, ISumn), @(2, Gath) ],
e -> [ ISumn(@(1).val.domain,
@(1).val.child(1) * Gath(@(2).val.func.upRank())) ]),
## Prm * ISumn, ISumn * Prm
Prm_ISumn := ARule(Compose, [ @(1, Prm), @(2, ISumn) ],
e -> [ ISumn(@(2).val.domain,
Prm(@(1).val.func.upRank()) * @(2).val.child(1)) ]),
ISumn_Prm := ARule(Compose, [ @(1, ISumn), @(2, Prm) ],
e -> [ ISumn(@(1).val.domain,
@(1).val.child(1) * Prm(@(2).val.func.upRank())) ]),
## Diag * ISumn, ISumn * Diag
Diag_ISumn := ARule(Compose, [ @(1, Diag), @(2, ISumn) ],
e -> [ ISumn(@(2).val.domain,
Diag(@(1).val.element.upRank()) * @(2).val.child(1)) ]),
ISumn_Diag := ARule(Compose, [ @(1, ISumn), @(2, Diag) ],
e -> [ ISumn(@(1).val.domain,
@(1).val.child(1) * Diag(@(2).val.element.upRank())) ]),
##UU
    #Implementing a non-zero base would probably require having a 2D base.
    #Implicit requirement: @(2) has base=0 and both leading dims are the same.
UU_UU := ARule(fCompose, [@(1, UU), @(2, UU)],
e-> let(b1X := @(1).val.params[3], b2X := @(2).val.params[3], b1Y := @(1).val.params[4], b2Y := @(2).val.params[4], s1 := @(1).val.params[5], s2 := @(2).val.params[5],
c2 := @(2).val.params[6], ld := @(1).val.params[7], ss1 := @(1).val.params[8],
ss2 := @(2).val.params[8], ss:=ListAddLP(When(Length(ss2)>0,Transposed([List(Transposed(ss2)[1],x->x*s1)
,Transposed(ss2)[2]]),[]), ss1),
[UU(@(1).val.params[1], @(2).val.params[2], b1X + s1*b2X, b1Y+b2Y, s1*s2, c2, ld, ss)])),
# UU_H := ARule(fCompose, [@(1, UU), @(2, H)],
# e-> let(b1X := @(1).val.params[3], b2X := @(2).val.params[3], b1Y := @(1).val.params[4], s1 := @(1).val.params[5], s2 := @(2).val.params[4],
# ld := @(1).val.params[7], ss1 := @(1).val.params[8], c1 := @(1).val.params[6],
# [UU(@(1).val.params[1], @(2).val.params[2], b1X + s1*b2X, b1Y, s1*s2, c1, ld, ss1)])),
));
|
/-
Copyright (c) 2021 Adam Topaz. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Adam Topaz
-/
import category_theory.sites.sheaf
/-!
# The plus construction for presheaves.
This file contains the construction of `P⁺`, for a presheaf `P : Cᵒᵖ ⥤ D`
where `C` is endowed with a Grothendieck topology `J`.
See https://stacks.math.columbia.edu/tag/00W1 for details.
-/
namespace category_theory.grothendieck_topology
open category_theory
open category_theory.limits
open opposite
universes w v u
variables {C : Type u} [category.{v} C] (J : grothendieck_topology C)
variables {D : Type w} [category.{max v u} D]
noncomputable theory
variables [∀ (P : Cᵒᵖ ⥤ D) (X : C) (S : J.cover X), has_multiequalizer (S.index P)]
variables (P : Cᵒᵖ ⥤ D)
/-- The diagram whose colimit defines the values of `plus`. -/
@[simps]
def diagram (X : C) : (J.cover X)ᵒᵖ ⥤ D :=
{ obj := λ S, multiequalizer (S.unop.index P),
map := λ S T f,
multiequalizer.lift _ _ (λ I, multiequalizer.ι (S.unop.index P) (I.map f.unop)) $
λ I, multiequalizer.condition (S.unop.index P) (I.map f.unop),
map_id' := λ S, by { ext I, cases I, simpa },
map_comp' := λ S T W f g, by { ext I, simpa } }
/-- A helper definition used to define the morphisms for `plus`. -/
@[simps]
def diagram_pullback {X Y : C} (f : X ⟶ Y) :
J.diagram P Y ⟶ (J.pullback f).op ⋙ J.diagram P X :=
{ app := λ S, multiequalizer.lift _ _
(λ I, multiequalizer.ι (S.unop.index P) I.base) $
λ I, multiequalizer.condition (S.unop.index P) I.base,
naturality' := λ S T f, by { ext, dsimp, simpa } }
/-- A natural transformation `P ⟶ Q` induces a natural transformation
between diagrams whose colimits define the values of `plus`. -/
@[simps]
def diagram_nat_trans {P Q : Cᵒᵖ ⥤ D} (η : P ⟶ Q) (X : C) :
J.diagram P X ⟶ J.diagram Q X :=
{ app := λ W, multiequalizer.lift _ _
(λ i, multiequalizer.ι _ i ≫ η.app _) begin
intros i,
erw [category.assoc, category.assoc, ← η.naturality,
← η.naturality, ← category.assoc, ← category.assoc, multiequalizer.condition],
refl,
end,
naturality' := λ _ _ _, by { dsimp, ext, simpa } }
@[simp]
lemma diagram_nat_trans_id (X : C) (P : Cᵒᵖ ⥤ D) :
J.diagram_nat_trans (𝟙 P) X = 𝟙 (J.diagram P X) :=
begin
ext,
dsimp,
simp only [multiequalizer.lift_ι, category.id_comp],
erw category.comp_id
end
@[simp]
lemma diagram_nat_trans_comp {P Q R : Cᵒᵖ ⥤ D} (η : P ⟶ Q) (γ : Q ⟶ R) (X : C) :
J.diagram_nat_trans (η ≫ γ) X = J.diagram_nat_trans η X ≫ J.diagram_nat_trans γ X :=
by { ext, dsimp, simp }
variable [∀ (X : C), has_colimits_of_shape (J.cover X)ᵒᵖ D]
/-- The plus construction, associating a presheaf to any presheaf.
See `plus_functor` below for a functorial version. -/
def plus_obj : Cᵒᵖ ⥤ D :=
{ obj := λ X, colimit (J.diagram P X.unop),
map := λ X Y f, colim_map (J.diagram_pullback P f.unop) ≫ colimit.pre _ _,
map_id' := begin
intros X,
ext S,
dsimp,
simp only [diagram_pullback_app, colimit.ι_pre,
ι_colim_map_assoc, category.comp_id],
let e := S.unop.pullback_id,
dsimp only [functor.op, pullback_obj],
erw [← colimit.w _ e.inv.op, ← category.assoc],
convert category.id_comp _,
ext I,
dsimp,
simp only [multiequalizer.lift_ι, category.id_comp, category.assoc],
dsimp [cover.arrow.map, cover.arrow.base],
cases I,
congr,
simp,
end,
map_comp' := begin
intros X Y Z f g,
ext S,
dsimp,
simp only [diagram_pullback_app, colimit.ι_pre_assoc,
colimit.ι_pre, ι_colim_map_assoc, category.assoc],
let e := S.unop.pullback_comp g.unop f.unop,
dsimp only [functor.op, pullback_obj],
erw [← colimit.w _ e.inv.op, ← category.assoc, ← category.assoc],
congr' 1,
ext I,
dsimp,
simp only [multiequalizer.lift_ι, category.assoc],
cases I,
dsimp only [cover.arrow.base, cover.arrow.map],
congr' 2,
simp,
end }
/-- An auxiliary definition used in `plus` below. -/
def plus_map {P Q : Cᵒᵖ ⥤ D} (η : P ⟶ Q) : J.plus_obj P ⟶ J.plus_obj Q :=
{ app := λ X, colim_map (J.diagram_nat_trans η X.unop),
naturality' := begin
intros X Y f,
dsimp [plus_obj],
ext,
simp only [diagram_pullback_app, ι_colim_map, colimit.ι_pre_assoc,
colimit.ι_pre, ι_colim_map_assoc, category.assoc],
simp_rw ← category.assoc,
congr' 1,
ext,
dsimp,
simpa,
end }
@[simp]
lemma plus_map_id (P : Cᵒᵖ ⥤ D) : J.plus_map (𝟙 P) = 𝟙 _ :=
begin
ext x : 2,
dsimp only [plus_map, plus_obj],
rw [J.diagram_nat_trans_id, nat_trans.id_app],
ext,
dsimp,
simp,
end
@[simp]
lemma plus_map_comp {P Q R : Cᵒᵖ ⥤ D} (η : P ⟶ Q) (γ : Q ⟶ R) :
J.plus_map (η ≫ γ) = J.plus_map η ≫ J.plus_map γ :=
begin
ext : 2,
dsimp only [plus_map],
rw J.diagram_nat_trans_comp,
ext,
dsimp,
simp,
end
variable (D)
/-- The plus construction, a functor sending `P` to `J.plus_obj P`. -/
@[simps]
def plus_functor : (Cᵒᵖ ⥤ D) ⥤ Cᵒᵖ ⥤ D :=
{ obj := λ P, J.plus_obj P,
map := λ P Q η, J.plus_map η,
map_id' := λ _, plus_map_id _ _,
map_comp' := λ _ _ _ _ _, plus_map_comp _ _ _ }
variable {D}
/-- The canonical map from `P` to `J.plus.obj P`.
See `to_plus` for a functorial version. -/
def to_plus : P ⟶ J.plus_obj P :=
{ app := λ X, cover.to_multiequalizer (⊤ : J.cover X.unop) P ≫
colimit.ι (J.diagram P X.unop) (op ⊤),
naturality' := begin
intros X Y f,
dsimp [plus_obj],
delta cover.to_multiequalizer,
simp only [diagram_pullback_app, colimit.ι_pre, ι_colim_map_assoc, category.assoc],
dsimp only [functor.op, unop_op],
let e : (J.pullback f.unop).obj ⊤ ⟶ ⊤ := hom_of_le (order_top.le_top _),
rw [← colimit.w _ e.op, ← category.assoc, ← category.assoc, ← category.assoc],
congr' 1,
ext,
dsimp,
simp only [multiequalizer.lift_ι, category.assoc],
dsimp [cover.arrow.base],
simp,
end }
@[simp, reassoc]
lemma to_plus_naturality {P Q : Cᵒᵖ ⥤ D} (η : P ⟶ Q) :
η ≫ J.to_plus Q = J.to_plus _ ≫ J.plus_map η :=
begin
ext,
dsimp [to_plus, plus_map],
delta cover.to_multiequalizer,
simp only [ι_colim_map, category.assoc],
simp_rw ← category.assoc,
congr' 1,
ext,
dsimp,
simp,
end
variable (D)
/-- The natural transformation from the identity functor to `plus`. -/
@[simps]
def to_plus_nat_trans : (𝟭 (Cᵒᵖ ⥤ D)) ⟶ J.plus_functor D :=
{ app := λ P, J.to_plus P,
naturality' := λ _ _ _, to_plus_naturality _ _ }
variable {D}
/-- `(P ⟶ P⁺)⁺ = P⁺ ⟶ P⁺⁺` -/
@[simp]
lemma plus_map_to_plus : J.plus_map (J.to_plus P) = J.to_plus (J.plus_obj P) :=
begin
ext X S,
dsimp [to_plus, plus_obj, plus_map],
delta cover.to_multiequalizer,
simp only [ι_colim_map],
let e : S.unop ⟶ ⊤ := hom_of_le (order_top.le_top _),
simp_rw [← colimit.w _ e.op, ← category.assoc],
congr' 1,
ext I,
dsimp,
simp only [diagram_pullback_app, colimit.ι_pre, multiequalizer.lift_ι,
ι_colim_map_assoc, category.assoc],
dsimp only [functor.op],
let ee : (J.pullback (I.map e).f).obj S.unop ⟶ ⊤ := hom_of_le (order_top.le_top _),
simp_rw [← colimit.w _ ee.op, ← category.assoc],
congr' 1,
ext II,
dsimp,
simp only [limit.lift_π, multifork.of_ι_π_app, multiequalizer.lift_ι, category.assoc],
dsimp [multifork.of_ι],
convert multiequalizer.condition (S.unop.index P)
⟨_, _, _, II.f, 𝟙 _, I.f, II.f ≫ I.f, I.hf, sieve.downward_closed _ I.hf _, by simp⟩,
{ cases I, refl },
{ dsimp [cover.index],
erw [P.map_id, category.comp_id],
refl }
end
lemma is_iso_to_plus_of_is_sheaf (hP : presheaf.is_sheaf J P) : is_iso (J.to_plus P) :=
begin
rw presheaf.is_sheaf_iff_multiequalizer at hP,
resetI,
suffices : ∀ X, is_iso ((J.to_plus P).app X),
{ resetI, apply nat_iso.is_iso_of_is_iso_app },
intros X, dsimp,
suffices : is_iso (colimit.ι (J.diagram P X.unop) (op ⊤)),
{ resetI, apply is_iso.comp_is_iso },
suffices : ∀ (S T : (J.cover X.unop)ᵒᵖ) (f : S ⟶ T), is_iso ((J.diagram P X.unop).map f),
{ resetI, apply is_iso_ι_of_is_initial (initial_op_of_terminal is_terminal_top) },
intros S T e,
have : S.unop.to_multiequalizer P ≫ (J.diagram P (X.unop)).map e =
T.unop.to_multiequalizer P, by { ext, dsimp, simpa },
have : (J.diagram P (X.unop)).map e = inv (S.unop.to_multiequalizer P) ≫
T.unop.to_multiequalizer P, by simp [← this],
rw this, apply_instance,
end
/-- The natural isomorphism between `P` and `P⁺` when `P` is a sheaf. -/
def iso_to_plus (hP : presheaf.is_sheaf J P) : P ≅ J.plus_obj P :=
by letI := is_iso_to_plus_of_is_sheaf J P hP; exact as_iso (J.to_plus P)
@[simp]
lemma iso_to_plus_hom (hP : presheaf.is_sheaf J P) : (J.iso_to_plus P hP).hom = J.to_plus P := rfl
/-- Lift a morphism `P ⟶ Q` to `P⁺ ⟶ Q` when `Q` is a sheaf. -/
def plus_lift {P Q : Cᵒᵖ ⥤ D} (η : P ⟶ Q) (hQ : presheaf.is_sheaf J Q) :
J.plus_obj P ⟶ Q :=
J.plus_map η ≫ (J.iso_to_plus Q hQ).inv
@[simp, reassoc]
lemma to_plus_plus_lift {P Q : Cᵒᵖ ⥤ D} (η : P ⟶ Q) (hQ : presheaf.is_sheaf J Q) :
J.to_plus P ≫ J.plus_lift η hQ = η :=
begin
dsimp [plus_lift],
rw ← category.assoc,
rw iso.comp_inv_eq,
dsimp only [iso_to_plus, as_iso],
rw to_plus_naturality,
end
lemma plus_lift_unique {P Q : Cᵒᵖ ⥤ D} (η : P ⟶ Q) (hQ : presheaf.is_sheaf J Q)
(γ : J.plus_obj P ⟶ Q) (hγ : J.to_plus P ≫ γ = η) : γ = J.plus_lift η hQ :=
begin
dsimp only [plus_lift],
rw [iso.eq_comp_inv, ← hγ, plus_map_comp],
dsimp,
simp,
end
lemma plus_hom_ext {P Q : Cᵒᵖ ⥤ D} (η γ : J.plus_obj P ⟶ Q) (hQ : presheaf.is_sheaf J Q)
(h : J.to_plus P ≫ η = J.to_plus P ≫ γ) : η = γ :=
begin
have : γ = J.plus_lift (J.to_plus P ≫ γ) hQ,
{ apply plus_lift_unique, refl },
rw this,
apply plus_lift_unique, exact h
end
@[simp]
lemma iso_to_plus_inv (hP : presheaf.is_sheaf J P) : (J.iso_to_plus P hP).inv =
J.plus_lift (𝟙 _) hP :=
begin
apply J.plus_lift_unique,
rw [iso.comp_inv_eq, category.id_comp],
refl,
end
@[simp]
lemma plus_map_plus_lift {P Q R : Cᵒᵖ ⥤ D} (η : P ⟶ Q) (γ : Q ⟶ R) (hR : presheaf.is_sheaf J R) :
J.plus_map η ≫ J.plus_lift γ hR = J.plus_lift (η ≫ γ) hR :=
begin
apply J.plus_lift_unique,
rw [← category.assoc, ← J.to_plus_naturality, category.assoc, J.to_plus_plus_lift],
end
end category_theory.grothendieck_topology
|
lemma adjoint_works: fixes f :: "'n::euclidean_space \<Rightarrow> 'm::euclidean_space" assumes lf: "linear f" shows "x \<bullet> adjoint f y = f x \<bullet> y" |
"""
function layerbuilder(d::Int,k::Int,o::Int,n::Int,ftype::String,lastlayer::String = "",ltype::String = "Dense")
Create a chain with `n` layers of `k` neurons with transfer function `ftype`.
The input and output dimensions are `d` and `o`, respectively.
If `lastlayer` is not specified, all layers use the same transfer function.
If `lastlayer` is "linear", then the last layer is forced to be Dense.
It is also possible to specify the dimensions in a vector.
```juliadoctest
julia> FluxExtensions.layerbuilder(4,11,1,3,"relu")
Chain(Dense(4, 11, NNlib.relu), Dense(11, 11, NNlib.relu), Dense(11, 1, NNlib.relu))
julia> FluxExtensions.layerbuilder([4,11,11,1],"relu")
Chain(Dense(4, 11, NNlib.relu), Dense(11, 11, NNlib.relu), Dense(11, 1, NNlib.relu))
julia> FluxExtensions.layerbuilder(4,11,1,3,"relu","tanh")
Chain(Dense(4, 11, NNlib.relu), Dense(11, 11, NNlib.relu), Dense(11, 1, tanh))
# TODO fix this
julia> FluxExtensions.layerbuilder(4,11,1,3,"relu","tanh","ResDense")
Chain(ResDense(Dense(11, 11, NNlib.relu)), ResDense(Dense(11, 11, NNlib.relu)), ResDense(Dense(1, 1, tanh)))
# TODO fix this
julia> FluxExtensions.layerbuilder(4,11,1,3,"relu","linear","ResDense")
Chain(ResDense(Dense(11, 11, NNlib.relu)), ResDense(Dense(11, 11, NNlib.relu)), Dense(11, 1))
```
"""
layerbuilder(k::Vector{Int},l::Vector,f::Vector) = Flux.Chain(map(i -> i[1](i[3],i[4],i[2]),zip(l,f,k[1:end-1],k[2:end]))...)
layerbuilder(d::Int,k::Int,o::Int,n::Int, args...) =
layerbuilder(vcat(d,fill(k,n-1)...,o), args...)
function layerbuilder(ks::Vector{Int},ftype::String,lastlayer::String = "",ltype::String = "Dense")
ftype = (ftype == "linear") ? "identity" : ftype
ls = Array{Any}(fill(eval(:($(Symbol(ltype)))),length(ks)-1))
fs = Array{Any}(fill(eval(:($(Symbol(ftype)))),length(ks)-1))
if !isempty(lastlayer)
fs[end] = (lastlayer == "linear") ? identity : eval(:($(Symbol(lastlayer))))
ls[end] = (lastlayer == "linear") ? Dense : ls[end]
end
layerbuilder(ks,ls,fs)
end |
[STATEMENT]
lemma perpendicular_sym:
shows "perpendicular H1 H2 \<longleftrightarrow> perpendicular H2 H1"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. perpendicular H1 H2 = perpendicular H2 H1
[PROOF STEP]
unfolding perpendicular_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (cos_angle (of_circline H1) (of_circline H2) = 0) = (cos_angle (of_circline H2) (of_circline H1) = 0)
[PROOF STEP]
by (transfer, transfer, auto simp add: field_simps) |
repeat process ACV_megs {
/execute @e[type=creeper,name=ACV_MEG] ~ ~ ~ kill @e[type=armor_stand,name=ACV_Cube,dy=0]
/scoreboard players tag @a[tag=ACV_Blue] remove ACV_Blue
/scoreboard players tag @a[tag=ACV_Orange] remove ACV_Orange
/scoreboard players tag @a[tag=ACV_Purple] remove ACV_Purple
/scoreboard players tag @a[tag=ACV_Red] remove ACV_Red
/scoreboard players tag @a add ACV_Blue {Inventory:[{id:"minecraft:bow",tag:{ench:[{id:48,lvl:6}]}}]}
/scoreboard players tag @a add ACV_Orange {Inventory:[{id:"minecraft:bow",tag:{ench:[{id:48,lvl:7}]}}]}
/scoreboard players tag @a add ACV_Purple {Inventory:[{id:"minecraft:bow",tag:{ench:[{id:48,lvl:8}]}}]}
/scoreboard players tag @a add ACV_Red {Inventory:[{id:"minecraft:bow",tag:{ench:[{id:48,lvl:9}]}}]}
/execute @e[type=creeper,name=ACV_MEG] ~ ~ ~ /scoreboard players tag @a[dy=0] add ACV_megDeletePortal_PARAM
conditional: /setblock ${this + 1} redstone_block
skip
impulse: /setblock ${this - 1} stone
/execute @p[tag=ACV_megDeletePortal_PARAM] ~ ~ ~ execute @p[tag=ACV_Blue,r=0] ~ ~ ~ summon area_effect_cloud ~ ~ ~ {CustomName:"ACV_remote_deletePortals_PARAM",Tags:["ACV_Blue"],Duration:2147483647}
/execute @p[tag=ACV_megDeletePortal_PARAM] ~ ~ ~ execute @p[tag=ACV_Orange,r=0] ~ ~ ~ summon area_effect_cloud ~ ~ ~ {CustomName:"ACV_remote_deletePortals_PARAM",Tags:["ACV_Orange"],Duration:2147483647}
/execute @p[tag=ACV_megDeletePortal_PARAM] ~ ~ ~ execute @p[tag=ACV_Purple,r=0] ~ ~ ~ summon area_effect_cloud ~ ~ ~ {CustomName:"ACV_remote_deletePortals_PARAM",Tags:["ACV_Purple"],Duration:2147483647}
/execute @p[tag=ACV_megDeletePortal_PARAM] ~ ~ ~ execute @p[tag=ACV_Red,r=0] ~ ~ ~ summon area_effect_cloud ~ ~ ~ {CustomName:"ACV_remote_deletePortals_PARAM",Tags:["ACV_Red"],Duration:2147483647}
/scoreboard players tag @a remove ACV_megDeletePortal_PARAM
start ACV_remote_deletePortals
}
|
Sex and the University appeared in The California Aggie from Fall 2003 to March 2004, when the author, Arturo Garcia, was dismissed. SATU dealt with the relationship aspect of college life.
|
\documentclass{article}
% \usepackage{biber}
\usepackage{listings}
\usepackage{qtree}
\author{Henry S. Sjoen}
\title{TDT4120 \\ Fall 2018}
\begin{document}
%.root left right
\Tree[.Top
[ .Left
[.Left Left Right ]
[.Right Left Right ]
]
[ .Right
[.Left Left Right ]
[.Right Left Right ]
]
]
\maketitle
\tableofcontents
\section{Algorithm Design}
\subsection{Divide and Conquer}
Divide and conquer is an algorithm design paradigm based on multi-branch recursion. A divide-and-conquer algorithm works by recursively breaking a problem down into two or more sub-problems of the same or a related type, until these become simple enough to be solved directly; the solutions to the sub-problems are then combined to give a solution to the original problem, as the listing below illustrates.
% \cite{wikiDivide}
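As a concrete illustration (a minimal sketch, not tied to any particular assignment), merge sort follows the paradigm directly: split, recurse, combine.
\begin{lstlisting}[caption=Divide and conquer: merge sort in Julia]
function mergesort(xs)
    length(xs) <= 1 && return xs        # trivial case: solved directly
    mid = div(length(xs), 2)            # divide
    left = mergesort(xs[1:mid])         # conquer the two halves
    right = mergesort(xs[mid+1:end])
    return merge_sorted(left, right)    # combine
end

function merge_sorted(a, b)
    out = similar(a, 0)
    i, j = 1, 1
    while i <= length(a) && j <= length(b)
        if a[i] <= b[j]
            push!(out, a[i]); i += 1
        else
            push!(out, b[j]); j += 1
        end
    end
    append!(out, a[i:end])
    append!(out, b[j:end])
    return out
end
\end{lstlisting}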
\section{Loose Recurrences}
\subsection{The Master-Theorem}
% https://en.wikipedia.org/wiki/Master_theorem_(analysis_of_algorithms)
Generic form, with $a \geq 1$ sub-problems each of size $n/b$ ($b > 1$) and a divide/combine cost of $f(n)$:
$T(n)=a T(\frac{n}{b})+f(n)$
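The solution is determined by comparing $f(n)$ with $n^{\log_b a}$:
\begin{enumerate}
\item If $f(n) = O(n^{\log_b a - \epsilon})$ for some $\epsilon > 0$, then $T(n) = \Theta(n^{\log_b a})$.
\item If $f(n) = \Theta(n^{\log_b a})$, then $T(n) = \Theta(n^{\log_b a} \log n)$.
\item If $f(n) = \Omega(n^{\log_b a + \epsilon})$ for some $\epsilon > 0$, and additionally $a f(n/b) \leq c f(n)$ for some constant $c < 1$ and all sufficiently large $n$, then $T(n) = \Theta(f(n))$.
\end{enumerate}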
\subsection{Recursion Trees}
$T(n)=4T(\frac{n}{2})+n^2$
%.root left right
\Tree[.$n^2$
        [ .$(\frac{n}{2})^2$ $(\frac{n}{4})^2$ $(\frac{n}{4})^2$ $(\frac{n}{4})^2$ $(\frac{n}{4})^2$ ]
        [ .$(\frac{n}{2})^2$ $(\frac{n}{4})^2$ $(\frac{n}{4})^2$ $(\frac{n}{4})^2$ $(\frac{n}{4})^2$ ]
        [ .$(\frac{n}{2})^2$ $(\frac{n}{4})^2$ $(\frac{n}{4})^2$ $(\frac{n}{4})^2$ $(\frac{n}{4})^2$ ]
        [ .$(\frac{n}{2})^2$ $(\frac{n}{4})^2$ $(\frac{n}{4})^2$ $(\frac{n}{4})^2$ $(\frac{n}{4})^2$ ]
]
\begin{lstlisting}
The same tree in ASCII form:
n^2
-(n/2)^2
--(n/4)^2
--(n/4)^2
--(n/4)^2
--(n/4)^2
-(n/2)^2
--(n/4)^2
--(n/4)^2
--(n/4)^2
--(n/4)^2
-(n/2)^2
--(n/4)^2
--(n/4)^2
--(n/4)^2
--(n/4)^2
-(n/2)^2
--(n/4)^2
--(n/4)^2
--(n/4)^2
--(n/4)^2
\end{lstlisting}
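Reading the tree off level by level: level $i$ has $4^i$ nodes, each costing $(n/2^i)^2 = n^2/4^i$, so every level sums to $n^2$. With $\log_2 n$ levels this gives $T(n) = \Theta(n^2 \log n)$, agreeing with case 2 of the master theorem ($n^{\log_2 4} = n^2 = f(n)$).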
\subsection{Variable-switching}
$T(n) = 2T(\sqrt{n})+\log n$
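Substituting $m = \log_2 n$ (so $n = 2^m$) and $S(m) = T(2^m)$ turns this into $S(m) = 2S(m/2) + m$, which is case 2 of the master theorem: $S(m) = \Theta(m \log m)$. Switching back, $T(n) = \Theta(\log n \cdot \log \log n)$.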
\section{Sorting Algorithms}
\subsection{Mergesort}
$\theta(n \log n)$
\subsection{Quicksort}
Quicksort runs in $\theta(n \log n)$ on average and $\theta(n^2)$ in the worst case. The following listing shows recursion over a linked list in Julia; an actual quicksort sketch follows it.
\begin{lstlisting}[caption=Julia example]
function traverse_recursive_max(node, start_value)
highest_value = start_value
if (node.value > highest_value)
highest_value = node.value
end
if node.next == nothing
return highest_value
end
traverse_recursive_max(node.next,highest_value)
end
traversemax(node) = traverse_recursive_max(node,node.value)
\end{lstlisting}
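A simple non-in-place quicksort, using the last element as pivot (a sketch, not an optimized implementation):
\begin{lstlisting}[caption=Quicksort sketch in Julia]
function quicksort(xs)
    length(xs) <= 1 && return xs
    pivot = xs[end]                                  # partition around pivot
    smaller = [x for x in xs[1:end-1] if x <= pivot]
    larger  = [x for x in xs[1:end-1] if x > pivot]
    return vcat(quicksort(smaller), [pivot], quicksort(larger))
end
\end{lstlisting}
The expected running time is $\theta(n \log n)$; consistently bad pivots degrade this to $\theta(n^2)$.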
\section{Code example overview}
\lstlistoflistings
\end{document} |
------------------------------------------------------------------------------
-- Inductive PA properties using the induction principle
------------------------------------------------------------------------------
{-# OPTIONS --exact-split #-}
{-# OPTIONS --no-sized-types #-}
{-# OPTIONS --no-universe-polymorphism #-}
{-# OPTIONS --without-K #-}
module PA.Inductive.PropertiesByInductionI where
open import PA.Inductive.Base
open import PA.Inductive.PropertiesByInduction
open import PA.Inductive.Relation.Binary.EqReasoning
------------------------------------------------------------------------------
+-comm : ∀ m n → m + n ≡ n + m
+-comm m n = ℕ-ind A A0 is m
where
A : ℕ → Set
A i = i + n ≡ n + i
A0 : A zero
A0 = sym (+-rightIdentity n)
is : ∀ i → A i → A (succ i)
is i ih = succ (i + n) ≡⟨ succCong ih ⟩
succ (n + i) ≡⟨ sym (x+Sy≡S[x+y] n i) ⟩
n + succ i ∎
|
{-
This file contains:
- The inductive construction of James.
-}
{-# OPTIONS --safe #-}
module Cubical.HITs.James.Inductive.Base where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Pointed
open import Cubical.Foundations.Equiv
open import Cubical.Foundations.Isomorphism
open import Cubical.Data.Nat
open import Cubical.HITs.SequentialColimit
private
variable
ℓ : Level
module _
(X∙@(X , x₀) : Pointed ℓ) where
  -- The family 𝕁ames n is equivalent to Brunerie's J n
data 𝕁ames : ℕ → Type ℓ where
[] : 𝕁ames 0
_∷_ : {n : ℕ} → X → 𝕁ames n → 𝕁ames (1 + n)
incl : {n : ℕ} → 𝕁ames n → 𝕁ames (1 + n)
incl∷ : {n : ℕ} → (x : X)(xs : 𝕁ames n) → incl (x ∷ xs) ≡ x ∷ incl xs
unit : {n : ℕ} → (xs : 𝕁ames n) → incl xs ≡ x₀ ∷ xs
coh : {n : ℕ} → (xs : 𝕁ames n) → PathP (λ i → incl (unit xs i) ≡ x₀ ∷ incl xs) (unit (incl xs)) (incl∷ x₀ xs)
-- The direct system defined by 𝕁ames
open Sequence
𝕁amesSeq : Sequence ℓ
𝕁amesSeq .space = 𝕁ames
𝕁amesSeq .map = incl
-- The 𝕁ames∞ wanted is the direct colimit of 𝕁ames n
𝕁ames∞ : Type ℓ
𝕁ames∞ = Lim→ 𝕁amesSeq
|
/-
Copyright (c) 2020 Simon Hudon. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Author: Simon Hudon
-/
import Mathlib.PrePort
import Mathlib.Lean3Lib.init.default
import Mathlib.tactic.core
import Mathlib.PostPort
namespace Mathlib
/-!
# `pretty_cases` tactic
When using `induction` and `cases`, `pretty_cases` prints a `"Try
this:"` advice that shows how to structure the proof with
`case { ... }` commands. In the following example, we apply induction on a
permutation assumption about lists. `pretty_cases` gives us a proof
skeleton that explicit selects the branches and explicit names the
new local constants:
```lean
example {α} (xs ys : list α) (h : xs ~ ys) : true :=
begin
induction h,
pretty_cases,
-- Try this:
-- case list.perm.nil :
-- { admit },
-- case list.perm.cons : h_x h_l₁ h_l₂ h_a h_ih
-- { admit },
-- case list.perm.swap : h_x h_y h_l
-- { admit },
-- case list.perm.trans : h_l₁ h_l₂ h_l₃ h_a h_a_1 h_ih_a h_ih_a_1
-- { admit },
end
```
## Main definitions
* `pretty_cases_advice` return `pretty_cases` advice without printing it
* `pretty_cases` main tactic
-/
namespace tactic
/-- Query the proof goal and print the skeleton of a proof by cases. -/
namespace interactive
/--
Query the proof goal and print the skeleton of a proof by
cases.
For example, let us consider the following proof:
```lean
example {α} (xs ys : list α) (h : xs ~ ys) : true :=
begin
induction h,
pretty_cases,
-- Try this:
-- case list.perm.nil :
-- { admit },
-- case list.perm.cons : h_x h_l₁ h_l₂ h_a h_ih
-- { admit },
-- case list.perm.swap : h_x h_y h_l
-- { admit },
-- case list.perm.trans : h_l₁ h_l₂ h_l₃ h_a h_a_1 h_ih_a h_ih_a_1
-- { admit },
end
```
The output helps the user lay out the cases and rename the
introduced variables.
-/
|
module Backend
export contacts, chats, update_contact_list, get_key, set_key, get_state, set_state
# In-memory stores: contact id => per-contact data, chat id => chat data.
contacts = Dict()
chats = Dict()

# Register a contact the first time a message from them is seen.
function update_contact_list(message)
    id = message["message"]["from"]["id"]
    if !haskey(contacts, id)
        contacts[id] = Dict("state" => 0, "message" => message)
    end
end

# Generic accessors for per-contact data.
function get_key(id, key)
    return contacts[id][key]
end

function set_key(id, key, value)
    contacts[id][key] = value
end

# Conversation state, stored under the "state" key.
function get_state(id)
    return get_key(id, "state")
end

function set_state(id, state)
    set_key(id, "state", state)
end
end
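# Example usage (a hypothetical Telegram-style update; the nested
# "message" -> "from" -> "id" layout is what update_contact_list expects):
#
#   using .Backend
#   msg = Dict("message" => Dict("from" => Dict("id" => 42)))
#   update_contact_list(msg)
#   set_state(42, 1)
#   get_state(42)  # -> 1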
|
If two paths are homotopic, then they have the same endpoints. |
# Legendre Functions
Series expansions are used in a variety of circumstances:
- When we need a tractable approximation to some ugly equation
- To transform between equivalent ways of looking at a problem (e.g. time domain vs frequency domain)
- When they are (part of) a solution to a particular class of differential equation
For approximations, there is an important divide between getting the best fit *near a point* (e.g. Taylor series) and getting the best fit *over an interval*. This notebook deals with one example of the latter; there is a separate notebook for Taylor expansions and others for Fourier, Bessel, etc.
## Fitting over an interval
What is the best (tractable) series approximating my function across some range of values? What matters is an overall best fit (e.g. least-squares deviation) across the range, and we can't tolerate wild divergences as with the Taylor series.
There are various series which are useful in different contexts, but a common property is that the terms are *orthogonal* over some interval $[a,b]$. If $f(t)$ is a real-valued function, the *inner product* of two scaled copies is defined as
$$ \langle f(m t),f(n t) \rangle := \int _a^b f(m t)\, f(n t) \, dt $$
For orthogonal functions, this is non-zero if $m=n$ and zero if $m \ne n$, i.e.
$$\langle f(m t),f(n t) \rangle = c_n \delta_{mn}$$
where $\delta$ is the Kronecker delta and $c_n$ is some non-zero constant. If $c_n = 1$ for all $n$, the functions are said to be orthonormal.
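Jumping ahead a little: the Legendre polynomials defined below are orthogonal over $[-1,1]$ with $c_n = \frac{2}{2n+1}$. A quick numerical check with SciPy (which we use throughout this notebook):

```python
from scipy.integrate import quad
from scipy.special import legendre

P2, P3 = legendre(2), legendre(3)
print(quad(lambda t: P2(t) * P3(t), -1, 1)[0])  # ~0: P_2 and P_3 are orthogonal
print(quad(lambda t: P3(t) * P3(t), -1, 1)[0])  # ~2/7 = 2/(2n+1) with n = 3
```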
## The Legendre differential equation
This is of the form
$$ (1 - x^2)y'' -2x y' + l(l+1)y = 0 $$
where $l$ is a constant. The most useful solutions are the Legendre polynomials, where $y = P_l(x)$.
## Legendre Polynomials
These are "just" polynomials, so maybe conceptually simpler than, for example, Bessel functions. Their special feature is that the coefficients are chosen so that they are mutually orthogonal over the range $[-1,1]$.
They are given by Rodrigues' formula
$$ P_n(x) = \frac{1}{2^n n!} \frac{d^n}{dx^n} (x^2 -1)^n $$
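A quick SymPy check that this formula reproduces the built-in `legendre` for $n = 3$:

```python
from sympy import diff, expand, factorial, legendre
from sympy.abc import x

n = 3
rodrigues = expand(diff((x**2 - 1)**n, x, n) / (2**n * factorial(n)))
assert rodrigues == expand(legendre(n, x))  # both give 5*x**3/2 - 3*x/2
```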
They tend to crop up in the sort of problems which naturally use spherical coordinates and/or spherical harmonics, such as fluctuations in the CMB, "sunquakes" in our local star or (at the other end of the scale range) electron orbitals in the hydrogen atom.
## Associated Legendre Functions
The function $P_n^m(x)$ is of degree $n$ and order $m$. It is related to the degree-$n$ polynomial $P_n(x)$ by
$$ P_n^m(x) = (-1)^m (1-x^2)^{m/2}\ \frac{d^m P_n(x)}{dx^m} $$
Order zero functions are just the corresponding Legendre polynomials: $P_n^0(x) \equiv P_n(x)$.
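This identity is easy to verify with SymPy for the first few $n$:

```python
from sympy import legendre, assoc_legendre, simplify
from sympy.abc import x

assert all(simplify(assoc_legendre(n, 0, x) - legendre(n, x)) == 0 for n in range(6))
```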
## Software
Start with a few basics, then we can get mathematical.
```python
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 16})
```
How to work with Legendre functions in Python? A quick Google search turns up quite a few possibilities, though it may not be immediately obvious how these relate to one another:
- `scipy.special.legendre()`
- `scipy.special.lpmn()` and `.lpmv()`
- `numpy.polynomial.legendre`
- `sympy.functions.special.polynomials.legendre()`
- `sympy.functions.special.polynomials.assoc_legendre()`
- `sympy.polys.orthopolys.legendre_poly()`
- `mpmath.legendre()`
### scipy.special
This one is relatively simple. Calling `legendre(n)` returns the degree-$n$ polynomial as a callable `poly1d` object, which can then itself be called with one or more $x$-values.
```python
import scipy.special as sp
P_3_sp = sp.legendre(3)
display(P_3_sp)
```
poly1d([ 2.5, 0. , -1.5, 0. ])
```python
x10 = np.linspace(-1, 1, 10)
display(P_3_sp(x10))
```
array([-1. , -0.00960219, 0.40466392, 0.40740741, 0.16323731,
-0.16323731, -0.40740741, -0.40466392, 0.00960219, 1. ])
For the associated Legendre functions there are a couple of related SciPy functions which take different approaches to vector input and output. `scipy.special.lpmn(m, n, x)` will only take a single scalar $x$, but returns an $(m+1, n+1)$ array of results for all orders $0 \dots m$ and degrees $0 \dots n$. In contrast, `scipy.special.lpmv(m, n, x)` accepts arrays of $x$ and returns results for just the specified $m$ and $n$.
```python
xs = np.linspace(0, 1, 100)
m = 1
n = 2
P_lm, _ = sp.lpmn(m, n, xs[5])
display(P_lm.shape)
P_lmv = sp.lpmv(m, n, xs)
display(P_lmv.shape)
```
### numpy.polynomial
This is less simple and needs more exploration. Note that the constructor takes *coefficients in the Legendre basis*, so `P([3])` below is the constant series $3 P_0(x) = 3$, not $P_3$; to get the third polynomial itself use `P.basis(3)` (equivalently `P([0, 0, 0, 1])`). Start like this, then read whatever documentation you can find.
```python
from numpy.polynomial import Legendre as P
P_3_npl = P([3])
display(P_3_npl)
```
Legendre([3.], domain=[-1, 1], window=[-1, 1])
### sympy.functions.special.polynomials
This is symbolic math, which will give you differentiation, integration, etc, as well as nice $LaTeX$ output. Not so convenient for plotting.
```python
from sympy import legendre, assoc_legendre, init_printing
init_printing()
from sympy.abc import x
display(legendre(3, x))
display(assoc_legendre(3, 2, x))
```
### sympy.polys.orthopolys
Sort of like sympy.functions.special.polynomials, but with some different options.
```python
from sympy import legendre_poly
display(legendre_poly(3))
display(legendre_poly(3, polys=True))
```
### mpmath
This is aimed at arbitrary-precision floating point arithmetic. It doesn't seem to do symbolic math like SymPy or (more surprisingly?) handle array input like SciPy.
If you don't have the `mpmath` package installed, don't worry: this is the only cell that tries to use it.
```python
import mpmath as mp

for x1 in np.arange(0, 1, 0.2):
    display(mp.legendre(3, x1))
```
mpf('0.0')
mpf('-0.28000000000000003')
mpf('-0.44')
mpf('-0.35999999999999988')
mpf('0.08000000000000014')
### Provisional conclusions
It seems like `sympy.functions.special.polynomials` offers the simplest way to do symbolic math, and `scipy.special` the easiest way to do numerical calculations. Other packages no doubt have more sophisticated capabilities but I'm not the right person to judge.
The first few __Legendre polynomials__ look like this. Note that they are alternately even and odd: $P_n(-x) = (-1)^n P_n(x)$.
```python
from IPython.display import Math
from sympy import latex
from sympy.abc import x

for i in range(6):
    l_i = latex(legendre(i, x))
    display(Math('P_{} = {}'.format(i, l_i)))
```
$\displaystyle P_0 = 1$
$\displaystyle P_1 = x$
$\displaystyle P_2 = \frac{3 x^{2}}{2} - \frac{1}{2}$
$\displaystyle P_3 = \frac{5 x^{3}}{2} - \frac{3 x}{2}$
$\displaystyle P_4 = \frac{35 x^{4}}{8} - \frac{15 x^{2}}{4} + \frac{3}{8}$
$\displaystyle P_5 = \frac{63 x^{5}}{8} - \frac{35 x^{3}}{4} + \frac{15 x}{8}$
The first few __associated Legendre functions__:
```python
for i in range(4):
    for j in range(i):
        l_ij = latex(assoc_legendre(i, j, x))
        display(Math('P_{}^{} = {}'.format(i, j, l_ij)))
```
$\displaystyle P_1^0 = x$
$\displaystyle P_2^0 = \frac{3 x^{2}}{2} - \frac{1}{2}$
$\displaystyle P_2^1 = - 3 x \sqrt{- x^{2} + 1}$
$\displaystyle P_3^0 = \frac{5 x^{3}}{2} - \frac{3 x}{2}$
$\displaystyle P_3^1 = - \sqrt{- x^{2} + 1} \left(\frac{15 x^{2}}{2} - \frac{3}{2}\right)$
$\displaystyle P_3^2 = 15 x \left(- x^{2} + 1\right)$
__Plotting__ the first few Legendre polynomials over the range where they are orthogonal:
```python
import scipy.special as sp

xlims = (-1, 1)
x = np.linspace(xlims[0], xlims[1], 1000)

plt.figure(figsize=(9, 9))
for v in range(0, 6):
    plt.plot(x, sp.legendre(v)(x))
plt.xlim(xlims)
plt.ylim((-1.1, 1.1))
plt.legend((r'$\mathcal{P}_0(x)$', r'$\mathcal{P}_1(x)$', r'$\mathcal{P}_2(x)$',
            r'$\mathcal{P}_3(x)$', r'$\mathcal{P}_4(x)$', r'$\mathcal{P}_5(x)$'),
           loc=0)
plt.xlabel('$x$')
plt.ylabel(r'$\mathcal{P}_n(x)$')
plt.title('Plots of the first six Legendre Polynomials')
plt.grid(True)
```
## Spherical coordinates
An interesting use of the associated Legendre functions has $x = \cos(\theta)$. The resulting functions are a component in the spherical harmonics $Y_l^m(\theta, \phi)$, described in another Jupyter notebook in this folder.
We can make polar plots showing the magnitude of $P_l^m(\cos \theta)$ in the direction $\theta$. Here $\theta$ is the angle down from the $+z$ axis. There is no $\phi$ dependency in $P_l^m(\cos \theta)$ so think of these plots as being radially symmetric around the $z$-axis (i.e. rotate them about the vertical axis).
TODO - color-code the plots by the sign of $P_l^m(\cos \theta)$. This would make the nodes clearer to see.
```python
thetas = np.linspace(0, np.pi, 200)
theta_x = np.sin(thetas)
theta_y = np.cos(thetas)

fig = plt.figure(figsize=(15, 15))
for n in range(3):
    for m in range(n+1):
        P_lm = sp.lpmv(m, n, np.cos(thetas))
        x_coords = theta_x * np.abs(P_lm)
        y_coords = theta_y * np.abs(P_lm)
        ax = fig.add_subplot(3, 3, m + 1 + 3*n)
        ax.plot(x_coords, y_coords, 'b-', label='$P_{}^{}$'.format(n, m))
        # reflect the plot across the z-axis
        ax.plot(-x_coords, y_coords, 'b-')
        ax.axis('equal')
        # ax.set_title('$P_{}^{}$'.format(n, m))
        ax.legend()
```
<a id='refs'></a>
## References
- Boas, "Mathematical methods in the physical sciences", 3rd ed, chapter 12
- MathWorld, http://mathworld.wolfram.com/LegendrePolynomial.html and http://mathworld.wolfram.com/AssociatedLegendrePolynomial.html
- Wikipedia, https://en.wikipedia.org/wiki/Legendre_polynomials
- Binney & Tremaine, "Galactic Dynamics", 2nd ed, appendix C.5
- Griffiths & Schroeter, "Introduction to Quantum Mechanics", 3rd ed, section 4.1.2
- Mathews & Walker, "Mathematical Methods of Physics", 2nd ed, section 7.1
|
(* Author: Nan Jiang *)
section \<open>More auxiliary lemmas for Lists Sorted wrt $<$\<close>
theory Sorted_Less2
imports Main "HOL-Data_Structures.Cmp" "HOL-Data_Structures.Sorted_Less"
begin
lemma Cons_sorted_less: "sorted (rev xs) \<Longrightarrow> \<forall>x\<in>set xs. x < p \<Longrightarrow> sorted (rev (p # xs))"
by (induct xs) (auto simp add:sorted_wrt_append)
lemma Cons_sorted_less_nth: "\<forall>x<length xs. xs ! x < p \<Longrightarrow> sorted (rev xs) \<Longrightarrow> sorted (rev (p # xs))"
apply(subgoal_tac "\<forall>x\<in>set xs. x < p")
apply(fastforce dest:Cons_sorted_less)
apply(auto simp add: set_conv_nth)
done
lemma distinct_sorted_rev: "sorted (rev xs) \<Longrightarrow> distinct xs"
by (induct xs) (auto simp add:sorted_wrt_append)
lemma sorted_le2lt:
assumes "List.sorted xs"
and "distinct xs"
shows "sorted xs"
using assms
proof (induction xs)
case Nil then show ?case by auto
next
case (Cons x xs)
note ind_hyp_xs = Cons(1)
note sorted_le_x_xs = Cons(2)
note dist_x_xs = Cons(3)
from dist_x_xs have x_neq_xs: "\<forall>v \<in> set xs. x \<noteq> v"
and dist: "distinct xs" by auto
from sorted_le_x_xs have sorted_le_xs: "List.sorted xs"
and x_le_xs: "\<forall>v \<in> set xs. v \<ge> x" by auto
from x_neq_xs x_le_xs have x_lt_xs: "\<forall>v \<in> set xs. v > x" by fastforce
from ind_hyp_xs[OF sorted_le_xs dist] have "sorted xs" by auto
with x_lt_xs show ?case by auto
qed
lemma sorted_less_sorted_list_of_set: "sorted (sorted_list_of_set S)"
by (auto intro:sorted_le2lt)
lemma distinct_sorted: "sorted xs \<Longrightarrow> distinct xs"
by (induct xs) (auto simp add: sorted_wrt_append)
lemma sorted_less_set_unique:
assumes "sorted xs"
and "sorted ys"
and "set xs = set ys"
shows "xs = ys"
using assms
proof -
from assms(1) have "distinct xs" and "List.sorted xs" by (induct xs) auto
also from assms(2) have "distinct ys" and "List.sorted ys" by (induct ys) auto
ultimately show "xs = ys" using assms(3) by (auto intro: sorted_distinct_set_unique)
qed
lemma sorted_less_rev_set_unique:
assumes "sorted (rev xs)"
and "sorted (rev ys)"
and "set xs = set ys"
shows "xs = ys"
using assms sorted_less_set_unique[of "rev xs" "rev ys"] by auto
lemma sorted_less_set_eq:
assumes "sorted xs "
shows "xs = sorted_list_of_set (set xs)"
using assms
apply(subgoal_tac "sorted (sorted_list_of_set (set xs))")
apply(auto intro: sorted_less_set_unique sorted_le2lt)
done
lemma sorted_less_rev_set_eq:
assumes "sorted (rev xs) "
shows "sorted_list_of_set (set xs) = rev xs"
using assms sorted_less_set_eq[of "rev xs"] by auto
lemma sorted_insort_remove1: "sorted w \<Longrightarrow> (insort a (remove1 a w)) = sorted_list_of_set (insert a (set w)) "
proof-
assume "sorted w"
then have "(sorted_list_of_set (set w - {a})) = remove1 a w" using sorted_less_set_eq
by (fastforce simp add:sorted_list_of_set_remove)
hence "insort a (remove1 a w) = insort a (sorted_list_of_set (set w - {a}))" by simp
then show ?thesis by (auto simp add:sorted_list_of_set_insert)
qed
end |
[STATEMENT]
lemma Deriv_lists[simp]: "c : S \<Longrightarrow> Deriv c (lists S) = lists S"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. c \<in> S \<Longrightarrow> Deriv c (lists S) = lists S
[PROOF STEP]
by(auto simp add: Deriv_def) |
-- @@stderr --
dtrace: failed to compile script test/unittest/funcs/err.D_OP_VFPTR.badop.d: [D_OP_VFPTR] line 23: operator -= requires left-hand scalar operand of known size
|
function imo = cnn_get_im_flow_batch2(images, varargin)
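% CNN_GET_IM_FLOW_BATCH2  Load a batch of stacked optical-flow (u/v) and RGB
% frames for two-stream CNN training/testing (summary inferred from the code
% below). Frames are sampled per video according to opts.frameSample, read
% with vl_imreadjpeg (optionally prefetched and/or uploaded to the GPU), then
% cropped, flipped and resized according to opts.augmentation. Returns the
% assembled batch imo, or {frames, sampled_frame_nr} when only prefetching.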
opts.subTractFlow = 'off';
opts.nFramesPerVid = 1;
opts.numAugments = 1;
opts.frameSample = 'uniformly';
opts.flowDir = '';
opts.imageDir = '';
opts.temporalStride = 0;
opts.imageSize = [227, 227] ;
opts.border = [29, 29] ;
opts.averageImage = [] ;
opts.rgbVariance = [] ;
opts.augmentation = 'croponly' ;
opts.interpolation = 'bilinear' ;
opts.numAugments = 1 ;
opts.numThreads = 0 ;
opts.prefetch = false ;
opts.keepAspect = true;
opts.flowScales = [];
opts.cheapResize = 0;
opts.nFrameStack = 10;
opts.frameList = NaN;
opts.nFrames = [];
opts.subMedian = false;
opts.stretchAspect = 4/3 ;
opts.stretchScale = 1.2 ;
opts.fetchGPU = true ;
[opts, varargin] = vl_argparse(opts, varargin);
flowDir = opts.flowDir;
imgDir = opts.imageDir;
% prefetch is used to load images in a separate thread
prefetch = opts.prefetch & isempty(opts.frameList);
fetchOpts= {'numThreads', opts.numThreads};
if opts.fetchGPU
fetchOpts{end+1} = 'Gpu' ;
end
switch opts.augmentation
case 'croponly'
tfs = [.5 ; .5 ; 0 ];
case 'f5'
tfs = [...
.5 0 0 1 1 .5 0 0 1 1 ;
.5 0 1 0 1 .5 0 1 0 1 ;
0 0 0 0 0 1 1 1 1 1] ;
case 'f25'
[tx,ty] = meshgrid(linspace(0,1,5)) ;
tfs = [tx(:)' ; ty(:)' ; zeros(1,numel(tx))] ;
tfs_ = tfs ;
tfs_(3,:) = 1 ;
tfs = [tfs,tfs_] ;
case 'f25noCtr'
[tx1,ty1] = meshgrid(linspace(.75,1,20)) ;
[tx2,ty2] = meshgrid(linspace(0,.25,20)) ;
tx = [tx1 tx2]; ty = [ty1 ty2];
tfs = [tx(:)' ; ty(:)' ; zeros(1,numel(tx))] ;
tfs_ = tfs ;
tfs_(3,:) = 1 ;
tfs = [tfs,tfs_] ;
end
nStack = opts.imageSize(3);
if iscell(opts.frameList)
im = vl_imreadjpeg(opts.frameList{1}, fetchOpts{:}) ;
sampled_frame_nr = opts.frameList{2};
else
sampleFrameLeftRight = floor(nStack/4); % divide by 4 because of left,right,u,v
frameOffsets = [-sampleFrameLeftRight:sampleFrameLeftRight-1]';
frames = cell(numel(images), nStack, opts.nFramesPerVid);
frames_rgb = cell(numel(images), 1, opts.nFramesPerVid);
sampled_frame_nr = cell(numel(images),1);
for i=1:numel(images)
vid_name = images{i};
nFrames = opts.nFrames(i);
if strcmp(opts.frameSample, 'uniformly')
sampleRate = max(floor((nFrames-nStack/2)/opts.nFramesPerVid),1);
frameSamples = nStack/4+1:sampleRate:nFrames - nStack/4 ;
opts.temporalStride = sampleRate;
frameSamples = vl_colsubset(nStack/4+1:nFrames-nStack/4, opts.nFramesPerVid, 'uniform') ;
opts.temporalStride = frameSamples(2) - frameSamples(1);
elseif strcmp(opts.frameSample, 'temporalStride')
frameSamples = nStack/4+1:opts.temporalStride:nFrames-nStack/4 ;
if length(frameSamples) < opts.nFrameStack,
frameSamples = round(linspace(nStack/4+1, nFrames - nStack/4, opts.nFramesPerVid)) ;
opts.temporalStride = frameSamples(2) - frameSamples(1);
end
elseif strcmp(opts.frameSample, 'random')
frameSamples = randperm(nFrames-nStack/2)+nStack/4;
elseif strcmp(opts.frameSample, 'temporalStrideRandom')
frameSamples = nStack/4 +1:opts.temporalStride:nFrames - nStack/4 ;
if length(frameSamples) < opts.nFrameStack,
frameSamples = round(linspace(nStack/4+1, nFrames - nStack/4, opts.nFrameStack)) ;
opts.temporalStride = frameSamples(2) - frameSamples(1);
end
end
if length(frameSamples) < opts.nFramesPerVid,
if length(frameSamples) > opts.nFrameStack
frameSamples = frameSamples(1:length(frameSamples)-mod(length(frameSamples),opts.nFrameStack));
end
diff = opts.nFramesPerVid - length(frameSamples);
addFrames = 0;
while diff > 0
last_frame = min(frameSamples(end), max(nFrames - nStack/4 - opts.nFrameStack,nStack/4 ));
if mod(addFrames,2) % add to the front
addSamples = nStack/4+1:opts.temporalStride:nFrames - nStack/4;
addSamples = addSamples(1: length(addSamples) - mod(length(addSamples),opts.nFrameStack));
if length(addSamples) > diff, addSamples = addSamples(1:diff); end
else % add to the back
addSamples = fliplr(nFrames - nStack/4 : -opts.temporalStride: nStack/4+1);
addSamples = addSamples(mod(length(addSamples),opts.nFrameStack)+1:length(addSamples));
if length(addSamples) > diff, addSamples = addSamples(end-diff+1:end); end
end
if addFrames > 20
addSamples = round(linspace(nStack/4+1, nFrames - nStack/4, opts.nFrameStack)) ;
end
frameSamples = [frameSamples addSamples];
diff = opts.nFramesPerVid - length(frameSamples);
opts.temporalStride = max(ceil(opts.temporalStride-1), 1);
addFrames = addFrames+1;
end
end
if length(frameSamples) > opts.nFramesPerVid
if strcmp(opts.frameSample, 'temporalStride')
s = fix((length(frameSamples)-opts.nFramesPerVid)/2);
else % random
s = randi(length(frameSamples)-opts.nFramesPerVid);
end
frameSamples = frameSamples(s+1:s+opts.nFramesPerVid);
end
for k = 1:opts.nFramesPerVid
frames_rgb{i,1,k} = [vid_name 'frame' sprintf('%06d.jpg', frameSamples(k))] ;
end
frameSamples = repmat(frameSamples,nStack/2,1) + repmat(frameOffsets,1,size(frameSamples,2));
for k = 1:opts.nFramesPerVid
for j = 1:nStack/2
frames{i,(j-1)*2+1, k} = ['u' filesep vid_name 'frame' sprintf('%06d.jpg', frameSamples(j,k)) ] ;
frames{i,(j-1)*2+2, k} = ['v' frames{i,(j-1)*2+1, k}(2:end)];
end
end
sampled_frame_nr{i} = frameSamples;
end
if iscell(opts.imageDir)
imgDir = opts.imageDir{i};
flowDir = opts.flowDir{i};
end
frames_rgb = strcat([imgDir filesep], frames_rgb);
if ~isempty(flowDir)
frames = strcat([flowDir filesep], frames);
frames = cat(2, frames, frames_rgb);
else
frames = frames_rgb;
end
if opts.numThreads > 0
if prefetch
vl_imreadjpeg(frames, fetchOpts{:}, 'prefetch') ;
imo = {frames sampled_frame_nr} ;
return ;
end
im = vl_imreadjpeg(frames, fetchOpts{:} ) ;
end
end
if strcmp(opts.augmentation, 'none')
szw = cellfun(@(x) size(x,2),im);
szh = cellfun(@(x) size(x,1),im);
h_min = min(szh(:));
w_min = min(szw(:));
sz = [h_min w_min] ;
sz = max(opts.imageSize(1:2), sz);
sz = min(2*opts.imageSize(1:2), sz);
scal = ([h_min w_min] ./ sz);
imo = zeros(sz(1), sz(2), opts.imageSize(3)+3, ...
numel(images), 2 * opts.nFramesPerVid, 'single') ;
if opts.fetchGPU
imo = gpuArray(imo);
end
for i=1:numel(images)
si = 1 ;
for k = 1:opts.nFramesPerVid
if numel(unique(szw)) > 1 || numel(unique(szh)) > 1
for l=1:size(im,2)
im{i,l,k} = im{i,l,k}(1:h_min,1:w_min,:);
end
end
imt = cat(3, im{i,:,k}) ;
if any(scal ~= 1)
imo(:, :, :, i, si) = imresize(cat(3, im{i,:,k}),sz) ;
else
imo(:, :, :, i, si) = imt ;
end
imt = [];
imo(:, :, :, i, si+1) = imo(:, end:-1:1, :, i, si);
imo(:, :, 1:2:nStack, i, si+1) = -imo(:, :, 1:2:nStack, i, si+1) + 255; %invert u if we flip
si = si + 2 ;
end
end
if opts.subMedian
median_flow = median(imo(:,:,1:nStack),1);
median_flow = median(median_flow,2);
imo(:,:,1:nStack,:,:) = bsxfun(@minus, imo(:,:,1:nStack,:,:), median_flow ) ;
imo(:,:,1:nStack,:,:) = bsxfun(@plus, imo(:,:,1:nStack,:,:), 128 ) ;
end
if ~isempty(opts.averageImage)
opts.averageImage = mean(mean(opts.averageImage,1),2) ;
imo = bsxfun(@minus, imo,opts.averageImage) ;
end
return;
end
% augment now
if exist('tfs', 'var')
[~,transformations] = sort(rand(size(tfs,2), numel(images)*opts.nFramesPerVid), 1) ;
end
imo = ( zeros(opts.imageSize(1), opts.imageSize(2), opts.imageSize(3)+3, ...
numel(images), opts.numAugments * opts.nFramesPerVid, 'single') ) ;
if opts.fetchGPU
imo = gpuArray(imo);
end
for i=1:numel(images)
si = 1 ;
szw = cellfun(@(x) size(x,2),im);
szh = cellfun(@(x) size(x,1),im);
h_min = min(szh(:));
w_min = min(szw(:));
if strcmp( opts.augmentation, 'multiScaleRegular')
reg_szs = [256, 224, 192, 168] ;
sz(1) = reg_szs(randi(4)); sz(2) = reg_szs(randi(4));
elseif strcmp( opts.augmentation, 'stretch')
aspect = exp((2*rand-1) * log(opts.stretchAspect)) ;
scale = exp((2*rand-1) * log(opts.stretchScale)) ;
tw = opts.imageSize(2) * sqrt(aspect) * scale ;
th = opts.imageSize(1) / sqrt(aspect) * scale ;
reduce = min([w_min / tw, h_min / th, 1]) ;
sz = round(reduce * [th ; tw]) ;
else
sz = round(min(opts.imageSize(1:2)' .* (.75+0.5*rand(2,1)), [h_min; w_min])) ; % 0.75 +- 0.5, not keep aspect
end
for k = 1:opts.nFramesPerVid
if numel(unique(szw)) > 1 || numel(unique(szh)) > 1
for l=1:size(im,2)
im{i,l,k} = im{i,l,k}(1:h_min,1:w_min,:);
end
end
imt = cat(3, im{i,:,k}) ;
if opts.subMedian
median_flow = median(imt(:,:,1:nStack),1);
median_flow = median(median_flow,2);
imt(:,:,1:nStack) = bsxfun(@minus, imt(:,:,1:nStack), median_flow ) ;
imt(:,:,1:nStack) = bsxfun(@plus, imt(:,:,1:nStack), 128 ) ;
end
% imt = gpuArray(imt);
w = size(imt,2) ;
h = size(imt,1) ;
if ~strcmp(opts.augmentation, 'uniform')
if ~isempty(opts.rgbVariance) % colour jittering only in training case
offset = zeros(size(imt));
offset = bsxfun(@minus, offset, reshape(opts.rgbVariance * randn(opts.imageSize(3),1), 1,1,opts.imageSize(3))) ;
imt = bsxfun(@minus, imt, offset) ;
end
for ai = 1:opts.numAugments
switch opts.augmentation
case 'stretch'
dx = randi(w - sz(2) + 1 ) ;
dy = randi(h - sz(1) + 1 ) ;
flip = rand > 0.5 ;
case 'multiScaleRegular'
dy = [0 h-sz(1) 0 h-sz(1) floor((h-sz(1)+1)/2)] + 1; % 4 corners & centre
dx = [0 w-sz(2) w-sz(2) 0 floor((w-sz(2)+1)/2)] + 1;
corner = randi(5);
dx = dx(corner); dy = dy(corner); % pick one corner of the image
flip = rand > 0.5 ;
case 'f25noCtr'
tf = tfs(:, transformations(mod(i+ai-1, numel(transformations)) + 1)) ;
dx = floor((w - sz(2)) * tf(2)) + 1 ;
dy = floor((h - sz(1)) * tf(1)) + 1 ;
flip = tf(3) ;
otherwise
sz = opts.imageSize(1:2) ;
tf = tfs(:, transformations(mod(ai-1, numel(transformations)) + 1)) ;
dx = floor((w - sz(2)) * tf(2)) + 1 ;
dy = floor((h - sz(1)) * tf(1)) + 1 ;
flip = tf(3) ;
end
if opts.cheapResize
sx = round(linspace(dx, sz(2)+dx-1, opts.imageSize(2))) ;
sy = round(linspace(dy, sz(1)+dy-1, opts.imageSize(1))) ;
else
factor = [opts.imageSize(1)/sz(1) ...
opts.imageSize(2)/sz(2)];
if any(abs(factor - 1) > 0.0001)
imt = imresize(imt(dy:sz(1)+dy-1,dx:sz(2)+dx-1,:), [opts.imageSize(1:2)]);
end
sx = 1:opts.imageSize(2); sy = 1:opts.imageSize(1);
end
if flip
sx = fliplr(sx) ;
imo(:,:,:,i,si) = imt(sy,sx,:) ;
imo(:,:,1:2:nStack,i,si) = -imt(sy,sx,1:2:nStack) + 255; %invert u if we flip
else
imo(:,:,:,i,si) = imt(sy,sx,:) ;
end
si = si + 1 ;
end
else
w = size(imt,2) ; h = size(imt,1) ;
indices_y = [0 h-opts.imageSize(1)] + 1;
indices_x = [0 w-opts.imageSize(2)] + 1;
center_y = floor(indices_y(2) / 2)+1;
center_x = floor(indices_x(2) / 2)+1;
if opts.numAugments == 6, indices_y = center_y;
elseif opts.numAugments == 2, indices_x = []; indices_y = [];
elseif opts.numAugments ~= 10, error('only 6 or 10 uniform crops allowed'); end
for y = indices_y
for x = indices_x
imo(:, :, :, i, si) = ...
imt(y:y+opts.imageSize(1)-1, x:x+opts.imageSize(2)-1, :);
imo(:, :, :, i, si+1) = imo(:, end:-1:1, :, i, si);
imo(:, :, 1:2:nStack, i, si+1) = -imo(:, end:-1:1, 1:2:nStack, i, si) + 255; %invert u if we flip
si = si + 2 ;
end
end
imo(:,:,:, i,si) = imt(center_y:center_y+opts.imageSize(1)-1,center_x:center_x+opts.imageSize(2)-1,:);
imo(:,:,:, i,si+1) = imo(:, end:-1:1, :, i, si);
imo(:,:,1:2:nStack, i,si+1) = -imo(:, end:-1:1, 1:2:nStack, i, si) + 255; %invert u if we flip
si = si + 2;
end
end
end
if ~isempty(opts.averageImage)
imo = bsxfun(@minus, imo, opts.averageImage) ;
end
end
|
theory flash9Rev imports flashPub
begin
section{*Main definitions*}
lemma NI_FAckVsInv9:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_FAck ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 , auto)
lemma NI_InvVsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Inv iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_InvAck_1VsInv9:
(*Rule2VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iRule2 \<le> N" and a3:"iInv1 \<le> N" and a4:"iRule1~=iRule2 "
shows "invHoldForRule' s (inv9 iInv1 ) (NI_InvAck_1 iRule1 iRule2 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by(cut_tac a1 a2 a3 a4, auto)
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_InvAck_1_HomeVsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_InvAck_1_Home iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma NI_InvAck_2VsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_InvAck_2 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma NI_Local_GetX_GetXVsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Local_GetX_GetX iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma NI_Local_GetX_Nak1VsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Local_GetX_Nak1 iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma NI_Local_GetX_Nak2VsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Local_GetX_Nak2 iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma NI_Local_GetX_Nak3VsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Local_GetX_Nak3 iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma NI_Local_GetX_PutX1VsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Local_GetX_PutX1 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_GetX_PutX2VsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Local_GetX_PutX2 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_GetX_PutX3VsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Local_GetX_PutX3 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_GetX_PutX4VsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Local_GetX_PutX4 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_GetX_PutX5VsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Local_GetX_PutX5 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_GetX_PutX6VsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Local_GetX_PutX6 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_GetX_PutX7VsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Local_GetX_PutX7 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_GetX_PutX8VsInv9:
(*Rule2VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iRule2 \<le> N" and a3:"iInv1 \<le> N" and a4:"iRule1~=iRule2 "
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Local_GetX_PutX8 N iRule1 iRule2 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1\<and>(iRule2~=iInv1 )) \<or>((iRule1~=iInv1 )\<and>iRule2=iInv1) \<or>((iRule1~=iInv1 )\<and>(iRule2~=iInv1 )) "
by( cut_tac a1 a2 a3 a4 , auto)
moreover
{assume b1:"(iRule1=iInv1\<and>(iRule2~=iInv1 ))"
have "?P1 s"
apply(cut_tac a1 a2 a3 a4 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 )\<and>iRule2=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 a3 a4 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 )\<and>(iRule2~=iInv1 ))"
have "?P1 s"
apply(cut_tac a1 a2 a3 a4 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_GetX_PutX8_homeVsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Local_GetX_PutX8_home N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_GetX_PutX9VsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Local_GetX_PutX9 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_GetX_PutX10VsInv9:
(*Rule2VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iRule2 \<le> N" and a3:"iInv1 \<le> N" and a4:"iRule1~=iRule2 "
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Local_GetX_PutX10 N iRule1 iRule2 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1\<and>(iRule2~=iInv1 )) \<or>((iRule1~=iInv1 )\<and>iRule2=iInv1) \<or>((iRule1~=iInv1 )\<and>(iRule2~=iInv1 )) "
by( cut_tac a1 a2 a3 a4 , auto)
moreover
{assume b1:"(iRule1=iInv1\<and>(iRule2~=iInv1 ))"
have "?P1 s"
apply(cut_tac a1 a2 a3 a4 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 )\<and>iRule2=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 a3 a4 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 )\<and>(iRule2~=iInv1 ))"
have "?P1 s"
apply(cut_tac a1 a2 a3 a4 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_GetX_PutX10_homeVsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Local_GetX_PutX10_home N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_GetX_PutX11VsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Local_GetX_PutX11 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_Get_GetVsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Local_Get_Get iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma NI_Local_Get_Nak1VsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Local_Get_Nak1 iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma NI_Local_Get_Nak2VsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Local_Get_Nak2 iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma NI_Local_Get_Nak3VsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Local_Get_Nak3 iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma NI_Local_Get_Put1VsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Local_Get_Put1 N iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma NI_Local_Get_Put2VsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Local_Get_Put2 iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma NI_Local_Get_Put3VsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Local_Get_Put3 iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P3 s"
apply( cut_tac a1 a2 b1 , simp)
apply(rule_tac x=" (neg ( andForm ( eqn ( IVar ( Para ''UniMsg_Cmd'' iInv1) ) ( Const UNI_Get )) ( eqn ( IVar ( Para ''CacheState'' iInv1) ) ( Const CACHE_E )) ) ) " in exI,auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P3 s"
apply( cut_tac a1 a2 b1 , simp)
apply(rule_tac x=" (neg ( andForm ( eqn ( IVar ( Para ''CacheState'' iInv1) ) ( Const CACHE_E )) ( eqn ( IVar ( Global ''Dir_local'') ) ( Const true )) ) ) " in exI,auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Local_PutVsInv9:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Local_Put ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P3 s"
apply( cut_tac a1 , simp)
apply(rule_tac x=" (neg ( andForm ( eqn ( IVar ( Para ''CacheState'' iInv1) ) ( Const CACHE_E )) ( eqn ( IVar ( Para ''UniMsg_Cmd'' Home) ) ( Const UNI_Put )) ) ) " in exI,auto)
done
then show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by blast
qed
lemma NI_Local_PutXAcksDoneVsInv9:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Local_PutXAcksDone ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 , auto)
lemma NI_NakVsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Nak iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma NI_Nak_ClearVsInv9:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Nak_Clear ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 , auto)
lemma NI_Nak_HomeVsInv9:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Nak_Home ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 , auto)
lemma NI_Remote_GetX_NakVsInv9:
(*Rule2VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iRule2 \<le> N" and a3:"iInv1 \<le> N" and a4:"iRule1~=iRule2 "
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Remote_GetX_Nak iRule1 iRule2 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by(cut_tac a1 a2 a3 a4, auto)
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Remote_GetX_Nak_HomeVsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Remote_GetX_Nak_Home iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma NI_Remote_GetX_PutXVsInv9:
(*Rule2VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iRule2 \<le> N" and a3:"iInv1 \<le> N" and a4:"iRule1~=iRule2 "
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Remote_GetX_PutX iRule1 iRule2 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1\<and>(iRule2~=iInv1 )) \<or>((iRule1~=iInv1 )\<and>iRule2=iInv1) \<or>((iRule1~=iInv1 )\<and>(iRule2~=iInv1 )) "
by( cut_tac a1 a2 a3 a4 , auto)
moreover
{assume b1:"(iRule1=iInv1\<and>(iRule2~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 a3 a4 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 )\<and>iRule2=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 a3 a4 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 )\<and>(iRule2~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 a3 a4 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Remote_GetX_PutX_HomeVsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Remote_GetX_PutX_Home iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Remote_Get_Nak1VsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Remote_Get_Nak1 iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma NI_Remote_Get_Nak2VsInv9:
(*Rule2VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iRule2 \<le> N" and a3:"iInv1 \<le> N" and a4:"iRule1~=iRule2 "
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Remote_Get_Nak2 iRule1 iRule2 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P2 s"
by(cut_tac a1 a2 a3 a4, auto)
then show "?P1 s\<or>?P2 s\<or>?P3 s"
by auto
qed
lemma NI_Remote_Get_Put1VsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Remote_Get_Put1 iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Remote_Get_Put2VsInv9:
(*Rule2VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iRule2 \<le> N" and a3:"iInv1 \<le> N" and a4:"iRule1~=iRule2 "
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Remote_Get_Put2 iRule1 iRule2 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1\<and>(iRule2~=iInv1 )) \<or>((iRule1~=iInv1 )\<and>iRule2=iInv1) \<or>((iRule1~=iInv1 )\<and>(iRule2~=iInv1 )) "
by( cut_tac a1 a2 a3 a4 , auto)
moreover
{assume b1:"(iRule1=iInv1\<and>(iRule2~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 a3 a4 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 )\<and>iRule2=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 a3 a4 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 )\<and>(iRule2~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 a3 a4 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Remote_PutVsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Remote_Put iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have allCases:"formEval ( eqn ( IVar ( Para ''InvMarked'' iInv1) ) ( Const true )) s \<or>formEval (neg ( eqn ( IVar ( Para ''InvMarked'' iInv1) ) ( Const true )) ) s "
by auto
moreover
{assume c1:"formEval ( eqn ( IVar ( Para ''InvMarked'' iInv1) ) ( Const true )) s"
have "?P1 s"
apply(cut_tac a1 a2 b1 c1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume c1:"formEval (neg ( eqn ( IVar ( Para ''InvMarked'' iInv1) ) ( Const true )) ) s"
have "?P1 s"
apply(cut_tac a1 a2 b1 c1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately have "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_Remote_PutXVsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Remote_PutX iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P3 s"
apply( cut_tac a1 a2 b1 , simp)
apply(rule_tac x=" (neg ( andForm ( eqn ( IVar ( Global ''Dir_Dirty'') ) ( Const false )) ( eqn ( IVar ( Para ''UniMsg_Cmd'' iInv1) ) ( Const UNI_PutX )) ) ) " in exI,auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma NI_ReplaceVsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Replace iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma NI_ReplaceHomeVsInv9:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_ReplaceHome ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 , auto)
lemma NI_ReplaceHomeShrVldVsInv9:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_ReplaceHomeShrVld ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 , auto)
lemma NI_ReplaceShrVldVsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_ReplaceShrVld iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma NI_ShWbVsInv9:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_ShWb N ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P3 s"
apply( cut_tac a1 , simp)
apply(rule_tac x=" (neg ( andForm ( eqn ( IVar ( Para ''CacheState'' iInv1) ) ( Const CACHE_E )) ( eqn ( IVar ( Global ''ShWbMsg_Cmd'') ) ( Const SHWB_ShWb )) ) ) " in exI,auto)
done
then show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by blast
qed
lemma NI_WbVsInv9:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (NI_Wb ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P3 s"
apply( cut_tac a1 , simp)
apply(rule_tac x=" (neg ( andForm ( eqn ( IVar ( Para ''CacheState'' iInv1) ) ( Const CACHE_E )) ( eqn ( IVar ( Global ''WbMsg_Cmd'') ) ( Const WB_Wb )) ) ) " in exI,auto)
done
then show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by blast
qed
lemma PI_Local_GetX_GetX1VsInv9:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (PI_Local_GetX_GetX1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 , auto)
lemma PI_Local_GetX_GetX2VsInv9:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (PI_Local_GetX_GetX2 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 , auto)
lemma PI_Local_GetX_PutX1VsInv9:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (PI_Local_GetX_PutX1 N ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P1 s"
apply(cut_tac a1 , auto)
done
then show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by blast
qed
lemma PI_Local_GetX_PutX2VsInv9:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (PI_Local_GetX_PutX2 N ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P1 s"
apply(cut_tac a1 , auto)
done
then show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by blast
qed
lemma PI_Local_GetX_PutX3VsInv9:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (PI_Local_GetX_PutX3 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P1 s"
apply(cut_tac a1 , auto)
done
then show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by blast
qed
lemma PI_Local_GetX_PutX4VsInv9:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (PI_Local_GetX_PutX4 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P1 s"
apply(cut_tac a1 , auto)
done
then show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by blast
qed
lemma PI_Local_Get_GetVsInv9:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (PI_Local_Get_Get ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 , auto)
lemma PI_Local_Get_PutVsInv9:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (PI_Local_Get_Put ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 , auto)
lemma PI_Local_PutXVsInv9:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (PI_Local_PutX ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have "?P3 s"
apply( cut_tac a1 , simp)
apply(rule_tac x=" (neg ( andForm ( eqn ( IVar ( Para ''CacheState'' iInv1) ) ( Const CACHE_E )) ( eqn ( IVar ( Para ''CacheState'' Home) ) ( Const CACHE_E )) ) ) " in exI,auto)
done
then show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by blast
qed
lemma PI_Local_ReplaceVsInv9:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (PI_Local_Replace ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 , auto)
lemma PI_Remote_GetVsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (PI_Remote_Get iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma PI_Remote_GetXVsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (PI_Remote_GetX iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma PI_Remote_PutXVsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (PI_Remote_PutX iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
proof -
have allCases:"(iRule1=iInv1) \<or>((iRule1~=iInv1 )) "
by( cut_tac a1 a2 , auto)
moreover
{assume b1:"(iRule1=iInv1)"
have "?P1 s"
apply(cut_tac a1 a2 b1 , auto)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
moreover
{assume b1:"((iRule1~=iInv1 ))"
have "?P2 s"
apply(cut_tac a1 a2 b1 , auto intro!:forallVars1 simp add :invHoldForRule2'_def varsOfVar_def)
done
then have "?P1 s\<or> ?P2 s \<or> ?P3 s"
by blast
}
ultimately show "?P1 s\<or> ?P2 s\<or> ?P3 s"
by metis
qed
lemma PI_Remote_ReplaceVsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (PI_Remote_Replace iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma StoreVsInv9:
(*Rule1VsPInv1*)
assumes a1:"iRule1 \<le> N" and a2:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (Store iRule1 ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 a2 , auto)
lemma StoreHomeVsInv9:
(*Rule0VsPInv1*)
assumes a1:"iInv1 \<le> N"
shows "invHoldForRule' s (inv9 iInv1 ) (StoreHome ) (invariants N)" (is " ?P1 s\<or>?P2 s\<or>?P3 s")
by (cut_tac a1 , auto)
end
|
(* This program is free software; you can redistribute it and/or *)
(* modify it under the terms of the GNU Lesser General Public License *)
(* as published by the Free Software Foundation; either version 2.1 *)
(* of the License, or (at your option) any later version. *)
(* *)
(* This program is distributed in the hope that it will be useful, *)
(* but WITHOUT ANY WARRANTY; without even the implied warranty of *)
(* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *)
(* GNU General Public License for more details. *)
(* *)
(* You should have received a copy of the GNU Lesser General Public *)
(* License along with this program; if not, write to the Free *)
(* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA *)
(* 02110-1301 USA *)
Require Export cercle.
Set Implicit Arguments.
Unset Strict Implicit.
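(* Inscribed-angle theorem: the angle at the centre, (OB, OC), is the
   double of the inscribed angle (AB, AC) subtending the same arc. *)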
Theorem angle_inscrit :
forall O A B C : PO,
A <> B ->
A <> C ->
O <> B ->
circonscrit O A B C ->
double_AV (cons_AV (vec A B) (vec A C)) = cons_AV (vec O B) (vec O C).
unfold double_AV in |- *; intros O A B C H H0 H2 H4.
cut (O <> A); intros.
2: apply circonscrit_distinct3 with (2 := H4); auto.
cut (O <> C); intros.
2: apply circonscrit_distinct2 with (2 := H4); auto.
generalize (circonscrit_isocele (O:=O) (A:=A) (B:=B) (C:=C)); intros.
lapply (circonscrit_isocele (O:=O) (A:=A) (B:=B) (C:=C)); auto; intros.
unfold circonscrit in H4.
elim H4; clear H4; intros.
replace (cons_AV (vec O B) (vec O C)) with
(plus (cons_AV (vec O B) (vec O A)) (cons_AV (vec O A) (vec O C))).
generalize (somme_triangle (A:=O) (B:=B) (C:=A)); intros.
replace (cons_AV (vec O B) (vec O A)) with
(plus (image_angle pi)
(opp (plus (cons_AV (vec B A) (vec B O)) (cons_AV (vec A O) (vec A B))))).
generalize (somme_triangle (A:=O) (B:=A) (C:=C)); intros.
replace (cons_AV (vec O A) (vec O C)) with
(plus (image_angle pi)
(opp (plus (cons_AV (vec A C) (vec A O)) (cons_AV (vec C O) (vec C A))))).
replace
(opp (plus (cons_AV (vec B A) (vec B O)) (cons_AV (vec A O) (vec A B))))
with
(plus (opp (cons_AV (vec B A) (vec B O)))
(opp (cons_AV (vec A O) (vec A B)))).
replace
(opp (plus (cons_AV (vec A C) (vec A O)) (cons_AV (vec C O) (vec C A))))
with
(plus (opp (cons_AV (vec A C) (vec A O)))
(opp (cons_AV (vec C O) (vec C A)))).
replace (cons_AV (vec A B) (vec A C)) with
(plus (cons_AV (vec A B) (vec A O)) (cons_AV (vec A O) (vec A C))).
rewrite def_opp; auto.
rewrite def_opp; auto.
rewrite def_opp; auto.
rewrite def_opp; auto.
generalize (isocele_angles_base (A:=O) (B:=A) (C:=B)); auto; intros.
rewrite <- H10; auto.
generalize (isocele_angles_base (A:=O) (B:=C) (C:=A)); auto with geo; intros.
rewrite <- H11; auto with geo.
mesure A B A O.
mesure C A C O.
replace (pi + (x + x) + (pi + (x0 + x0))) with
(pi + pi + (x + x + (x0 + x0))).
replace (pi + pi) with deuxpi; auto.
symmetry in |- *.
rewrite add_mes_compatible; rewrite pi_plus_pi; rewrite <- add_mes_compatible.
replace (0 + (x + x + (x0 + x0))) with (x + x0 + (x + x0)); auto.
ring.
ring.
rewrite Chasles; auto.
rewrite <- opp_plus_plus_opp; auto.
rewrite <- opp_plus_plus_opp; auto.
rewrite <- H9; auto.
rewrite opp_plus_plus_opp; auto.
mesure A C A O.
mesure C O C A.
mesure O A O C.
replace (x1 + (x + x0) + (- x + - x0)) with x1; auto.
ring.
rewrite <- H8; auto.
rewrite opp_plus_plus_opp; auto.
mesure B A B O.
mesure A O A B.
mesure O B O A.
replace (x1 + (x + x0) + (- x + - x0)) with x1; auto.
ring.
rewrite Chasles; auto.
Qed.
Theorem angle_inscrit2 :
forall O A B C : PO,
A <> C ->
B <> C ->
O <> B ->
circonscrit O A B C ->
double_AV (cons_AV (vec C A) (vec C B)) = cons_AV (vec O A) (vec O B).
intros O A B C H H0 H1 H2; try assumption.
cut (O <> A); intros.
2: apply circonscrit_distinct3 with (2 := H2); auto.
apply angle_inscrit; auto.
hcercle H2.
rewrite <- H6; auto.
Qed.
Lemma circonscrit_triangle_non_point :
forall O A B C : PO,
triangle A B C -> circonscrit O A B C -> O <> A /\ O <> B /\ O <> C.
intros.
cut (O <> A); intros.
split; [ try assumption | idtac ].
split; [ try assumption | idtac ].
apply circonscrit_distinct1 with (2 := H0); auto.
apply circonscrit_distinct2 with (2 := H0); auto.
hcercle H0.
deroule_triangle A B C.
red in |- *; intros.
apply H6.
rewrite <- H7.
assert (distance O B = 0); auto with geo.
rewrite <- H2.
rewrite <- H7; auto with geo.
Qed.
Theorem cocyclicite :
forall A B C D : PO,
triangle A B C ->
triangle A B D ->
sont_cocycliques A B C D ->
double_AV (cons_AV (vec C A) (vec C B)) =
double_AV (cons_AV (vec D A) (vec D B)).
unfold sont_cocycliques in |- *; intros.
deroule_triangle A B C.
deroule_triangle A B D.
elim H1; (clear H1; (intros O; intros)).
elim H1; (clear H1; intros).
lapply (circonscrit_triangle_non_point (O:=O) (A:=A) (B:=B) (C:=C)); auto;
intros.
lapply (circonscrit_triangle_non_point (O:=O) (A:=A) (B:=B) (C:=D)); auto;
intros.
elim H12; (clear H12; intros).
elim H13; (clear H13; intros).
elim H11; (clear H11; intros).
elim H15; (clear H15; intros).
lapply (angle_inscrit2 (O:=O) (A:=A) (B:=B) (C:=C)); auto; intros.
lapply (angle_inscrit2 (O:=O) (A:=A) (B:=B) (C:=D)); auto; intros.
rewrite H18; auto.
try exact H1.
try exact H10.
Qed.
Theorem existence_cercle_circonscrit :
forall A B C : PO, triangle A B C -> ex (fun O : PO => circonscrit O A B C).
intros.
deroule_triangle A B C.
soit_mediatrice A B M K.
soit_mediatrice B C J L.
lapply
(mediatrices_triangle_concours (A:=A) (B:=B) (C:=C) (I:=M) (J:=J) (K:=K)
(L:=L)); auto; intros.
elim (def_concours2 (A:=M) (B:=K) (C:=J) (D:=L)); auto; intros.
exists x.
apply circonscrit_permute.
unfold circonscrit in |- *.
elim H19; clear H19; intros.
split; apply mediatrice_isocele.
discrimine J x.
apply milieu_mediatrice; auto.
apply orthogonale_segment_milieu_mediatrice with J; auto.
apply ortho_sym; auto.
apply paralleles_orthogonal with (A := J) (B := L); auto.
apply alignes_paralleles; auto.
apply mediatrice_orthogonale_segment; auto.
apply milieu_mediatrice; auto.
discrimine M x.
apply mediatrice_permute.
apply milieu_mediatrice; auto.
apply mediatrice_permute.
apply orthogonale_segment_milieu_mediatrice with M; auto.
apply ortho_sym; auto.
apply paralleles_orthogonal with (A := M) (B := K); auto.
apply alignes_paralleles; auto.
apply mediatrice_orthogonale_segment; auto.
apply milieu_mediatrice; auto.
Qed.
Lemma existence_cercle_circonscrit_diametre :
forall A B C : PO,
triangle A B C ->
exists O : PO,
(exists D : PO,
circonscrit O A B C /\
cercle_diametre A D C /\ sont_cocycliques A B C D).
intros.
elim existence_cercle_circonscrit with (A := A) (B := B) (C := C);
[ unfold circonscrit, isocele in |- *; intros O H0; try exact H0 | auto ].
exists O.
symetrique O A D.
exists D.
split; [ try assumption | idtac ].
applatit_and.
icercle.
exists O.
icercle.
exists O.
icercle.
Qed.
Lemma cocycliques_trivial :
forall A B C : PO, triangle A B C -> sont_cocycliques A B C A.
icercle.
elim existence_cercle_circonscrit with (A := A) (B := B) (C := C);
[ unfold sont_cocycliques, circonscrit, isocele in |- *; intros O H0;
try exact H0
| auto ].
exists O.
split; [ try assumption | idtac ].
split; [ idtac | auto ].
elim H0; intros H1 H2; try clear H0; try exact H1.
Qed.
#[export] Hint Resolve cocycliques_trivial: geo.
(* soit_circonscrit constructs the center of the circle circumscribed about triangle ABC
and only works if (triangle A B C) is among the hypotheses *)
Ltac soit_circonscrit A B C O :=
elim (existence_cercle_circonscrit (A:=A) (B:=B) (C:=C));
[ intros O; intros;
generalize (circonscrit_triangle_non_point (O:=O) (A:=A) (B:=B) (C:=C));
intros toto; elim toto; clear toto;
[ intros; applatit_and | auto | auto ]
| auto ].
(* deroule_circonscrit only works if (circonscrit O A B C) is among the hypotheses *)
Ltac deroule_circonscrit A B C O :=
elim (circonscrit_triangle_non_point (O:=O) (A:=A) (B:=B) (C:=C));
try assumption; intro; intros toto; elim toto; clear toto;
intros.
Lemma triangle_intersection_mediatrices :
forall A B C B' C' O : PO,
triangle A B C ->
C' <> O ->
B' <> O ->
C' = milieu A B ->
B' = milieu A C -> circonscrit O A B C -> ~ alignes C' O B'.
unfold circonscrit, isocele in |- *; intros.
deroule_triangle A B C.
cut (~ paralleles (droite C' O) (droite B' O)); intros.
red in |- *; intros; apply H9.
rewrite droite_permute; auto.
rewrite (droite_permute (A:=B') (B:=O)); auto.
apply alignes_paralleles; auto with geo.
apply angle_non_paralleles; auto.
elim H4; intros H10 H11; try clear H4; try exact H11.
rewrite
(angles_droites_orthogonales (A:=A) (B:=C) (C:=A) (D:=B) (E:=B') (F:=O)
(G:=C') (I:=O)); auto with geo.
apply ortho_sym.
apply mediatrice_orthogonale_segment; auto.
apply milieu_mediatrice; auto.
apply ortho_sym.
apply mediatrice_orthogonale_segment; auto.
apply milieu_mediatrice; auto.
Qed.
Lemma milieu_centrecirconscrit_orthogonal_segment :
forall A B C A' O : PO,
A' = milieu B C -> circonscrit O A B C -> orthogonal (vec O A') (vec B C).
unfold circonscrit, isocele in |- *; intros.
elim H0; intros H1 H2; try clear H0; try exact H2.
discrimine O A'.
apply ortho_sym.
replace (vec A' A') with zero; [ idtac | Ringvec ].
auto with geo.
discrimine B C.
replace (vec C C) with zero; [ auto with geo | Ringvec ].
apply mediatrice_orthogonale_segment; auto.
unfold mediatrice in |- *.
rewrite <- H1; auto.
apply milieu_mediatrice; auto.
Qed.
Axiom
angles_orthogonal :
forall A B C D : PO,
A <> B ->
C <> D ->
double_AV (cons_AV (vec A B) (vec C D)) = image_angle pi ->
orthogonal (vec A B) (vec C D).
Theorem tangente :
forall A B O T : PO,
A <> B ->
O <> A ->
O <> B ->
A <> T ->
isocele O A B ->
orthogonal (vec A T) (vec O A) ->
double_AV (cons_AV (vec A T) (vec A B)) = cons_AV (vec O A) (vec O B).
intros A B O T H H0 H1 H2 H3 H4; try assumption.
lapply (isocele_angles_base (A:=O) (B:=A) (C:=B)); auto; intros.
lapply (orthogonal_angles (A:=A) (B:=T) (C:=O) (D:=A)); auto; intros.
lapply (somme_triangle (A:=O) (B:=A) (C:=B)); auto; intros.
replace (cons_AV (vec O A) (vec O B)) with
(plus (image_angle pi)
(opp (plus (cons_AV (vec A B) (vec A O)) (cons_AV (vec B O) (vec B A))))).
replace (cons_AV (vec A T) (vec A B)) with
(plus (cons_AV (vec A T) (vec O A)) (cons_AV (vec O A) (vec A B))).
unfold double_AV in |- *.
rewrite <- H5; auto.
replace
(plus (plus (cons_AV (vec A T) (vec O A)) (cons_AV (vec O A) (vec A B)))
(plus (cons_AV (vec A T) (vec O A)) (cons_AV (vec O A) (vec A B)))) with
(plus (plus (cons_AV (vec A T) (vec O A)) (cons_AV (vec A T) (vec O A)))
(plus (cons_AV (vec O A) (vec A B)) (cons_AV (vec O A) (vec A B)))).
replace (plus (cons_AV (vec A T) (vec O A)) (cons_AV (vec A T) (vec O A)))
with (double_AV (cons_AV (vec A T) (vec O A))); auto.
rewrite H6; auto.
rewrite opp_plus_plus_opp; auto.
replace (cons_AV (vec O A) (vec A B)) with
(plus (cons_AV (vec O A) (vec A O)) (cons_AV (vec A O) (vec A B)));
auto.
rewrite <- angle_plat; auto.
rewrite def_opp; auto.
mesure A O A B.
replace (pi + (pi + x + (pi + x))) with (pi + (x + x) + (pi + pi)).
rewrite add_mes_compatible.
replace (pi + pi) with deuxpi; auto.
rewrite pi_plus_pi.
repeat rewrite <- add_mes_compatible.
replace (pi + (x + x) + 0) with (pi + (x + x)); auto.
ring.
ring.
rewrite Chasles; auto.
mesure A T O A.
mesure O A A B.
replace (x + x + (x0 + x0)) with (x + x0 + (x + x0)); auto.
ring.
rewrite Chasles; auto.
rewrite <- H7; auto.
rewrite opp_plus_plus_opp; auto.
rewrite <- H5; auto.
mesure A B A O.
mesure O A O B.
replace (x0 + (x + x) + (- x + - x)) with x0; auto.
ring.
Qed.
Theorem tangente_reciproque :
forall A B O T T' : PO,
A <> B ->
O <> A ->
O <> B ->
A <> T' ->
isocele O A B ->
orthogonal (vec A T) (vec O A) ->
double_AV (cons_AV (vec A T') (vec A B)) = cons_AV (vec O A) (vec O B) ->
alignes A T T'.
intros A B O T T' H H0 H1 H3 H4 H5 H6; try assumption.
discrimine A T.
apply alignes_angle; auto.
unfold double_AV in |- *.
replace (cons_AV (vec A T) (vec A T')) with
(plus (cons_AV (vec A T) (vec A B)) (cons_AV (vec A B) (vec A T')));
auto.
replace
(plus (plus (cons_AV (vec A T) (vec A B)) (cons_AV (vec A B) (vec A T')))
(plus (cons_AV (vec A T) (vec A B)) (cons_AV (vec A B) (vec A T')))) with
(plus (plus (cons_AV (vec A T) (vec A B)) (cons_AV (vec A T) (vec A B)))
(plus (cons_AV (vec A B) (vec A T')) (cons_AV (vec A B) (vec A T'))));
auto.
replace (plus (cons_AV (vec A B) (vec A T')) (cons_AV (vec A B) (vec A T')))
with (cons_AV (vec O B) (vec O A)); auto.
rewrite <- (def_opp (A:=O) (B:=A) (C:=O) (D:=B)); auto.
rewrite <- (tangente (A:=A) (B:=B) (O:=O) (T:=T)); auto.
unfold double_AV in |- *.
rewrite opp_plus_plus_opp; auto.
mesure A T A B.
replace (x + x + (- x + - x)) with 0; auto.
ring.
rewrite <- (def_opp (A:=O) (B:=A) (C:=O) (D:=B)); auto.
rewrite <- H6.
unfold double_AV in |- *.
rewrite opp_plus_plus_opp; auto.
rewrite def_opp; auto.
mesure A T A B.
mesure A B A T'.
replace (x + x + (x0 + x0)) with (x + x0 + (x + x0)); auto.
ring.
rewrite Chasles; auto.
Qed.
Theorem unicite_circonscrit_triangle :
forall A B C O O1 : PO,
triangle A B C -> circonscrit O A B C -> circonscrit O1 A B C -> O = O1.
intros.
deroule_triangle A B C.
soit_mediatrice A B M K.
soit_mediatrice B C J L.
lapply
(mediatrices_triangle_concours (A:=A) (B:=B) (C:=C) (I:=M) (J:=J) (K:=K)
(L:=L)); auto; intros.
cut (M <> J); intros.
generalize H1; unfold circonscrit in |- *; intros.
generalize H0; unfold circonscrit in |- *; intros.
lapply (circonscrit_isocele (O:=O) (A:=A) (B:=B) (C:=C)); auto; intros.
lapply (circonscrit_isocele (O:=O1) (A:=A) (B:=B) (C:=C)); auto; intros.
elim H22; (clear H22; intros).
elim H23; (clear H23; intros).
lapply (milieu_mediatrice (A:=A) (B:=B) (M:=M)); auto; intros.
lapply (milieu_mediatrice (A:=B) (B:=C) (M:=J)); auto; intros.
lapply (mediatrice_droite (A:=A) (B:=B) (I:=M) (J:=K) (K:=O)); auto; intros.
lapply (mediatrice_droite (A:=B) (B:=C) (I:=J) (J:=L) (K:=O)); auto; intros.
lapply (mediatrice_droite (A:=A) (B:=B) (I:=M) (J:=K) (K:=O1)); auto; intros.
lapply (mediatrice_droite (A:=B) (B:=C) (I:=J) (J:=L) (K:=O1)); auto; intros.
cut
(double_AV (cons_AV (vec M K) (vec J L)) =
double_AV (cons_AV (vec B A) (vec B C))).
intros H101.
elim (classic (alignes J M L)); intros.
apply (concours_unique (A:=M) (B:=J) (A1:=K) (B1:=M) (I:=O) (J:=O1)).
red in |- *; intros; apply H2.
cut
(double_AV (cons_AV (vec M K) (vec M J)) =
double_AV (cons_AV (vec B A) (vec B C))); intros.
apply permute_alignes; auto.
apply alignes_angle; auto.
rewrite <- H36.
apply angle_alignes; auto.
rewrite <- H101.
halignes H34 x.
absurd (J = M); auto.
cut (vec J L = mult_PP (- x) (vec M J)); intros.
cut (vec M K = mult_PP 1 (vec M K)); intros.
apply angles_et_colinearite with (5 := H38) (6 := H37); auto.
Ringvec.
rewrite H36.
Ringvec.
auto.
apply H30; auto.
discrimine O M.
discrimine O J.
apply alignes_ordre_cycle; auto.
apply alignes_ordre_cycle; auto.
apply alignes_trans with (B := L); auto with geo.
apply H32; auto.
discrimine O1 M.
discrimine O1 J.
apply alignes_ordre_cycle; auto.
apply alignes_ordre_cycle; auto.
apply alignes_trans with (B := L); auto with geo.
apply (concours_unique (A:=J) (B:=M) (A1:=L) (B1:=K) (I:=O) (J:=O1)); auto.
red in |- *; intros; apply H34.
apply alignes_ordre_permute; auto.
discrimine O M.
discrimine O K.
apply alignes_ordre_cycle; auto with geo.
discrimine O1 M.
discrimine O1 K.
apply alignes_ordre_cycle; auto with geo.
apply angles_droites_orthogonales; auto.
cut (orthogonal (vec M K) (vec A B)); intros; auto with geo.
apply mediatrice_orthogonale_segment; auto.
apply ortho_sym; auto.
apply mediatrice_orthogonale_segment; auto.
apply deux_milieux_distincts with (2 := H11) (3 := H18); auto.
Qed.
Lemma circonscrit_mediatrice :
forall O A B C : PO,
circonscrit O A B C ->
mediatrice A B O /\ mediatrice B C O /\ mediatrice A C O.
unfold circonscrit, isocele, mediatrice in |- *; intros; auto.
elim H; intros; auto.
split; [ assumption | split; [ idtac | try assumption ] ].
rewrite <- H0; auto.
Qed.
Require Export rotation_plane.
Theorem reciproque_cocyclicite :
forall A B C D : PO,
triangle A B C ->
triangle A B D ->
double_AV (cons_AV (vec C A) (vec C B)) =
double_AV (cons_AV (vec D A) (vec D B)) -> sont_cocycliques A B C D.
unfold sont_cocycliques in |- *; intros.
deroule_triangle A B C.
deroule_triangle A B D.
soit_circonscrit A B C O2.
soit_circonscrit A B D O1.
mesure C A C B.
lapply (angle_inscrit2 (O:=O2) (A:=A) (B:=B) (C:=C)); auto; intros.
lapply (angle_inscrit2 (O:=O1) (A:=A) (B:=B) (C:=D)); auto; intros.
exists O2.
split; [ try assumption | idtac ].
unfold circonscrit in |- *.
split; [ try assumption | idtac ].
elim H10; auto.
cut (double_AV (cons_AV (vec C A) (vec C B)) <> image_angle 0);
[ intros H50 | idtac ].
cut (O2 = O1); intros.
rewrite H21.
elim H12; auto.
soit_mediatrice A B M J.
elim (circonscrit_mediatrice (O:=O2) (A:=A) (B:=B) (C:=C)); try assumption;
intros H60 H61.
elim H61; clear H61; intros H61 H62.
elim (circonscrit_mediatrice (O:=O1) (A:=A) (B:=B) (C:=D)); try assumption;
intros H70 H71.
elim H71; clear H71; intros H71 H72.
lapply (mediatrice_droite (A:=A) (B:=B) (I:=M) (J:=J) (K:=O2)); auto; intros.
lapply (mediatrice_droite (A:=A) (B:=B) (I:=M) (J:=J) (K:=O1)); auto; intros.
elim (existence_rotation_Ia A M (pisurdeux + - x)); intros K; intros.
cut (A <> K); intros.
cut (alignes A K O2); intros.
cut (alignes A K O1); intros.
elim (classic (alignes A K M)); intros.
apply (concours_unique (A:=M) (B:=A) (A1:=J) (B1:=K) (I:=O2) (J:=O1)); auto.
apply orthogonal_non_alignes; auto.
lapply (mediatrice_orthogonale_segment (A:=A) (B:=B) (M:=M) (N:=J)); auto;
intros.
elim (orthogonal_segment_milieu (A:=A) (B:=B) (C:=M) (D:=J) (I:=M)); auto;
intros.
auto with geo.
apply ortho_sym.
apply H35; auto.
apply milieu_mediatrice; auto.
apply permute_alignes; auto.
apply permute_alignes; auto.
apply (concours_unique (A:=A) (B:=M) (A1:=K) (B1:=J) (I:=O2) (J:=O1)); auto.
assert (alignes M J O2); auto with geo.
assert (alignes M J O1); auto with geo.
discrimine K O1.
apply alignes_angle; [ auto | auto | idtac ].
unfold double_AV in |- *.
replace (cons_AV (vec A K) (vec A O1)) with
(plus (cons_AV (vec A K) (vec A M)) (cons_AV (vec A M) (vec A O1))).
generalize (somme_triangle (A:=O1) (B:=A) (C:=B)); intros.
generalize (isocele_angles_base (A:=O1) (B:=A) (C:=B)); auto; intros.
generalize (angles_milieu (A:=O1) (B:=A) (C:=B) (I:=M)); auto; intros.
rewrite <- H36; auto.
replace
(plus (plus (cons_AV (vec A K) (vec A M)) (cons_AV (vec A B) (vec A O1)))
(plus (cons_AV (vec A K) (vec A M)) (cons_AV (vec A B) (vec A O1)))) with
(plus (plus (cons_AV (vec A K) (vec A M)) (cons_AV (vec A K) (vec A M)))
(plus (cons_AV (vec A B) (vec A O1)) (cons_AV (vec A B) (vec A O1)))).
replace (plus (cons_AV (vec A B) (vec A O1)) (cons_AV (vec A B) (vec A O1)))
with (plus (image_angle pi) (opp (cons_AV (vec O1 A) (vec O1 B)))).
rewrite def_opp; auto.
rewrite <- (mes_oppx (A:=O1) (B:=A) (C:=O1) (D:=B) (x:=x + x)); auto.
rewrite <- (mes_oppx (A:=A) (B:=M) (C:=A) (D:=K) (x:=pisurdeux + - x)); auto.
repeat rewrite <- add_mes_compatible.
replace pi with (pisurdeux + pisurdeux); auto.
replace
(- (pisurdeux + - x) + - (pisurdeux + - x) +
(pisurdeux + pisurdeux + - (x + x))) with 0; auto.
ring.
elim (rotation_def (I:=A) (A:=M) (B:=K) (a:=pisurdeux + - x)); auto.
rewrite <- H20; auto.
rewrite <- H1; unfold double_AV in |- *.
rewrite <- H16.
rewrite add_mes_compatible; auto.
rewrite <- H34; auto.
rewrite <- H35; auto.
mesure A B A O1.
mesure O1 A O1 B.
replace (x1 + (x0 + x0) + - x1) with (x0 + x0); auto.
ring.
mesure A K A M.
mesure A B A O1.
replace (x0 + x0 + (x1 + x1)) with (x0 + x1 + (x0 + x1)); auto.
ring.
rewrite Chasles; auto.
discrimine K O2.
apply alignes_angle; [ auto | auto | idtac ].
unfold double_AV in |- *.
replace (cons_AV (vec A K) (vec A O2)) with
(plus (cons_AV (vec A K) (vec A M)) (cons_AV (vec A M) (vec A O2))).
generalize (somme_triangle (A:=O2) (B:=A) (C:=B)); intros.
generalize (isocele_angles_base (A:=O2) (B:=A) (C:=B)); auto; intros.
generalize (angles_milieu (A:=O2) (B:=A) (C:=B) (I:=M)); auto; intros.
rewrite <- H35; auto.
replace
(plus (plus (cons_AV (vec A K) (vec A M)) (cons_AV (vec A B) (vec A O2)))
(plus (cons_AV (vec A K) (vec A M)) (cons_AV (vec A B) (vec A O2)))) with
(plus (plus (cons_AV (vec A K) (vec A M)) (cons_AV (vec A K) (vec A M)))
(plus (cons_AV (vec A B) (vec A O2)) (cons_AV (vec A B) (vec A O2)))).
replace (plus (cons_AV (vec A B) (vec A O2)) (cons_AV (vec A B) (vec A O2)))
with (plus (image_angle pi) (opp (cons_AV (vec O2 A) (vec O2 B)))).
rewrite def_opp; auto.
rewrite <- (mes_oppx (A:=O2) (B:=A) (C:=O2) (D:=B) (x:=x + x)); auto.
rewrite <- (mes_oppx (A:=A) (B:=M) (C:=A) (D:=K) (x:=pisurdeux + - x)); auto.
repeat rewrite <- add_mes_compatible.
replace pi with (pisurdeux + pisurdeux); auto.
replace
(- (pisurdeux + - x) + - (pisurdeux + - x) +
(pisurdeux + pisurdeux + - (x + x))) with 0; auto.
ring.
elim (rotation_def (I:=A) (A:=M) (B:=K) (a:=pisurdeux + - x)); auto.
rewrite <- H19; auto.
unfold double_AV in |- *.
rewrite <- H16.
rewrite add_mes_compatible; auto.
rewrite <- H33; auto.
rewrite <- H34; auto.
mesure A B A O2.
mesure O2 A O2 B.
replace (x1 + (x0 + x0) + - x1) with (x0 + x0); auto.
ring.
mesure A K A M.
mesure A B A O2.
replace (x0 + x0 + (x1 + x1)) with (x0 + x1 + (x0 + x1)); auto.
ring.
rewrite Chasles; auto.
apply image_distinct_centre with (2 := H30); auto.
red in |- *; intros; apply H2.
apply alignes_ordre_cycle; auto.
apply alignes_angle; auto.
Qed. |
lemma continuous_inj_imp_mono:
  fixes f :: "'a::linear_continuum_topology \<Rightarrow> 'b::linorder_topology"
  assumes x: "a < x" "x < b"
    and cont: "continuous_on {a..b} f"
    and inj: "inj_on f {a..b}"
  shows "(f a < f x \<and> f x < f b) \<or> (f b < f x \<and> f x < f a)"
(*
Copyright (C) 2017 M.A.L. Marques
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
*)
(* type: gga_exc *)
$include "gga_x_ityh.mpl"
ityh_attenuation := a -> attenuation_yukawa(a):
|
#ifndef OPENMC_TALLIES_FILTER_MU_H
#define OPENMC_TALLIES_FILTER_MU_H
#include <string>
#include <vector>

#include <gsl/gsl>

#include "openmc/tallies/filter.h"
namespace openmc {
//==============================================================================
//! Bins the incoming-outgoing direction cosine. This is only used for scatter
//! reactions.
//==============================================================================
class MuFilter : public Filter
{
public:
  //----------------------------------------------------------------------------
  // Constructors, destructors

  ~MuFilter() = default;

  //----------------------------------------------------------------------------
  // Methods

  std::string type() const override {return "mu";}

  void from_xml(pugi::xml_node node) override;

  void get_all_bins(const Particle& p, TallyEstimator estimator, FilterMatch& match)
    const override;

  void to_statepoint(hid_t filter_group) const override;

  std::string text_label(int bin) const override;

  //----------------------------------------------------------------------------
  // Accessors

  void set_bins(gsl::span<double> bins);

private:
  //----------------------------------------------------------------------------
  // Data members

  std::vector<double> bins_;
};
} // namespace openmc
#endif // OPENMC_TALLIES_FILTER_MU_H
|
import face_recognition
import cv2
import numpy as np

# This is a demo of running face recognition on live video from your webcam. It's a little more complicated than the
# other example, but it includes some basic performance tweaks to make things run a lot faster:
#   1. Process each video frame at 1/4 resolution (though still display it at full resolution)
#   2. Only detect faces in every other frame of video.

def humanFace():
    # Get a reference to webcam #0 (the default one)
    video_capture = cv2.VideoCapture(0)

    # Load a sample photo
    person1_image = face_recognition.load_image_file("abby.PNG")
    person1_encoding = face_recognition.face_encodings(person1_image)[0]

    # Create arrays of known face encodings and their names
    known_face_encodings = [
        person1_encoding
    ]
    known_face_names = [
        "MasterWard"
    ]

    # Initialize some variables
    face_locations = []
    face_encodings = []
    face_names = []
    process_this_frame = True

    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()

        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        rgb_small_frame = small_frame[:, :, ::-1]

        # Only process every other frame of video to save time
        if process_this_frame:
            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition.face_locations(rgb_small_frame)
            face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)

            face_names = []
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
                name = "Unknown"

                # # If a match was found in known_face_encodings, just use the first one.
                # if True in matches:
                #     first_match_index = matches.index(True)
                #     name = known_face_names[first_match_index]

                # Or instead, use the known face with the smallest distance to the new face
                face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
                best_match_index = np.argmin(face_distances)
                if matches[best_match_index]:
                    name = known_face_names[best_match_index]
                    # Release the webcam handle before returning the recognized name
                    video_capture.release()
                    cv2.destroyAllWindows()
                    return name
                face_names.append(name)

        process_this_frame = not process_this_frame

        # Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Release handle to the webcam
    video_capture.release()
    cv2.destroyAllWindows()

def catFace():
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalcatface.xml')
    cap = cv2.VideoCapture(0)

    # loop runs if capturing has been initialized.
    while True:
        # reads frames from a camera
        ret, img = cap.read()

        # convert to gray scale of each frames
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Detects faces of different sizes in the input image
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)

        for (x, y, w, h) in faces:
            # To draw a rectangle in a face
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 255, 0), 2)
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = img[y:y + h, x:x + w]

        # Display an image in a window
        cv2.imshow('img', img)

        # Wait for Esc key to stop
        k = cv2.waitKey(30) & 0xff
        if k == 27:
            break

    # Close the window
    cap.release()

    # De-allocate any associated memory usage
    cv2.destroyAllWindows()

def main():
    print("finding the human face")
    name = humanFace()
    print(name)
    print("Finding cat face")
    # Find the cat here
    catFace()

main()
|
{-# OPTIONS --cubical --safe #-}
module Cubical.Structures.Ring where
open import Cubical.Foundations.Prelude
open import Cubical.Foundations.Equiv
open import Cubical.Foundations.HLevels
open import Cubical.Data.Sigma
open import Cubical.Foundations.SIP renaming (SNS-PathP to SNS)
open import Cubical.Structures.NAryOp
open import Cubical.Structures.Monoid hiding (⟨_⟩)
open import Cubical.Structures.AbGroup hiding (⟨_⟩)
open import Cubical.Structures.Pointed
private
  variable
    ℓ ℓ' : Level
raw-ring-structure : Type ℓ → Type ℓ
raw-ring-structure X = (X → X → X) × X × (X → X → X)
-- Maybe this is not the best way? (Suggestions welcome, maybe having raw-monoid-iso defined?)
raw-ring-is-SNS : SNS {ℓ} raw-ring-structure _
raw-ring-is-SNS = join-SNS (nAryFunIso 2) (nAryFunSNS 2)
                           (join-iso pointed-iso (nAryFunIso 2))
                           (join-SNS pointed-iso pointed-is-SNS (nAryFunIso 2) (nAryFunSNS 2))
ring-axioms : (X : Type ℓ) (s : raw-ring-structure X) → Type ℓ
ring-axioms X (_+_ , ₁ , _·_) = abelian-group-axioms X _+_ ×
                           monoid-axioms X (₁ , _·_) ×
                           ((x y z : X) → x · (y + z) ≡ (x · y) + (x · z)) ×
                           ((x y z : X) → (x + y) · z ≡ (x · z) + (y · z))
ring-structure : Type ℓ → Type ℓ
ring-structure = add-to-structure raw-ring-structure ring-axioms
Ring : Type (ℓ-suc ℓ)
Ring {ℓ} = TypeWithStr ℓ ring-structure
ring-iso : StrIso ring-structure ℓ
ring-iso = add-to-iso (join-iso (nAryFunIso 2) (join-iso pointed-iso (nAryFunIso 2))) ring-axioms
ring-axioms-isProp : (X : Type ℓ) (s : raw-ring-structure X) → isProp (ring-axioms X s)
ring-axioms-isProp X (_+_ , ₁ , _·_) = isPropΣ (abelian-group-axioms-isProp X _+_)
                     λ _ → isPropΣ (monoid-axioms-are-Props X (₁ , _·_))
                     λ { (isSetX , _) → isPropΣ (isPropΠ3 (λ _ _ _ → isSetX _ _))
                                        λ _ → isPropΠ3 (λ _ _ _ → isSetX _ _)}
ring-is-SNS : SNS {ℓ} ring-structure ring-iso
ring-is-SNS = add-axioms-SNS _ ring-axioms-isProp raw-ring-is-SNS
RingPath : (M N : Ring {ℓ}) → (M ≃[ ring-iso ] N) ≃ (M ≡ N)
RingPath = SIP ring-is-SNS
-- Rings have an abelian group
Ring→AbGroup : Ring {ℓ} → AbGroup {ℓ}
Ring→AbGroup (R , (_+_ , _) , +AbGroup , _) = R , _+_ , +AbGroup
-- Rings have a monoid
Ring→Monoid : Ring {ℓ} → Monoid {ℓ}
Ring→Monoid (R , (_ , ₁ , _·_) , _ , ·Monoid , _) = R , (₁ , _·_) , ·Monoid
-- Ring extractors
⟨_⟩ : Ring {ℓ} → Type ℓ
⟨ R , _ ⟩ = R
module _ (R : Ring {ℓ}) where
  ring+-operation = abgroup-operation (Ring→AbGroup R)
  ring-is-set = abgroup-is-set (Ring→AbGroup R)
  ring+-assoc = abgroup-assoc (Ring→AbGroup R)
  ring+-id = abgroup-id (Ring→AbGroup R)
  ring+-rid = abgroup-rid (Ring→AbGroup R)
  ring+-lid = abgroup-lid (Ring→AbGroup R)
  ring+-inv = abgroup-inv (Ring→AbGroup R)
  ring+-rinv = abgroup-rinv (Ring→AbGroup R)
  ring+-linv = abgroup-linv (Ring→AbGroup R)
  ring+-comm = abgroup-comm (Ring→AbGroup R)
  ring·-operation = monoid-operation (Ring→Monoid R)
  ring·-assoc = monoid-assoc (Ring→Monoid R)
  ring·-id = monoid-id (Ring→Monoid R)
  ring·-rid = monoid-rid (Ring→Monoid R)
  ring·-lid = monoid-lid (Ring→Monoid R)
module ring-syntax where
  ring+-operation-syntax : (R : Ring {ℓ}) → ⟨ R ⟩ → ⟨ R ⟩ → ⟨ R ⟩
  ring+-operation-syntax R = ring+-operation R

  infixr 14 ring+-operation-syntax
  syntax ring+-operation-syntax R x y = x +⟨ R ⟩ y

  ring·-operation-syntax : (R : Ring {ℓ}) → ⟨ R ⟩ → ⟨ R ⟩ → ⟨ R ⟩
  ring·-operation-syntax R = ring·-operation R

  infixr 18 ring·-operation-syntax
  syntax ring·-operation-syntax R x y = x ·⟨ R ⟩ y
open ring-syntax
ring-rdist : (R : Ring {ℓ}) (x y z : ⟨ R ⟩) → x ·⟨ R ⟩ (y +⟨ R ⟩ z) ≡ (x ·⟨ R ⟩ y) +⟨ R ⟩ (x ·⟨ R ⟩ z)
ring-rdist (_ , _ , _ , _ , P , _) = P
ring-ldist : (R : Ring {ℓ}) (x y z : ⟨ R ⟩) → (x +⟨ R ⟩ y) ·⟨ R ⟩ z ≡ (x ·⟨ R ⟩ z) +⟨ R ⟩ (y ·⟨ R ⟩ z)
ring-ldist (_ , _ , _ , _ , _ , P) = P
-- Ring ·syntax
module ring-·syntax (R : Ring {ℓ}) where
  infixr 14 _+_
  infixr 18 _·_
  infix  15 -_

  _+_ = ring+-operation R
  _·_ = ring·-operation R
  ₀ = ring+-id R
  ₁ = ring·-id R
  -_ = ring+-inv R
|
State Before: α : Type u_1
inst✝¹ : StrictOrderedSemiring α
inst✝ : Archimedean α
x : α
n : ℕ
h : x ≤ n • 1
⊢ x ≤ ↑n State After: no goals Tactic: rwa [← nsmul_one] |
B.e0 = 12.69; % [V]
B.e1 = -3.14; % [V]
B.e2 = 1.58; % [V]
B.A = 1.53; % [V]
B.B = 29.89; % [Ah^(-1)]
B.V0 = 12.59; % [V]
B.Q0 = 0.034; % [Ah]
B.Qf = 1.20; % [Ah]
B.R = 0.061; % [Ohm]
B.tau = 1.95; % [s]
B.p1 = 1.31e-7;
B.p2 = 4.03e-15;
B.p3 = -1.22e-23;
B.p4 = 1.65e-31;
Pboard = 0;
% Pboard = ;
nominal_V = @(Q) B.e0 + B.e1*(Q/B.Qf) + B.e2*(Q/B.Qf)^2; %[V]
power = @(rpm) 0.73*(B.p1*rpm^2 + B.p2*rpm^4 + B.p3*rpm^6 + B.p4*rpm^8); %[W]
Q = B.Q0;
V = B.V0;
i = zeros(5,1);
w1 = 800*30/pi; %rad/s to rpm
w2 = 800*30/pi;
w3 = 800*30/pi;
w4 = 800*30/pi;
window_size = 5;
b = (1/window_size)*ones(1,window_size);
a = 1;
dt = 0.1;
t = 0;
figure;
while Q < B.Qf
    % Sum board and motors power
    t = t + dt;
    Pow = Pboard + power(w1) + power(w2) + power(w3) + power(w4);
    i = circshift(i, -1);
    i(end) = Pow/V;
    signal_filt = filter(b,a,i);
    i_filt = signal_filt(end);
    Q = Q + i_filt*dt/3600; %[Ah]
    Vf = B.A*exp(-B.B*(B.Qf-B.Q0-Q));
    V_nom = nominal_V(Q);
    V = V_nom - B.R*i_filt - Vf;
    plot(t,V,'bo', t,Q,'ro');
    hold on;
end
|
lemma null_set_Diff:
  assumes "B \<in> null_sets M" "A \<in> sets M"
  shows "B - A \<in> null_sets M"
[STATEMENT]
lemma eq_ffilter:
"(ffilter P A = ffilter Q A) = (\<forall>x. x |\<in>| A \<longrightarrow> P x = Q x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (ffilter P A = ffilter Q A) = (\<forall>x. x |\<in>| A \<longrightarrow> P x = Q x)
[PROOF STEP]
by transfer auto |
#ifndef __BOOST_PYTHON_STDCON_MAP_ARG_INCLUDED__
#define __BOOST_PYTHON_STDCON_MAP_ARG_INCLUDED__
#include <boost/python.hpp>
#include <associative_arg.hpp>
#include <map>
namespace boost::python {
template<typename Key, typename Value, typename Compare, typename Alloc>
struct arg_from_python<std::map<Key, Value, Compare, Alloc> const&> :
    stdcon::associative_copied_from_python<std::map<Key, Value, Compare, Alloc>>
{
    typedef stdcon::associative_copied_from_python<std::map<Key, Value, Compare, Alloc>> base_type;
    arg_from_python(PyObject *pyobj) : base_type(pyobj) {}
};

template<typename Key, typename Value, typename Compare, typename Alloc>
struct arg_from_python<std::map<Key, Value, Compare, Alloc> &&> :
    stdcon::associative_copied_from_python<std::map<Key, Value, Compare, Alloc>>
{
    typedef stdcon::associative_copied_from_python<std::map<Key, Value, Compare, Alloc>> base_type;
    arg_from_python(PyObject *pyobj) : base_type(pyobj) {}
};

template<typename Key, typename Value, typename Compare, typename Alloc>
struct arg_from_python<std::map<Key, Value, Compare, Alloc>> :
    stdcon::associative_copied_from_python<std::map<Key, Value, Compare, Alloc>>
{
    typedef stdcon::associative_copied_from_python<std::map<Key, Value, Compare, Alloc>> base_type;
    arg_from_python(PyObject *pyobj) : base_type(pyobj) {}
};
namespace converter {
template<typename Key, typename Value, typename Compare, typename Alloc>
struct expected_pytype_for_arg<std::map<Key, Value, Compare, Alloc> const&> : expecting_pydict {};
template<typename Key, typename Value, typename Compare, typename Alloc>
struct expected_pytype_for_arg<std::map<Key, Value, Compare, Alloc> &&> : expecting_pydict {};
template<typename Key, typename Value, typename Compare, typename Alloc>
struct expected_pytype_for_arg<std::map<Key, Value, Compare, Alloc>> : expecting_pydict {};
}
}
#endif
|
We stock a wide variety of sunglasses including many designer labels. All our sunglasses offer 100% UV protection. Come and have a look! |
State Before: 𝕜 : Type u_2
inst✝⁵ : NontriviallyNormedField 𝕜
E : Type u_3
inst✝⁴ : NormedAddCommGroup E
inst✝³ : NormedSpace 𝕜 E
F : Type u_1
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace 𝕜 F
p : FormalMultilinearSeries 𝕜 E F
r : ℝ≥0∞
f : E → F
x : E
s : Set E
inst✝ : CompleteSpace F
h : AnalyticOn 𝕜 f s
n : ℕ
⊢ AnalyticOn 𝕜 (_root_.iteratedFDeriv 𝕜 n f) s State After: case zero
𝕜 : Type u_2
inst✝⁵ : NontriviallyNormedField 𝕜
E : Type u_3
inst✝⁴ : NormedAddCommGroup E
inst✝³ : NormedSpace 𝕜 E
F : Type u_1
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace 𝕜 F
p : FormalMultilinearSeries 𝕜 E F
r : ℝ≥0∞
f : E → F
x : E
s : Set E
inst✝ : CompleteSpace F
h : AnalyticOn 𝕜 f s
⊢ AnalyticOn 𝕜 (_root_.iteratedFDeriv 𝕜 Nat.zero f) s
case succ
𝕜 : Type u_2
inst✝⁵ : NontriviallyNormedField 𝕜
E : Type u_3
inst✝⁴ : NormedAddCommGroup E
inst✝³ : NormedSpace 𝕜 E
F : Type u_1
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace 𝕜 F
p : FormalMultilinearSeries 𝕜 E F
r : ℝ≥0∞
f : E → F
x : E
s : Set E
inst✝ : CompleteSpace F
h : AnalyticOn 𝕜 f s
n : ℕ
IH : AnalyticOn 𝕜 (_root_.iteratedFDeriv 𝕜 n f) s
⊢ AnalyticOn 𝕜 (_root_.iteratedFDeriv 𝕜 (Nat.succ n) f) s Tactic: induction' n with n IH State Before: case zero
𝕜 : Type u_2
inst✝⁵ : NontriviallyNormedField 𝕜
E : Type u_3
inst✝⁴ : NormedAddCommGroup E
inst✝³ : NormedSpace 𝕜 E
F : Type u_1
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace 𝕜 F
p : FormalMultilinearSeries 𝕜 E F
r : ℝ≥0∞
f : E → F
x : E
s : Set E
inst✝ : CompleteSpace F
h : AnalyticOn 𝕜 f s
⊢ AnalyticOn 𝕜 (_root_.iteratedFDeriv 𝕜 Nat.zero f) s State After: case zero
𝕜 : Type u_2
inst✝⁵ : NontriviallyNormedField 𝕜
E : Type u_3
inst✝⁴ : NormedAddCommGroup E
inst✝³ : NormedSpace 𝕜 E
F : Type u_1
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace 𝕜 F
p : FormalMultilinearSeries 𝕜 E F
r : ℝ≥0∞
f : E → F
x : E
s : Set E
inst✝ : CompleteSpace F
h : AnalyticOn 𝕜 f s
⊢ AnalyticOn 𝕜 (↑(LinearIsometryEquiv.symm (continuousMultilinearCurryFin0 𝕜 E F)) ∘ f) s Tactic: rw [iteratedFDeriv_zero_eq_comp] State Before: case zero
𝕜 : Type u_2
inst✝⁵ : NontriviallyNormedField 𝕜
E : Type u_3
inst✝⁴ : NormedAddCommGroup E
inst✝³ : NormedSpace 𝕜 E
F : Type u_1
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace 𝕜 F
p : FormalMultilinearSeries 𝕜 E F
r : ℝ≥0∞
f : E → F
x : E
s : Set E
inst✝ : CompleteSpace F
h : AnalyticOn 𝕜 f s
⊢ AnalyticOn 𝕜 (↑(LinearIsometryEquiv.symm (continuousMultilinearCurryFin0 𝕜 E F)) ∘ f) s State After: no goals Tactic: exact ((continuousMultilinearCurryFin0 𝕜 E F).symm : F →L[𝕜] E[×0]→L[𝕜] F).comp_analyticOn h State Before: case succ
𝕜 : Type u_2
inst✝⁵ : NontriviallyNormedField 𝕜
E : Type u_3
inst✝⁴ : NormedAddCommGroup E
inst✝³ : NormedSpace 𝕜 E
F : Type u_1
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace 𝕜 F
p : FormalMultilinearSeries 𝕜 E F
r : ℝ≥0∞
f : E → F
x : E
s : Set E
inst✝ : CompleteSpace F
h : AnalyticOn 𝕜 f s
n : ℕ
IH : AnalyticOn 𝕜 (_root_.iteratedFDeriv 𝕜 n f) s
⊢ AnalyticOn 𝕜 (_root_.iteratedFDeriv 𝕜 (Nat.succ n) f) s State After: case succ
𝕜 : Type u_2
inst✝⁵ : NontriviallyNormedField 𝕜
E : Type u_3
inst✝⁴ : NormedAddCommGroup E
inst✝³ : NormedSpace 𝕜 E
F : Type u_1
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace 𝕜 F
p : FormalMultilinearSeries 𝕜 E F
r : ℝ≥0∞
f : E → F
x : E
s : Set E
inst✝ : CompleteSpace F
h : AnalyticOn 𝕜 f s
n : ℕ
IH : AnalyticOn 𝕜 (_root_.iteratedFDeriv 𝕜 n f) s
⊢ AnalyticOn 𝕜 (↑(continuousMultilinearCurryLeftEquiv 𝕜 (fun x => E) F) ∘ _root_.fderiv 𝕜 (_root_.iteratedFDeriv 𝕜 n f))
s Tactic: rw [iteratedFDeriv_succ_eq_comp_left] State Before: case succ
𝕜 : Type u_2
inst✝⁵ : NontriviallyNormedField 𝕜
E : Type u_3
inst✝⁴ : NormedAddCommGroup E
inst✝³ : NormedSpace 𝕜 E
F : Type u_1
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace 𝕜 F
p : FormalMultilinearSeries 𝕜 E F
r : ℝ≥0∞
f : E → F
x : E
s : Set E
inst✝ : CompleteSpace F
h : AnalyticOn 𝕜 f s
n : ℕ
IH : AnalyticOn 𝕜 (_root_.iteratedFDeriv 𝕜 n f) s
⊢ AnalyticOn 𝕜 (↑(continuousMultilinearCurryLeftEquiv 𝕜 (fun x => E) F) ∘ _root_.fderiv 𝕜 (_root_.iteratedFDeriv 𝕜 n f))
s State After: case h.e'_9.h.e'_4
𝕜 : Type u_2
inst✝⁵ : NontriviallyNormedField 𝕜
E : Type u_3
inst✝⁴ : NormedAddCommGroup E
inst✝³ : NormedSpace 𝕜 E
F : Type u_1
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace 𝕜 F
p : FormalMultilinearSeries 𝕜 E F
r : ℝ≥0∞
f : E → F
x : E
s : Set E
inst✝ : CompleteSpace F
h : AnalyticOn 𝕜 f s
n : ℕ
IH : AnalyticOn 𝕜 (_root_.iteratedFDeriv 𝕜 n f) s
⊢ ↑(continuousMultilinearCurryLeftEquiv 𝕜 (fun x => E) F) = ↑?g
case g
𝕜 : Type u_2
inst✝⁵ : NontriviallyNormedField 𝕜
E : Type u_3
inst✝⁴ : NormedAddCommGroup E
inst✝³ : NormedSpace 𝕜 E
F : Type u_1
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace 𝕜 F
p : FormalMultilinearSeries 𝕜 E F
r : ℝ≥0∞
f : E → F
x : E
s : Set E
inst✝ : CompleteSpace F
h : AnalyticOn 𝕜 f s
n : ℕ
IH : AnalyticOn 𝕜 (_root_.iteratedFDeriv 𝕜 n f) s
⊢ (E →L[𝕜] ContinuousMultilinearMap 𝕜 (fun i => E) F) →L[𝕜] ContinuousMultilinearMap 𝕜 (fun x => E) F Tactic: convert @ContinuousLinearMap.comp_analyticOn 𝕜 E
?_ (ContinuousMultilinearMap 𝕜 (fun _ : Fin (n + 1) ↦ E) F)
?_ ?_ ?_ ?_ ?_ ?_ ?_ ?_
s ?g IH.fderiv State Before: case h.e'_9.h.e'_4
𝕜 : Type u_2
inst✝⁵ : NontriviallyNormedField 𝕜
E : Type u_3
inst✝⁴ : NormedAddCommGroup E
inst✝³ : NormedSpace 𝕜 E
F : Type u_1
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace 𝕜 F
p : FormalMultilinearSeries 𝕜 E F
r : ℝ≥0∞
f : E → F
x : E
s : Set E
inst✝ : CompleteSpace F
h : AnalyticOn 𝕜 f s
n : ℕ
IH : AnalyticOn 𝕜 (_root_.iteratedFDeriv 𝕜 n f) s
⊢ ↑(continuousMultilinearCurryLeftEquiv 𝕜 (fun x => E) F) = ↑?g
case g
𝕜 : Type u_2
inst✝⁵ : NontriviallyNormedField 𝕜
E : Type u_3
inst✝⁴ : NormedAddCommGroup E
inst✝³ : NormedSpace 𝕜 E
F : Type u_1
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace 𝕜 F
p : FormalMultilinearSeries 𝕜 E F
r : ℝ≥0∞
f : E → F
x : E
s : Set E
inst✝ : CompleteSpace F
h : AnalyticOn 𝕜 f s
n : ℕ
IH : AnalyticOn 𝕜 (_root_.iteratedFDeriv 𝕜 n f) s
⊢ (E →L[𝕜] ContinuousMultilinearMap 𝕜 (fun i => E) F) →L[𝕜] ContinuousMultilinearMap 𝕜 (fun x => E) F State After: case h.e'_9.h.e'_4
𝕜 : Type u_2
inst✝⁵ : NontriviallyNormedField 𝕜
E : Type u_3
inst✝⁴ : NormedAddCommGroup E
inst✝³ : NormedSpace 𝕜 E
F : Type u_1
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace 𝕜 F
p : FormalMultilinearSeries 𝕜 E F
r : ℝ≥0∞
f : E → F
x : E
s : Set E
inst✝ : CompleteSpace F
h : AnalyticOn 𝕜 f s
n : ℕ
IH : AnalyticOn 𝕜 (_root_.iteratedFDeriv 𝕜 n f) s
⊢ ↑(continuousMultilinearCurryLeftEquiv 𝕜 (fun x => E) F) =
↑↑(ContinuousLinearEquiv.mk (continuousMultilinearCurryLeftEquiv 𝕜 (fun x => E) F).toLinearEquiv) Tactic: case g =>
exact ↑(continuousMultilinearCurryLeftEquiv 𝕜 (fun _ : Fin (n + 1) => E) F) State Before: case h.e'_9.h.e'_4
𝕜 : Type u_2
inst✝⁵ : NontriviallyNormedField 𝕜
E : Type u_3
inst✝⁴ : NormedAddCommGroup E
inst✝³ : NormedSpace 𝕜 E
F : Type u_1
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace 𝕜 F
p : FormalMultilinearSeries 𝕜 E F
r : ℝ≥0∞
f : E → F
x : E
s : Set E
inst✝ : CompleteSpace F
h : AnalyticOn 𝕜 f s
n : ℕ
IH : AnalyticOn 𝕜 (_root_.iteratedFDeriv 𝕜 n f) s
⊢ ↑(continuousMultilinearCurryLeftEquiv 𝕜 (fun x => E) F) =
↑↑(ContinuousLinearEquiv.mk (continuousMultilinearCurryLeftEquiv 𝕜 (fun x => E) F).toLinearEquiv) State After: no goals Tactic: rfl State Before: 𝕜 : Type u_2
inst✝⁵ : NontriviallyNormedField 𝕜
E : Type u_3
inst✝⁴ : NormedAddCommGroup E
inst✝³ : NormedSpace 𝕜 E
F : Type u_1
inst✝² : NormedAddCommGroup F
inst✝¹ : NormedSpace 𝕜 F
p : FormalMultilinearSeries 𝕜 E F
r : ℝ≥0∞
f : E → F
x : E
s : Set E
inst✝ : CompleteSpace F
h : AnalyticOn 𝕜 f s
n : ℕ
IH : AnalyticOn 𝕜 (_root_.iteratedFDeriv 𝕜 n f) s
⊢ (E →L[𝕜] ContinuousMultilinearMap 𝕜 (fun i => E) F) →L[𝕜] ContinuousMultilinearMap 𝕜 (fun x => E) F State After: no goals Tactic: exact ↑(continuousMultilinearCurryLeftEquiv 𝕜 (fun _ : Fin (n + 1) => E) F) |
[STATEMENT]
lemma minus_lucas_lehmer_ring: "\<ominus>\<^bsub>lucas_lehmer_ring\<^esub> x = (case x of (a, b) \<Rightarrow> (-a, -b))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<ominus>\<^bsub>lucas_lehmer_ring\<^esub> x = (case x of (a, b) \<Rightarrow> (- a, - b))
[PROOF STEP]
by (rule sym, rule sum_zero_eq_neg)
(auto simp: case_prod_unfold lucas_lehmer_ring_def lucas_lehmer_add'_def) |
# Python Practice Lecture 16 MATH 342W Queens College - K-fold Cross Validation
## Author: Amir ElTabakh
## Date: April 5, 2022
## Agenda:
* The K Tradeoff
* Reducing variance with Cross Validation (i.e. K-fold CV)
## The K Tradeoff
$K$ determines how large the training set is relative to the test set when you're doing honest validation for an algorithm. For now, let's not use K-fold CV, but only examine one split at a time.
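Before simulating, here is a tiny sketch of how K pins down the train/test sizes (the n = 50 and the candidate K values match what we use below):

```python
# a minimal sketch: with n observations and K folds, K-1 parts go to
# training and 1 part goes to testing
n = 50
for K in [2, 5, 10, 25, 50]:
    n_test = n // K
    n_train = n - n_test
    print(f"K = {K:2d}: n_train = {n_train:2d}, n_test = {n_test:2d}")
```

Consider this simulated dataset with 50 observations: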
```python
# importing dependencies
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error # calculates RMSE
import pandas as pd
import random
# ignore warnings
import warnings
warnings.filterwarnings("ignore")
# Data viz
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_palette(sns.color_palette("colorblind")) # setting color palette
sns.set(rc={"figure.figsize":(10, 6)}) #width=10, #height=6
```
```python
# setting seed
np.random.seed(2022)
# constants
n = 50
xmin = 0
xmax = 4
# data vecs
x = np.random.uniform(low = xmin, high = xmax, size = n)
y = [2 + 3 * i ** 2 + np.random.normal(0, 0.8) for i in x]
# convert to df
Xy = pd.DataFrame({'y': y, 'x': x})
# snapshot
Xy.head()
```
| | y | x |
|---:|---:|---:|
| 0 | 1.475622 | 0.037434 |
| 1 | 13.308573 | 1.996231 |
| 2 | 3.327321 | 0.453535 |
| 3 | 1.945917 | 0.199896 |
| 4 | 23.797992 | 2.741630 |
```python
from plotnine import ggplot, aes, geom_point
# plot the data
data_plot = ggplot(Xy) + aes(x = x, y = y) + geom_point()
data_plot
```
Note how $f(x)$ is quadratic and there is random noise which is "ignorance error". The random noise will be part of generalization error and can never go away.
If we use OLS with no derived features, then we can at most get $h^*(x)$. Let's see what $h^*(x) = \beta_0 + \beta_1 x$ truly is. To do this, we imagine we see an absolute ton of data and run OLS on it.
```python
# setting seed
np.random.seed(1738)
n_hidden = 1e6
# data vecs
x_hidden = np.random.uniform(low = xmin, high = xmax, size = int(n_hidden))
y_hidden = [2 + 3 * i ** 2 + np.random.normal(0, 0.8) for i in x_hidden]
# convert to dataframes
x_hidden = pd.DataFrame(x_hidden)
y_hidden = pd.DataFrame(y_hidden)
# h_*
h_star_model = LinearRegression().fit(x_hidden, y_hidden)
print(h_star_model.intercept_[0], h_star_model.coef_[0][0])
```
-5.7301681875688715 11.925324444538758
The fact that $\beta = [-6~12]^\top$ can actually be derived with calculus: minimize $\int_0^4 ((2 + 3x^2) - (b_0 + b_1 x))^2 \, dx$ over $b_0$ and $b_1$ explicitly.
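For the curious, here is that calculus sketched out. Differentiating under the integral with respect to $b_0$ and $b_1$ and setting both partials to zero yields the normal equations

$$
\begin{aligned}
\int_0^4 (2 + 3x^2)\, dx = 72 &= 4 b_0 + 8 b_1, \\
\int_0^4 (2 + 3x^2)\, x \, dx = 208 &= 8 b_0 + \tfrac{64}{3} b_1,
\end{aligned}
$$

and solving the pair gives $b_1 = 12$ and $b_0 = -6$, i.e. $\beta = [-6~12]^\top$, matching the estimate from the huge sample.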
Plotting that over $\mathbb{D}$ we obtain
```python
from plotnine import geom_abline
data_plot + geom_abline(
    intercept = h_star_model.intercept_[0],
    slope = h_star_model.coef_[0][0],
    color = 'green')
```
That is the best we're going to get. However, $g_{final}$ falls far short of it:
```python
# g_final: OLS fit on all n = 50 observations
g_final_model = LinearRegression().fit(Xy[['x']], Xy[['y']])
print(g_final_model.intercept_[0], g_final_model.coef_[0][0])
```
-5.779880916597136 12.040200380319078
The actual standard error of g_final can be estimated by imagining tons of future observations:
```python
y_hat_g_final = g_final_model.predict(x_hidden)
gen_error_true = np.std(y_hidden - y_hat_g_final)
gen_error_true[0]
```
3.895685515342658
The model $g$ can vary quite a bit as we subsample $\mathbb{D}$ which is what happens when you do train-test splits. It varies a lot because there is large misspecification error. If the model was correctly specified, the results of everything that follows will be less impressive. But in the real world - is your model ever correctly specified? is $f \in \mathcal{H}$?? NO. So this is more realistic.
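To see this variability concretely, here is a small sketch (reusing the `Xy` dataframe from above; the seed is arbitrary) that fits OLS on two disjoint random halves of $\mathbb{D}$ and compares the fitted coefficients:

```python
# fit OLS on two disjoint random halves of D and compare the coefficients
np.random.seed(1)
perm = np.random.permutation(n)
half1, half2 = perm[:n // 2], perm[n // 2:]
for half in (half1, half2):
    m = LinearRegression().fit(Xy[['x']].iloc[half], Xy[['y']].iloc[half])
    print(m.intercept_[0], m.coef_[0][0])
```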
Now let's let K be small. Let K = 2, meaning an even 50-50 split of train and test.

Sets, initialized with `set(...)` (parentheses) rather than square brackets, allow us to subtract one collection of indices from the other using the `-` operator.
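A quick toy illustration of the idiom:

```python
# set difference: everything in `indices` that is not in `index_train`
indices = set(range(5))       # {0, 1, 2, 3, 4}
index_train = {0, 2, 4}
print(indices - index_train)  # {1, 3}
```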
```python
# setting seed
random.seed(17388) # two different libraries to set seeds for
# constants
K = 2
prop_train = (K - 1) / K
n_train = int(prop_train * n)
# train test split indices
indices = set([i for i in range(n)])
index_train = set(random.sample(indices, n_train))
index_test = indices - index_train
# split based on indices
x_train = pd.DataFrame([x[i] for i in index_train]) # cast from set to list
y_train = pd.DataFrame([y[i] for i in index_train])
x_test = pd.DataFrame([x[i] for i in index_test])
y_test = pd.DataFrame([y[i] for i in index_test])
# fitting model
g_mod = LinearRegression().fit(x_train, y_train)
# yhat via test set
y_hat_g_test = g_mod.predict(x_test)
# yhat via train set
y_hat_g_train = g_mod.predict(x_train)
# standard error
gen_error_K_2_test = np.std(y_test - y_hat_g_test) # test
gen_error_K_2_train = np.std(y_train - y_hat_g_train) # train
print(gen_error_true[0]) # true standard error
print(gen_error_K_2_test[0]) # tested standard error
```
3.895685515342658
3.866651151341103
Although I cooked the books by setting the seed, this realization makes sense. If K = 2, I build the model g with half the data that the model g_final gets. Less data to train on => higher generalization error. How about if K is large? Let's say $K = \frac{n}{2}$, meaning $n_{train} = 48$ and $n_{test} = 2$.
```python
# setting seed
np.random.seed(7777)
random.seed(7777) # two different libraries to set seeds for
# constants
K = n / 2
prop_train = (K - 1) / K
n_train = int(prop_train * n)
# train test split indices
indices = set([i for i in range(n)])
index_train = set(random.sample(indices, n_train))
index_test = indices - index_train
# split based on indices
x_train = pd.DataFrame([x[i] for i in index_train]) # cast from set to list
y_train = pd.DataFrame([y[i] for i in index_train])
x_test = pd.DataFrame([x[i] for i in index_test])
y_test = pd.DataFrame([y[i] for i in index_test])
# fitting model
g_mod = LinearRegression().fit(x_train, y_train)
# yhat via test set
y_hat_g_test = g_mod.predict(x_test)
# yhat via train set
y_hat_g_train = g_mod.predict(x_train)
# standard error
gen_error_K_2_test = np.std(y_test - y_hat_g_test) # test
gen_error_K_2_train = np.std(y_train - y_hat_g_train) # train
print(gen_error_true[0]) # true standard error
print(gen_error_K_2_test[0]) # tested standard error
print(gen_error_K_2_train[0]) # train standard error
```
3.895685515342658
4.721271620787243
3.9750058046527084
Although I cooked the books again by setting the seed, this also makes sense. More data to train on means less error, but still more error than using all the data. In reality, there is massive variance over specific splits! Let's run the simulation with these two K values many times.

While we're at it, let's do all K's! Well, what are all the valid K's? If you want to keep the fold sizes equal, K can be any divisor of n except the trivial 1 (since n = 1 * n): a $K = 1$ would mean there's no split at all! How to find divisors? Of course there's a package for this.
```python
# import divisors() method from sympy
from sympy import divisors
Kuniques = list(set(divisors(n)) - set([1]))
Kuniques
```
[2, 5, 10, 50, 25]
But should we also include the trivial n? Yes, K = n is indeed a valid divisor, and this type of CV is called "leave-one-out cross validation" (LOOCV).
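As an aside, scikit-learn also ships a `LeaveOneOut` splitter, equivalent to `KFold` with `n_splits = n`. Here is a minimal sketch of LOOCV (the absolute residual serves as the error metric on each single held-out point):

```python
from sklearn.model_selection import LeaveOneOut

# leave-one-out CV: n folds, each holding out exactly one observation
loo_abs_residuals = []
for train_index, test_index in LeaveOneOut().split(x):
    X_train = pd.DataFrame([x[i] for i in train_index])
    y_train = pd.DataFrame([y[i] for i in train_index])
    X_test = pd.DataFrame([x[i] for i in test_index])
    g_mod = LinearRegression().fit(X_train, y_train)
    loo_abs_residuals.append(abs(y[test_index[0]] - g_mod.predict(X_test)[0][0]))
np.mean(loo_abs_residuals)
```

Now we compute the errors over K: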
```python
# constants
Nsim_per_K = 2000 # simulations per K fold
num_Kuniques = len(Kuniques)
Ks = Kuniques * Nsim_per_K
results = pd.DataFrame({'standard_error': [0 for i in range(Nsim_per_K * num_Kuniques)],
                        'K': [0 for i in range(Nsim_per_K * num_Kuniques)]})

for i in range(len(Ks)):
    # constants
    K = Ks[i]
    prop_train = (K - 1) / K
    n_train = int(prop_train * n)

    # train test split indices
    indices = set([i for i in range(n)])
    index_train = set(random.sample(indices, n_train))
    index_test = indices - index_train

    # split based on indices
    X_train = pd.DataFrame([x[i] for i in index_train]) # cast from set to list
    y_train = pd.DataFrame([y[i] for i in index_train])
    X_test = pd.DataFrame([x[i] for i in index_test])
    y_test = pd.DataFrame([y[i] for i in index_test])

    # fitting model
    g_mod = LinearRegression().fit(X_train, y_train)

    # get predictions
    y_hat_g = g_mod.predict(X_test)

    # get error metric
    if len(y_test) == 1:
        g_s_e = np.absolute([float(y_test.iloc[i]) - y_hat_g[i] for i in range(len(y_test))])
    else:
        g_s_e = np.std([float(y_test.iloc[i]) - y_hat_g[i] for i in range(len(y_test))])

    # update results
    results.loc[i] = np.asarray([g_s_e, K])
# output results
results
```
| | standard_error | K |
|---:|---:|---:|
| 0 | 4.854482 | 2.0 |
| 1 | 2.285150 | 5.0 |
| 2 | 3.105590 | 10.0 |
| 3 | 7.310184 | 50.0 |
| 4 | 0.852891 | 25.0 |
| ... | ... | ... |
| 9995 | 4.322564 | 2.0 |
| 9996 | 2.757709 | 5.0 |
| 9997 | 3.843814 | 10.0 |
| 9998 | 3.889604 | 50.0 |
| 9999 | 0.186950 | 25.0 |

10000 rows × 2 columns
What are the variabilities? Let's take the average error over each simulated split.
```python
# mean of the standard errors grouped by K
results_summary = pd.DataFrame({'K': list(results.groupby(['K']).mean().index),
                                'K_avg': list(results.groupby(['K']).mean()['standard_error']),
                                'K_SE': list(results.groupby(['K']).std()['standard_error'])
                               }).set_index('K')
results_summary
```
| K | K_avg | K_SE |
|---:|---:|---:|
| 2.0 | 4.138852 | 0.349815 |
| 5.0 | 3.923679 | 0.598949 |
| 10.0 | 3.586957 | 0.995828 |
| 25.0 | 2.351176 | 1.800594 |
| 50.0 | 3.716647 | 1.875211 |
```python
sns.displot(results,
            x = 'standard_error',
            hue = 'K',
            kind = 'kde',
            multiple = "stack",
            palette = 'bright',
            alpha = 0.9,
            aspect = 1.5) # width = height * aspect
plt.axvline(label='Mean 2.0', x=results_summary.iloc[0, 0], color='blue')
plt.axvline(label='Mean 5.0', x=results_summary.iloc[1, 0], color='orange')
plt.axvline(label='Mean 10.0', x=results_summary.iloc[2, 0], color='green')
plt.axvline(label='Mean 25.0', x=results_summary.iloc[3, 0], color='red')
plt.axvline(label='Mean 50.0', x=results_summary.iloc[4, 0], color='purple')
```
The main takeaways are:

1. The standard error of the generalization error estimate is much lower for low K than for high K.

   With high K, the test set is small, meaning the estimate has high variance; with low K, the test set is large, meaning you can measure it with low variance.

2. The average of the generalization error estimate is lower for high K than for low K.

   With high K, the training set is large, meaning $g$ is closer to g_final and thus has higher expected accuracy; with low K, the training set is small, meaning $g$ is further from g_final and thus has lower expected accuracy.

Thus, the tradeoff is bias vs. variance. There are many similar tradeoffs in statistics. We will see one later when we do machine learning.
Are these estimates even of the quantity we really care about? No; we care about the generalization error of g_final, which we picture below:
```python
sns.displot(results,
            x = 'standard_error',
            hue = 'K',
            kind = 'kde',
            multiple = "stack",
            palette = 'bright',
            alpha = 0.9,
            aspect = 1.5) # width = height * aspect
plt.axvline(label='Mean 2.0', x=results_summary.iloc[0, 0], color='blue')
plt.axvline(label='Mean 5.0', x=results_summary.iloc[1, 0], color='orange')
plt.axvline(label='Mean 10.0', x=results_summary.iloc[2, 0], color='green')
plt.axvline(label='Mean 25.0', x=results_summary.iloc[3, 0], color='red')
plt.axvline(label='Mean 50.0', x=results_summary.iloc[4, 0], color='purple')
plt.axvline(label='CV', x=gen_error_true[0], color='white')
```
Remember, g_final's error should be lower than both averages since it uses all the data. But we see above it's higher!
So what happened? Simple... we are mixing apples and oranges. We calculated that white line by looking at one million future observations. We calculated the red and blue distributions by looking at our data only which is a random realization of many such datasets! Thus, our generalization errors are biased based on the specific n observations in D we received. We will see that K-fold helps a bit with this. But there is nothing we can do about it beyond that (besides collect more observations). If you get a weird sample, you get a weird sample!
How would we be able to generate the picture we really want to see? We would run this simulation over many datasets and average. That would be a giant simulation. To show that this is the case, go back and change the seed in the first chunk and rerun. You'll see a different white bar.
What is the main takeaway? K matters because it induces a tradeoff. It shouldn't be too large or too small (as we believe at the moment). And, generalization error estimation is very variable in low n. To see this, go back and increase n.
## Reducing variance with Cross Validation (i.e. K-fold CV)
We saw previous there was a lot of variance in generalization error estimation. We can reduce some of this variance by using a very simple trick. We can rotate the train-test split so that each observation will be in the test set once. How many times is this done? K. Now we see the reason for the definition of K as it tells you how many times you validate. Why is it called "cross"? Because the training set crosses over as it does the rotation. Each observation is inside a training set K-1 times. This point will become important later. Why is it called K-fold? Because a fold is one set of training-test and there are K unique folds during the whole procedure.
How does this work? Well, let's say K=10, a typical value. This means in each "fold", 90% of the data is in the training set and 10% of the data is in the test set. As we run through the K folds, we train a model on the training set, predict on the test set, and compute oos residuals. We aggregate those oos residuals over the folds, resulting in n oos residuals. We then run our error metric on all n.
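To see the rotation concretely, here is a toy sketch (n = 6 and K = 3 are chosen only so the output fits on a few lines); each index lands in a test set exactly once and in a training set K - 1 = 2 times:

```python
from sklearn.model_selection import KFold

for fold, (train_idx, test_idx) in enumerate(KFold(n_splits=3).split(np.arange(6))):
    print(f"fold {fold}: train = {list(train_idx)}, test = {list(test_idx)}")
```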
Let's begin with the dataset from the previous demo. We could create the folds by hand, specifying the K = 10 test sets by index and recovering each training set by set difference, but we're gonna jump right into using scikit-learn's KFold module, which does this bookkeeping for us.
```python
# importing dependencies
from sklearn.model_selection import KFold
# initializing model
model = LinearRegression() # intercept will fit by default
# 10 folds
K = 10
kf = KFold(n_splits=K)
# array to hold all n oos residuals, one per observation
oos_cross_validation_residuals = np.array([])

for train_index, test_index in kf.split(x):
    X_train = pd.DataFrame([x[i] for i in train_index])
    y_train = pd.DataFrame([y[i] for i in train_index])
    X_test = pd.DataFrame([x[i] for i in test_index])
    y_test = pd.DataFrame([y[i] for i in test_index])

    model.fit(X_train, y_train)
    y_hat_g = model.predict(X_test)

    # aggregate this fold's oos residuals with those from the other folds
    oos_cross_validation_residuals = np.append(
        oos_cross_validation_residuals, (y_test.values - y_hat_g).flatten())

# one error metric computed over all n oos residuals
oos_cross_validation_residual_average = np.std(oos_cross_validation_residuals)
oos_cross_validation_residual_average
```
3.950922846645746
How does this CV error look over K?
```python
# import divisors() method from sympy
from sympy import divisors
Kuniques = list(set(divisors(n)) - set([1]))
Kuniques
```
[2, 5, 10, 50, 25]
```python
# constants
Nsim_per_K = 1 # simulations per K fold
num_Kuniques = len(Kuniques)
Ks = Kuniques * Nsim_per_K
n = len(y)
oos_residuals_CV = pd.DataFrame({'standard_error': [0 for i in range(Nsim_per_K * num_Kuniques)],
                                 'K': [0 for i in range(Nsim_per_K * num_Kuniques)]})
for i in range(len(Ks)):
    # constants
    K = Ks[i]
    prop_train = (K - 1) / K
    n_train = int(prop_train * n)
    # train test split indices (random.sample needs a sequence, so cast the set to a list)
    indices = set([j for j in range(n)])
    train_index = set(random.sample(list(indices), n_train))
    test_index = indices - train_index
    # cast as lists
    train_index = list(train_index)
    test_index = list(test_index)
    # split based on indices
    X_train = pd.DataFrame([x[j] for j in train_index])
    y_train = pd.DataFrame([y[j] for j in train_index])
    X_test = pd.DataFrame([x[j] for j in test_index])
    y_test = pd.DataFrame([y[j] for j in test_index])
    # fitting model
    g_mod = LinearRegression().fit(X_train, y_train)
    # get predictions
    y_hat_g = g_mod.predict(X_test)
    # get error metric (absolute residual when the test set has a single point)
    if len(y_test) == 1:
        g_s_e = float(np.absolute(float(y_test.iloc[0]) - y_hat_g[0]))
    else:
        g_s_e = np.std([float(y_test.iloc[j]) - y_hat_g[j] for j in range(len(y_test))])
    # update results
    oos_residuals_CV.loc[i] = np.asarray([g_s_e, K])
# output results
oos_residuals_CV
```
|   | standard_error | K |
|---|---|---|
| 0 | 3.672747 | 2.0 |
| 1 | 3.954330 | 5.0 |
| 2 | 3.224956 | 10.0 |
| 3 | 7.383997 | 50.0 |
| 4 | 1.272782 | 25.0 |
There is still an effect of the one random fold. Let's do this many times and look at the distribution just like before.
```python
# constants
Nsim_per_K = 500 # simulations per K fold
num_Kuniques = len(Kuniques)
Ks = Kuniques * Nsim_per_K
n = len(y)
oos_residuals = pd.DataFrame({'standard_error': [0 for i in range(Nsim_per_K * num_Kuniques)],
                              'K': [0 for i in range(Nsim_per_K * num_Kuniques)]})
for i in range(len(Ks)):
    # constants
    K = Ks[i]
    prop_train = (K - 1) / K
    n_train = int(prop_train * n)
    # train test split indices (random.sample needs a sequence, so cast the set to a list)
    indices = set([j for j in range(n)])
    train_index = set(random.sample(list(indices), n_train))
    test_index = indices - train_index
    # cast as lists
    train_index = list(train_index)
    test_index = list(test_index)
    # split based on indices
    X_train = pd.DataFrame([x[j] for j in train_index])
    y_train = pd.DataFrame([y[j] for j in train_index])
    X_test = pd.DataFrame([x[j] for j in test_index])
    y_test = pd.DataFrame([y[j] for j in test_index])
    # fitting model
    g_mod = LinearRegression().fit(X_train, y_train)
    # get predictions
    y_hat_g = g_mod.predict(X_test)
    # get error metric (absolute residual when the test set has a single point)
    if len(y_test) == 1:
        g_s_e = float(np.absolute(float(y_test.iloc[0]) - y_hat_g[0]))
    else:
        g_s_e = np.std([float(y_test.iloc[j]) - y_hat_g[j] for j in range(len(y_test))])
    # update results
    oos_residuals.loc[i] = np.asarray([g_s_e, K])
# output results
oos_residuals
```
|   | standard_error | K |
|---|---|---|
| 0 | 4.185499 | 2.0 |
| 1 | 4.193835 | 5.0 |
| 2 | 3.317849 | 10.0 |
| 3 | 2.990152 | 50.0 |
| 4 | 0.089897 | 25.0 |
| ... | ... | ... |
| 2495 | 4.207371 | 2.0 |
| 2496 | 3.761627 | 5.0 |
| 2497 | 5.222147 | 10.0 |
| 2498 | 5.706351 | 50.0 |
| 2499 | 3.677768 | 25.0 |

2500 rows × 2 columns
What is the variability?
```python
# mean of the standard errors grouped by K
oos_results_summary = pd.DataFrame({'K': list(oos_residuals.groupby(['K']).mean().index),
                                    'K_avg': list(oos_residuals.groupby(['K']).mean()['standard_error']),
                                    'K_SE': list(oos_residuals.groupby(['K']).std()['standard_error'])
                                    }).set_index('K')
oos_results_summary
```
| K | K_avg | K_SE |
|---|---|---|
| 2.0 | 4.146039 | 0.395974 |
| 5.0 | 3.877967 | 0.579897 |
| 10.0 | 3.546045 | 1.018892 |
| 25.0 | 2.368054 | 1.752468 |
| 50.0 | 3.727671 | 1.949647 |
```python
sns.displot(oos_residuals,
            x='standard_error',
            hue='K',
            kind='kde',
            multiple="stack",
            palette='bright',
            alpha=0.9,
            aspect=1.5) # width = height * aspect
plt.axvline(label='Mean 2.0', x=oos_results_summary.iloc[0, 0], color='blue')
plt.axvline(label='Mean 5.0', x=oos_results_summary.iloc[1, 0], color='orange')
plt.axvline(label='Mean 10.0', x=oos_results_summary.iloc[2, 0], color='green')
plt.axvline(label='Mean 25.0', x=oos_results_summary.iloc[3, 0], color='red')
plt.axvline(label='Mean 50.0', x=oos_results_summary.iloc[4, 0], color='purple')
plt.axvline(label='CV', x=oos_cross_validation_residual_average, color='black')
```
Admittedly, I don't know the properties of CV estimates as well as I should. Thus, there will be only procedural questions on the next exam. I do know that selecting K "optimally" for general datasets is an open question.
There is one other nice thing about having folds: you can estimate the standard error of your generalization estimate by pretending you have K iid samples and pretending the normal theory applies. For example, take K = 10 as in the code below. Instead of aggregating all residuals, we leave them separate and get K = 10 different estimates of the generalization error.
```python
# importing dependencies
from sklearn.model_selection import KFold

# initializing model
model = LinearRegression() # intercept will fit by default
# 10 folds
K = 10
kf = KFold(n_splits=K)
# list to hold residuals
oos_cross_validation_residuals = []
for train_index, test_index in kf.split(x):
    X_train = pd.DataFrame([x[i] for i in train_index])
    y_train = pd.DataFrame([y[i] for i in train_index])
    X_test = pd.DataFrame([x[i] for i in test_index])
    y_test = pd.DataFrame([y[i] for i in test_index])
    model.fit(X_train, y_train)
    y_hat_g = model.predict(X_test)
    oos_cross_validation_residuals += [np.std(y_test - y_hat_g)]
oos_cross_validation_residual_mean = np.mean(oos_cross_validation_residuals)
print(oos_cross_validation_residual_mean)
oos_cross_validation_residual_std = np.std(oos_cross_validation_residuals)
print(oos_cross_validation_residual_std)
```
3.950922846645746
0.8584252859259888
```python
# calculating confidence interval
import scipy.stats as st
confidence_interval = st.t.interval(alpha = 0.95,
                                    df = len(y) - 1,
                                    loc = oos_cross_validation_residual_mean,
                                    scale = oos_cross_validation_residual_std)
print(confidence_interval)
print(min(y), max(y))
```
(2.2258526513895607, 5.675993041901931)
1.2627430676861833 48.69439853538999
Although this is technically nonsense (the K estimates are not iid samples, since the training sets cross over and share most of the same observations), at least it's something. In the above example, we've managed to capture the true generalization error.
Coverage in this confidence interval is over D. So I wouldn't gain much insight by simulating different splits with the same K.
I believe confidence intervals for generalization error are an open problem; it may even have been proved that they can't be constructed in general situations.
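For reference, here is a sketch of the "pretend-iid" normal-theory interval computed with the standard error of the mean and K - 1 degrees of freedom (my reading of the intended calculation; note the code above instead uses the raw fold standard deviation and df = n - 1):
```python
# a sketch of the pretend-iid t-interval over the K per-fold estimates
# (assumes oos_cross_validation_residuals from the K-fold loop above)
import numpy as np
import scipy.stats as st

vals = np.asarray(oos_cross_validation_residuals, dtype=float).ravel()
K = len(vals)
fold_mean = np.mean(vals)
# standard error of the mean across the K folds
fold_se = np.std(vals, ddof=1) / np.sqrt(K)
ci = st.t.interval(0.95, df=K - 1, loc=fold_mean, scale=fold_se)
ci
```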
Here's a real data example with the `breast_cancer` dataset.
```python
from sklearn.datasets import load_breast_cancer
data = load_breast_cancer(as_frame = True)
df = data.frame
X = df.iloc[:,:-1]
y = df.iloc[:,-1]
X
```
|   | mean radius | mean texture | mean perimeter | mean area | mean smoothness | mean compactness | mean concavity | mean concave points | mean symmetry | mean fractal dimension | ... | worst radius | worst texture | worst perimeter | worst area | worst smoothness | worst compactness | worst concavity | worst concave points | worst symmetry | worst fractal dimension |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 17.99 | 10.38 | 122.80 | 1001.0 | 0.11840 | 0.27760 | 0.30010 | 0.14710 | 0.2419 | 0.07871 | ... | 25.380 | 17.33 | 184.60 | 2019.0 | 0.16220 | 0.66560 | 0.7119 | 0.2654 | 0.4601 | 0.11890 |
| 1 | 20.57 | 17.77 | 132.90 | 1326.0 | 0.08474 | 0.07864 | 0.08690 | 0.07017 | 0.1812 | 0.05667 | ... | 24.990 | 23.41 | 158.80 | 1956.0 | 0.12380 | 0.18660 | 0.2416 | 0.1860 | 0.2750 | 0.08902 |
| 2 | 19.69 | 21.25 | 130.00 | 1203.0 | 0.10960 | 0.15990 | 0.19740 | 0.12790 | 0.2069 | 0.05999 | ... | 23.570 | 25.53 | 152.50 | 1709.0 | 0.14440 | 0.42450 | 0.4504 | 0.2430 | 0.3613 | 0.08758 |
| 3 | 11.42 | 20.38 | 77.58 | 386.1 | 0.14250 | 0.28390 | 0.24140 | 0.10520 | 0.2597 | 0.09744 | ... | 14.910 | 26.50 | 98.87 | 567.7 | 0.20980 | 0.86630 | 0.6869 | 0.2575 | 0.6638 | 0.17300 |
| 4 | 20.29 | 14.34 | 135.10 | 1297.0 | 0.10030 | 0.13280 | 0.19800 | 0.10430 | 0.1809 | 0.05883 | ... | 22.540 | 16.67 | 152.20 | 1575.0 | 0.13740 | 0.20500 | 0.4000 | 0.1625 | 0.2364 | 0.07678 |
| ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
| 564 | 21.56 | 22.39 | 142.00 | 1479.0 | 0.11100 | 0.11590 | 0.24390 | 0.13890 | 0.1726 | 0.05623 | ... | 25.450 | 26.40 | 166.10 | 2027.0 | 0.14100 | 0.21130 | 0.4107 | 0.2216 | 0.2060 | 0.07115 |
| 565 | 20.13 | 28.25 | 131.20 | 1261.0 | 0.09780 | 0.10340 | 0.14400 | 0.09791 | 0.1752 | 0.05533 | ... | 23.690 | 38.25 | 155.00 | 1731.0 | 0.11660 | 0.19220 | 0.3215 | 0.1628 | 0.2572 | 0.06637 |
| 566 | 16.60 | 28.08 | 108.30 | 858.1 | 0.08455 | 0.10230 | 0.09251 | 0.05302 | 0.1590 | 0.05648 | ... | 18.980 | 34.12 | 126.70 | 1124.0 | 0.11390 | 0.30940 | 0.3403 | 0.1418 | 0.2218 | 0.07820 |
| 567 | 20.60 | 29.33 | 140.10 | 1265.0 | 0.11780 | 0.27700 | 0.35140 | 0.15200 | 0.2397 | 0.07016 | ... | 25.740 | 39.42 | 184.60 | 1821.0 | 0.16500 | 0.86810 | 0.9387 | 0.2650 | 0.4087 | 0.12400 |
| 568 | 7.76 | 24.54 | 47.92 | 181.0 | 0.05263 | 0.04362 | 0.00000 | 0.00000 | 0.1587 | 0.05884 | ... | 9.456 | 30.37 | 59.16 | 268.6 | 0.08996 | 0.06444 | 0.0000 | 0.0000 | 0.2871 | 0.07039 |

569 rows × 30 columns
```python
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold

k = 5
kf = KFold(n_splits=k, random_state=None)
model = LinearRegression()
residuals = []
for train_index, test_index in kf.split(X):
    X_train, X_test = X.iloc[train_index, :], X.iloc[test_index, :]
    y_train, y_test = y[train_index], y[test_index]
    model.fit(X_train, y_train)
    y_hat_g = model.predict(X_test)
    residuals += [np.std(y_test - y_hat_g)]
print(np.mean(residuals))
print(np.std(residuals))
```
0.24089771870604665
0.03227633906367752
```python
# calculating confidence interval
import scipy.stats as st

confidence_interval = st.t.interval(alpha = 0.95,
                                    df = len(X) - 1,
                                    loc = np.mean(residuals),
                                    scale = np.std(residuals))
print(confidence_interval)
```
(0.17750217054311385, 0.30429326686897945)
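By the way, `cross_val_score` was imported above but never used; here is a sketch of roughly the same computation as a one-liner (the scoring rule here is my choice, so the numbers won't exactly match the residual s_e above):
```python
# sketch: K-fold error via sklearn's cross_val_score
# (assumes X, y, model, kf from the previous chunk)
import numpy as np
from sklearn.model_selection import cross_val_score

rmse_per_fold = -cross_val_score(model, X, y, cv=kf,
                                 scoring='neg_root_mean_squared_error')
print(np.mean(rmse_per_fold), np.std(rmse_per_fold))
```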
|
function C = VBA_spm_mesh_curvature(M)
% Compute a crude approximation of the curvature of a surface mesh
% FORMAT C = spm_mesh_curvature(M)
% M - a patch structure
%
% C - curvature vector
%__________________________________________________________________________
% Copyright (C) 2008 Wellcome Trust Centre for Neuroimaging
% Guillaume Flandin
% $Id: spm_mesh_curvature.m 3135 2009-05-19 14:49:42Z guillaume $
A = VBA_spm_mesh_adjacency(M);
A = sparse(1:size(M.vertices,1),1:size(M.vertices,1),1./sum(A,2)) * A;
C = (A-speye(size(A))) * double(M.vertices);
N = VBA_spm_mesh_normals(M);
C = sign(sum(N.*C,2)) .* sqrt(sum(C.*C,2));
|
/-
Copyright (c) 2021 Eric Wieser. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Eric Wieser
-/
import ring_theory.subsemiring.basic
import algebra.group_ring_action
import algebra.pointwise
/-! # Pointwise instances on `subsemiring`s
This file provides the action `subsemiring.pointwise_mul_action` which matches the action of
`mul_action_set`.
This action is available in the `pointwise` locale.
## Implementation notes
This file is almost identical to `group_theory/submonoid/pointwise.lean`. Where possible, try to
keep them in sync.
-/
variables {M R : Type*}
namespace subsemiring
section monoid
variables [monoid M] [semiring R] [mul_semiring_action M R]
/-- The action on a subsemiring corresponding to applying the action to every element.
This is available as an instance in the `pointwise` locale. -/
protected def pointwise_mul_action : mul_action M (subsemiring R) :=
{ smul := λ a S, S.map (mul_semiring_action.to_ring_hom _ _ a),
one_smul := λ S,
(congr_arg (λ f, S.map f) (ring_hom.ext $ by exact one_smul M)).trans S.map_id,
mul_smul := λ a₁ a₂ S,
(congr_arg (λ f, S.map f) (ring_hom.ext $ by exact mul_smul _ _)).trans (S.map_map _ _).symm }
localized "attribute [instance] subsemiring.pointwise_mul_action" in pointwise
open_locale pointwise
lemma pointwise_smul_def {a : M} (S : subsemiring R) :
a • S = S.map (mul_semiring_action.to_ring_hom _ _ a) := rfl
@[simp] lemma coe_pointwise_smul (m : M) (S : subsemiring R) : ↑(m • S) = m • (S : set R) := rfl
@[simp] lemma pointwise_smul_to_add_submonoid (m : M) (S : subsemiring R) :
(m • S).to_add_submonoid = m • S.to_add_submonoid := rfl
lemma mem_smul_pointwise_iff_exists (m : M) (r : R) (S : subsemiring R) :
r ∈ m • S ↔ ∃ (s : R), s ∈ S ∧ m • s = r :=
(set.mem_smul_set : r ∈ m • (S : set R) ↔ _)
instance pointwise_central_scalar [mul_semiring_action Mᵐᵒᵖ R] [is_central_scalar M R] :
is_central_scalar M (subsemiring R) :=
⟨λ a S, congr_arg (λ f, S.map f) $ ring_hom.ext $ by exact op_smul_eq_smul _⟩
end monoid
section group
variables [group M] [semiring R] [mul_semiring_action M R]
open_locale pointwise
@[simp] lemma smul_mem_pointwise_smul_iff {a : M} {S : subsemiring R} {x : R} :
a • x ∈ a • S ↔ x ∈ S :=
smul_mem_smul_set_iff
lemma mem_pointwise_smul_iff_inv_smul_mem {a : M} {S : subsemiring R} {x : R} :
x ∈ a • S ↔ a⁻¹ • x ∈ S :=
mem_smul_set_iff_inv_smul_mem
lemma mem_inv_pointwise_smul_iff {a : M} {S : subsemiring R} {x : R} : x ∈ a⁻¹ • S ↔ a • x ∈ S :=
mem_inv_smul_set_iff
@[simp] lemma pointwise_smul_le_pointwise_smul_iff {a : M} {S T : subsemiring R} :
a • S ≤ a • T ↔ S ≤ T :=
set_smul_subset_set_smul_iff
lemma pointwise_smul_subset_iff {a : M} {S T : subsemiring R} : a • S ≤ T ↔ S ≤ a⁻¹ • T :=
set_smul_subset_iff
lemma subset_pointwise_smul_iff {a : M} {S T : subsemiring R} : S ≤ a • T ↔ a⁻¹ • S ≤ T :=
subset_set_smul_iff
/-! TODO: add `equiv_smul` like we have for subgroup. -/
end group
section group_with_zero
variables [group_with_zero M] [semiring R] [mul_semiring_action M R]
open_locale pointwise
@[simp] lemma smul_mem_pointwise_smul_iff₀ {a : M} (ha : a ≠ 0) (S : subsemiring R)
(x : R) : a • x ∈ a • S ↔ x ∈ S :=
smul_mem_smul_set_iff₀ ha (S : set R) x
lemma mem_pointwise_smul_iff_inv_smul_mem₀ {a : M} (ha : a ≠ 0) (S : subsemiring R) (x : R) :
x ∈ a • S ↔ a⁻¹ • x ∈ S :=
mem_smul_set_iff_inv_smul_mem₀ ha (S : set R) x
lemma mem_inv_pointwise_smul_iff₀ {a : M} (ha : a ≠ 0) (S : subsemiring R) (x : R) :
x ∈ a⁻¹ • S ↔ a • x ∈ S :=
mem_inv_smul_set_iff₀ ha (S : set R) x
@[simp] lemma pointwise_smul_le_pointwise_smul_iff₀ {a : M} (ha : a ≠ 0) {S T : subsemiring R} :
a • S ≤ a • T ↔ S ≤ T :=
set_smul_subset_set_smul_iff₀ ha
lemma pointwise_smul_le_iff₀ {a : M} (ha : a ≠ 0) {S T : subsemiring R} : a • S ≤ T ↔ S ≤ a⁻¹ • T :=
set_smul_subset_iff₀ ha
lemma le_pointwise_smul_iff₀ {a : M} (ha : a ≠ 0) {S T : subsemiring R} : S ≤ a • T ↔ a⁻¹ • S ≤ T :=
subset_set_smul_iff₀ ha
end group_with_zero
end subsemiring
|
If $f$ and $g$ are continuous at $a$, then the function $f \cdot g$ is continuous at $a$. |
[GOAL]
S : Type u_1
inst✝ : Semigroup S
a b x y z x' y' : S
h : SemiconjBy a x y
h' : SemiconjBy a x' y'
⊢ SemiconjBy a (x * x') (y * y')
[PROOFSTEP]
unfold SemiconjBy
[GOAL]
S : Type u_1
inst✝ : Semigroup S
a b x y z x' y' : S
h : SemiconjBy a x y
h' : SemiconjBy a x' y'
⊢ a * (x * x') = y * y' * a
[PROOFSTEP]
rw [← mul_assoc, h.eq, mul_assoc, h'.eq, ← mul_assoc]
[GOAL]
S : Type u_1
inst✝ : Semigroup S
a b x y z x' y' : S
ha : SemiconjBy a y z
hb : SemiconjBy b x y
⊢ SemiconjBy (a * b) x z
[PROOFSTEP]
unfold SemiconjBy
[GOAL]
S : Type u_1
inst✝ : Semigroup S
a b x y z x' y' : S
ha : SemiconjBy a y z
hb : SemiconjBy b x y
⊢ a * b * x = z * (a * b)
[PROOFSTEP]
rw [mul_assoc, hb.eq, ← mul_assoc, ha.eq, mul_assoc]
[GOAL]
M : Type u_1
inst✝ : MulOneClass M
a : M
⊢ SemiconjBy a 1 1
[PROOFSTEP]
rw [SemiconjBy, mul_one, one_mul]
[GOAL]
M : Type u_1
inst✝ : Monoid M
a : M
x y : Mˣ
h : SemiconjBy a ↑x ↑y
⊢ a * ↑x⁻¹ = ↑y⁻¹ * (↑y * a) * ↑x⁻¹
[PROOFSTEP]
rw [Units.inv_mul_cancel_left]
[GOAL]
M : Type u_1
inst✝ : Monoid M
a : M
x y : Mˣ
h : SemiconjBy a ↑x ↑y
⊢ ↑y⁻¹ * (↑y * a) * ↑x⁻¹ = ↑y⁻¹ * a
[PROOFSTEP]
rw [← h.eq, mul_assoc, Units.mul_inv_cancel_right]
[GOAL]
M : Type u_1
inst✝ : Monoid M
a : Mˣ
x y : M
h : SemiconjBy (↑a) x y
⊢ ↑a⁻¹ * y = ↑a⁻¹ * (y * ↑a * ↑a⁻¹)
[PROOFSTEP]
rw [Units.mul_inv_cancel_right]
[GOAL]
M : Type u_1
inst✝ : Monoid M
a : Mˣ
x y : M
h : SemiconjBy (↑a) x y
⊢ ↑a⁻¹ * (y * ↑a * ↑a⁻¹) = x * ↑a⁻¹
[PROOFSTEP]
rw [← h.eq, ← mul_assoc, Units.inv_mul_cancel_left]
[GOAL]
M : Type u_1
inst✝ : Monoid M
a x y : M
h : SemiconjBy a x y
n : ℕ
⊢ SemiconjBy a (x ^ n) (y ^ n)
[PROOFSTEP]
induction' n with n ih
[GOAL]
case zero
M : Type u_1
inst✝ : Monoid M
a x y : M
h : SemiconjBy a x y
⊢ SemiconjBy a (x ^ Nat.zero) (y ^ Nat.zero)
[PROOFSTEP]
rw [pow_zero, pow_zero]
[GOAL]
case zero
M : Type u_1
inst✝ : Monoid M
a x y : M
h : SemiconjBy a x y
⊢ SemiconjBy a 1 1
[PROOFSTEP]
exact SemiconjBy.one_right _
[GOAL]
case succ
M : Type u_1
inst✝ : Monoid M
a x y : M
h : SemiconjBy a x y
n : ℕ
ih : SemiconjBy a (x ^ n) (y ^ n)
⊢ SemiconjBy a (x ^ Nat.succ n) (y ^ Nat.succ n)
[PROOFSTEP]
rw [pow_succ, pow_succ]
[GOAL]
case succ
M : Type u_1
inst✝ : Monoid M
a x y : M
h : SemiconjBy a x y
n : ℕ
ih : SemiconjBy a (x ^ n) (y ^ n)
⊢ SemiconjBy a (x * x ^ n) (y * y ^ n)
[PROOFSTEP]
exact h.mul_right ih
[GOAL]
G : Type u_1
inst✝ : DivisionMonoid G
a x y : G
⊢ (a⁻¹ * x⁻¹)⁻¹ = (y⁻¹ * a⁻¹)⁻¹ ↔ SemiconjBy a y x
[PROOFSTEP]
rw [mul_inv_rev, mul_inv_rev, inv_inv, inv_inv, inv_inv, eq_comm, SemiconjBy]
[GOAL]
G : Type u_1
inst✝ : Group G
a✝ x✝ y a x : G
⊢ SemiconjBy a x (a * x * a⁻¹)
[PROOFSTEP]
unfold SemiconjBy
[GOAL]
G : Type u_1
inst✝ : Group G
a✝ x✝ y a x : G
⊢ a * x = a * x * a⁻¹ * a
[PROOFSTEP]
rw [mul_assoc, inv_mul_self, mul_one]
[GOAL]
M : Type u_1
inst✝ : CancelCommMonoid M
a x y : M
h : x = y
⊢ SemiconjBy a x y
[PROOFSTEP]
rw [h, SemiconjBy, mul_comm]
[GOAL]
M : Type u_1
inst✝ : Monoid M
u : Mˣ
x : M
⊢ SemiconjBy (↑u) x (↑u * x * ↑u⁻¹)
[PROOFSTEP]
unfold SemiconjBy
[GOAL]
M : Type u_1
inst✝ : Monoid M
u : Mˣ
x : M
⊢ ↑u * x = ↑u * x * ↑u⁻¹ * ↑u
[PROOFSTEP]
rw [Units.inv_mul_cancel_right]
|
module XAM
export
SAM,
BAM
include("sam/sam.jl")
include("bam/bam.jl")
using .SAM
using .BAM
end # module
|
(* Adapted from https://github.com/antalsz/hs-to-coq/blob/b0db5644e1e7592520d5102e9d74984694766b0e/examples/base-src/manual/GHC/Err.v *)
From Showtime Require Import Max.
Require Import Strings.String.
Class Default (a : Type) := {
default : a
}.
(* The use of [Qed] is crucial, this way we cannot look through [error] in our proofs. *)
Definition error {a} `{Default a} : string -> a.
Proof. exact (fun _ => default). Qed.
(* The use of [Qed] is crucial, this way we cannot look through [error] in our proofs. *)
Definition undefined {a} `{Default a} : a.
Proof. exact default. Qed.
Definition errorWithoutStackTrace {a} `{Default a} :
string -> a := error.
Definition patternFailure {a} `{Default a} : a.
Proof. exact default. Qed.
Instance DefaultMax : Default Max := {
default := MaxZero
}.
Instance DefaultNat : Default nat := {
default := O
}.
Instance DefaultOption : forall {a}, Default (option a) := {
default := None
}.
Instance DefaultPair : forall {a b} `{Default a} `{Default b},
Default (a * b) := {
default := (default, default)
}.
Instance DefaultList : forall {a}, Default (list a) := {
default := nil
}. |
/-
Copyright (c) 2019 Scott Morrison. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Scott Morrison, Justus Springer
-/
import order.complete_lattice
import data.fintype.lattice
import category_theory.limits.shapes.pullbacks
import category_theory.category.preorder
import category_theory.limits.shapes.products
import category_theory.limits.shapes.finite_limits
/-!
# Limits in lattice categories are given by infimums and supremums.
> THIS FILE IS SYNCHRONIZED WITH MATHLIB4.
> Any changes to this file require a corresponding PR to mathlib4.
-/
universes w u
open category_theory
open category_theory.limits
namespace category_theory.limits.complete_lattice
section semilattice
variables {α : Type u}
variables {J : Type w} [small_category J] [fin_category J]
/--
The limit cone over any functor from a finite diagram into a `semilattice_inf` with `order_top`.
-/
def finite_limit_cone [semilattice_inf α] [order_top α] (F : J ⥤ α) : limit_cone F :=
{ cone :=
{ X := finset.univ.inf F.obj,
π := { app := λ j, hom_of_le (finset.inf_le (fintype.complete _)) } },
is_limit := { lift := λ s, hom_of_le (finset.le_inf (λ j _, (s.π.app j).down.down)) } }
/--
The colimit cocone over any functor from a finite diagram into a `semilattice_sup` with `order_bot`.
-/
def finite_colimit_cocone [semilattice_sup α] [order_bot α] (F : J ⥤ α) : colimit_cocone F :=
{ cocone :=
{ X := finset.univ.sup F.obj,
ι := { app := λ i, hom_of_le (finset.le_sup (fintype.complete _)) } },
is_colimit := { desc := λ s, hom_of_le (finset.sup_le (λ j _, (s.ι.app j).down.down)) } }
@[priority 100] -- see Note [lower instance priority]
instance has_finite_limits_of_semilattice_inf_order_top [semilattice_inf α] [order_top α] :
has_finite_limits α :=
⟨λ J 𝒥₁ 𝒥₂, by exactI { has_limit := λ F, has_limit.mk (finite_limit_cone F) }⟩
@[priority 100] -- see Note [lower instance priority]
instance has_finite_colimits_of_semilattice_sup_order_bot [semilattice_sup α] [order_bot α] :
has_finite_colimits α :=
⟨λ J 𝒥₁ 𝒥₂, by exactI { has_colimit := λ F, has_colimit.mk (finite_colimit_cocone F) }⟩
/--
The limit of a functor from a finite diagram into a `semilattice_inf` with `order_top` is the
infimum of the objects in the image.
-/
lemma finite_limit_eq_finset_univ_inf [semilattice_inf α] [order_top α] (F : J ⥤ α) :
limit F = finset.univ.inf F.obj :=
(is_limit.cone_point_unique_up_to_iso (limit.is_limit F)
(finite_limit_cone F).is_limit).to_eq
/--
The colimit of a functor from a finite diagram into a `semilattice_sup` with `order_bot`
is the supremum of the objects in the image.
-/
lemma finite_colimit_eq_finset_univ_sup [semilattice_sup α] [order_bot α] (F : J ⥤ α) :
  colimit F = finset.univ.sup F.obj :=
(is_colimit.cocone_point_unique_up_to_iso (colimit.is_colimit F)
  (finite_colimit_cocone F).is_colimit).to_eq
/--
A finite product in the category of a `semilattice_inf` with `order_top` is the same as the infimum.
-/
lemma finite_product_eq_finset_inf [semilattice_inf α] [order_top α] {ι : Type u}
[fintype ι] (f : ι → α) : (∏ f) = (fintype.elems ι).inf f :=
begin
transitivity,
exact (is_limit.cone_point_unique_up_to_iso (limit.is_limit _)
(finite_limit_cone (discrete.functor f)).is_limit).to_eq,
change finset.univ.inf (f ∘ discrete_equiv.to_embedding) = (fintype.elems ι).inf f,
simp only [←finset.inf_map, finset.univ_map_equiv_to_embedding],
refl,
end
/--
A finite coproduct in the category of a `semilattice_sup` with `order_bot` is the same as the
supremum.
-/
lemma finite_coproduct_eq_finset_sup [semilattice_sup α] [order_bot α] {ι : Type u}
[fintype ι] (f : ι → α) : (∐ f) = (fintype.elems ι).sup f :=
begin
transitivity,
exact (is_colimit.cocone_point_unique_up_to_iso (colimit.is_colimit _)
(finite_colimit_cocone (discrete.functor f)).is_colimit).to_eq,
change finset.univ.sup (f ∘ discrete_equiv.to_embedding) = (fintype.elems ι).sup f,
simp only [←finset.sup_map, finset.univ_map_equiv_to_embedding],
refl,
end
@[priority 100] -- see Note [lower instance priority]
instance [semilattice_inf α] [order_top α] : has_binary_products α :=
begin
haveI : ∀ (x y : α), has_limit (pair x y),
{ letI := has_finite_limits_of_has_finite_limits_of_size.{u} α, apply_instance },
apply has_binary_products_of_has_limit_pair
end
/--
The binary product in the category of a `semilattice_inf` with `order_top` is the same as the
infimum.
-/
@[simp]
lemma prod_eq_inf [semilattice_inf α] [order_top α] (x y : α) : limits.prod x y = x ⊓ y :=
calc limits.prod x y = limit (pair x y) : rfl
... = finset.univ.inf (pair x y).obj : by rw finite_limit_eq_finset_univ_inf (pair.{u} x y)
... = x ⊓ (y ⊓ ⊤) : rfl -- Note: finset.inf is realized as a fold, hence the definitional equality
... = x ⊓ y : by rw inf_top_eq
@[priority 100] -- see Note [lower instance priority]
instance [semilattice_sup α] [order_bot α] : has_binary_coproducts α :=
begin
haveI : ∀ (x y : α), has_colimit (pair x y),
{ letI := has_finite_colimits_of_has_finite_colimits_of_size.{u} α, apply_instance },
apply has_binary_coproducts_of_has_colimit_pair
end
/--
The binary coproduct in the category of a `semilattice_sup` with `order_bot` is the same as the
supremum.
-/
@[simp]
lemma coprod_eq_sup [semilattice_sup α] [order_bot α] (x y : α) : limits.coprod x y = x ⊔ y :=
calc limits.coprod x y = colimit (pair x y) : rfl
... = finset.univ.sup (pair x y).obj : by rw finite_colimit_eq_finset_univ_sup (pair x y)
... = x ⊔ (y ⊔ ⊥) : rfl -- Note: finset.sup is realized as a fold, hence the definitional equality
... = x ⊔ y : by rw sup_bot_eq
/--
The pullback in the category of a `semilattice_inf` with `order_top` is the same as the infimum
over the objects.
-/
@[simp]
lemma pullback_eq_inf [semilattice_inf α] [order_top α] {x y z : α} (f : x ⟶ z) (g : y ⟶ z) :
pullback f g = x ⊓ y :=
calc pullback f g = limit (cospan f g) : rfl
... = finset.univ.inf (cospan f g).obj : by rw finite_limit_eq_finset_univ_inf
... = z ⊓ (x ⊓ (y ⊓ ⊤)) : rfl
... = z ⊓ (x ⊓ y) : by rw inf_top_eq
... = x ⊓ y : inf_eq_right.mpr (inf_le_of_left_le f.le)
/--
The pushout in the category of a `semilattice_sup` with `order_bot` is the same as the supremum
over the objects.
-/
@[simp]
lemma pushout_eq_sup [semilattice_sup α] [order_bot α] (x y z : α) (f : z ⟶ x) (g : z ⟶ y) :
pushout f g = x ⊔ y :=
calc pushout f g = colimit (span f g) : rfl
... = finset.univ.sup (span f g).obj : by rw finite_colimit_eq_finset_univ_sup
... = z ⊔ (x ⊔ (y ⊔ ⊥)) : rfl
... = z ⊔ (x ⊔ y) : by rw sup_bot_eq
... = x ⊔ y : sup_eq_right.mpr (le_sup_of_le_left f.le)
end semilattice
variables {α : Type u} [complete_lattice α]
variables {J : Type u} [small_category J]
/--
The limit cone over any functor into a complete lattice.
-/
def limit_cone (F : J ⥤ α) : limit_cone F :=
{ cone :=
{ X := infi F.obj,
π :=
{ app := λ j, hom_of_le (complete_lattice.Inf_le _ _ (set.mem_range_self _)) } },
is_limit :=
{ lift := λ s, hom_of_le (complete_lattice.le_Inf _ _
begin rintros _ ⟨j, rfl⟩, exact (s.π.app j).le, end) } }
/--
The colimit cocone over any functor into a complete lattice.
-/
def colimit_cocone (F : J ⥤ α) : colimit_cocone F :=
{ cocone :=
{ X := supr F.obj,
ι :=
{ app := λ j, hom_of_le (complete_lattice.le_Sup _ _ (set.mem_range_self _)) } },
is_colimit :=
{ desc := λ s, hom_of_le (complete_lattice.Sup_le _ _
begin rintros _ ⟨j, rfl⟩, exact (s.ι.app j).le, end) } }
-- It would be nice to only use the `Inf` half of the complete lattice, but
-- this seems not to have been described separately.
@[priority 100] -- see Note [lower instance priority]
instance has_limits_of_complete_lattice : has_limits α :=
{ has_limits_of_shape := λ J 𝒥, by exactI
{ has_limit := λ F, has_limit.mk (limit_cone F) } }
@[priority 100] -- see Note [lower instance priority]
instance has_colimits_of_complete_lattice : has_colimits α :=
{ has_colimits_of_shape := λ J 𝒥, by exactI
{ has_colimit := λ F, has_colimit.mk (colimit_cocone F) } }
/--
The limit of a functor into a complete lattice is the infimum of the objects in the image.
-/
lemma limit_eq_infi (F : J ⥤ α) : limit F = infi F.obj :=
(is_limit.cone_point_unique_up_to_iso (limit.is_limit F)
(limit_cone F).is_limit).to_eq
/--
The colimit of a functor into a complete lattice is the supremum of the objects in the image.
-/
lemma colimit_eq_supr (F : J ⥤ α) : colimit F = supr F.obj :=
(is_colimit.cocone_point_unique_up_to_iso (colimit.is_colimit F)
(colimit_cocone F).is_colimit).to_eq
end category_theory.limits.complete_lattice
|
import os
import sys
import filecmp
import pytest
import emeraldbgc
import numpy as np
emrld_dir = os.path.dirname(os.path.abspath(emeraldbgc.__file__))
test_files_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "files")
modules_dir = os.path.join(emrld_dir, "modules")
sys.path.append(modules_dir)
from BGCdetection import AnnotationFilesToEmerald
def test_bgc_prediction():
    prodigal_file = os.path.join(test_files_dir, "BGC0001472.fna.prodigal.faa")
    ips_file = os.path.join(test_files_dir, "BGC0001472.fna.prodigal.faa.ip.tsv")
    hmm_file = os.path.join(test_files_dir, "BGC0001472.fna.prodigal.faa.emerald.tsv")
    ann = AnnotationFilesToEmerald()
    ann.transformIPS(ips_file)
    ann.transformEmeraldHmm(hmm_file)
    fmt = "fna"
    ann.transformCDSpredToCDScontigs(
        prodigal_file,
        fmt)
    ann.buildMatrices()
    assert ann.annDct['BGC0001472'].shape == (200, 15264)
    assert np.sum(ann.annDct['BGC0001472']) == 46.
    ann.predictAnn()
    # floating-point sum, so compare with a tolerance
    assert np.sum(ann.annResults['BGC0001472']) == pytest.approx(15.492591619491577)
    score, g = None, 1
    ann.defineLooseClusters(score=score, g=g)
    ann.predictType()
    assert np.array_equal(np.where(ann.typesClst['BGC0001472'] != None)[0],
                          np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]))
|
------------------------------------------------------------------------
-- Operators
------------------------------------------------------------------------
module Mixfix.Operator where
open import Data.Nat using (ℕ; zero; suc; _+_)
open import Data.Vec using (Vec)
open import Data.Product using (∃; ∃₂; _,_)
open import Data.Maybe using (Maybe; just; nothing)
open import Data.String using (String)
open import Relation.Nullary using (Dec; yes; no)
open import Relation.Binary.PropositionalEquality using (_≡_; refl)
open import Mixfix.Fixity
-- Name parts.
NamePart : Set
NamePart = String
-- Operators. The parameter arity is the internal arity of the
-- operator, i.e. the number of arguments taken between the first and
-- last name parts.
record Operator (fix : Fixity) (arity : ℕ) : Set where
  field nameParts : Vec NamePart (1 + arity)
open Operator public
-- Predicate filtering out operators of the given fixity and
-- associativity.
hasFixity : ∀ fix → ∃₂ Operator → Maybe (∃ (Operator fix))
hasFixity fix (fix' , op) with fix ≟ fix'
hasFixity fix (.fix , op) | yes refl = just op
hasFixity fix (fix' , op) | _ = nothing
|
Formal statement is: lemma linear_inj_bounded_below_pos: fixes f :: "'a::real_normed_vector \<Rightarrow> 'b::euclidean_space" assumes "linear f" "inj f" obtains B where "B > 0" "\<And>x. B * norm x \<le> norm(f x)" Informal statement is: If $f$ is a linear injective map from a normed vector space to a Euclidean space, then there exists a constant $B > 0$ such that $B \|x\| \leq \|f(x)\|$ for all $x$. |
function subbac!(u::Matrix{Float64}, b::Vector{Float64})
# Back-substitution on an Upper Triangle
n = size(u, 1)
for i in n:-1:1
total = b[i]
if i < n
for j in i+1:n
total -= u[i, j] * b[j]
end
end
b[i] = total / u[i, i]
end
end
function subbac(u::Matrix{Float64}, b::Vector{Float64})
# Back-substitution on an Upper Triangle
bt = deepcopy(b)
n = size(u, 1)
for i in n:-1:1
total = bt[i]
if i < n
for j in i+1:n
total -= u[i, j] * bt[j]
end
end
bt[i] = total / u[i, i]
end
bt
end
export
subbac!,
subbac
|
lemma complete_UNIV: "complete (UNIV :: ('a::complete_space) set)" |
[STATEMENT]
lemma (in itrace_top) itop_carrier: "carrier = A\<^sup>\<omega>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. carrier = A\<^sup>\<omega>
[PROOF STEP]
by (auto simp: carrier_topo infsuff_def) |
\subsection{Poisson distribution}
\subsubsection{Definition}
We can use the Poisson distribution to model the number of independent events that occur in a time period.
For a very short time period, the chance of observing an event is a Bernoulli trial.
\(P(1)=p\)
\(P(0)=1-p\)
\subsubsection{Chance of no observations}
Let's consider the chance of observing no events up to time \(t\): \(P(0;t)\).
We can see that: \(P(0;t+\delta t)=P(0;t)(1-p)\).
And therefore:
\(P(0;t+\delta t)-P(0;t)=-pP(0;t)\)
By setting \(p=\lambda \delta t\):
\(\dfrac{P(0;t+\delta t)-P(0;t)}{\delta t}=-\lambda P(0;t)\)
Taking the limit as \(\delta t \to 0\):
\(\dfrac{d P(0;t)}{d t}=-\lambda P(0;t)\)
\(P(0;t)=Ce^{-\lambda t}\)
If \(t=0\) then \(P(0;0)=1\) and so \(C=1\).
\(P(0;t)=e^{-\lambda t}\)
\subsubsection{Deriving the Poisson distribution}
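Sketching where this derivation leads (a reconstruction following the same recurrence argument as above): for \(n \geq 1\), condition on whether an event occurs in the short interval \((t, t+\delta t]\):
\(P(n;t+\delta t)=P(n;t)(1-\lambda \delta t)+P(n-1;t)\lambda \delta t\)
Taking the limit as \(\delta t \to 0\):
\(\dfrac{d P(n;t)}{d t}=-\lambda P(n;t)+\lambda P(n-1;t)\)
Solving inductively (for example with the integrating factor \(e^{\lambda t}\)), starting from \(P(0;t)=e^{-\lambda t}\), gives
\(P(n;t)=\dfrac{(\lambda t)^n e^{-\lambda t}}{n!}\)
which is the Poisson distribution with rate \(\lambda\).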
|
[STATEMENT]
lemma scons_eq_szip[iff]: "z ## zs = szip xs ys \<longleftrightarrow> z = (shd xs, shd ys) \<and> zs = szip (stl xs) (stl ys)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (z ## zs = szip xs ys) = (z = (shd xs, shd ys) \<and> zs = szip (stl xs) (stl ys))
[PROOF STEP]
using szip.ctr stream.inject
[PROOF STATE]
proof (prove)
using this:
szip ?s1.0 ?s2.0 = (shd ?s1.0, shd ?s2.0) ## szip (stl ?s1.0) (stl ?s2.0)
(?x1.0 ## ?x2.0 = ?y1.0 ## ?y2.0) = (?x1.0 = ?y1.0 \<and> ?x2.0 = ?y2.0)
goal (1 subgoal):
1. (z ## zs = szip xs ys) = (z = (shd xs, shd ys) \<and> zs = szip (stl xs) (stl ys))
[PROOF STEP]
by metis |
-- {-# OPTIONS -v tc.lhs.problem:10 #-}
-- {-# OPTIONS --compile --ghc-flag=-i.. #-}
module Issue727 where
open import Common.Prelude renaming (Nat to ℕ)
open import Common.MAlonzo hiding (main)
Sum : ℕ → Set
Sum 0 = ℕ
Sum (suc n) = ℕ → Sum n
sum : (n : ℕ) → ℕ → Sum n
sum 0 acc = acc
sum (suc n) acc m = sum n (m + acc)
main = mainPrintNat (sum 3 0 1 2 3)
|
function b = r8pbu_ml ( n, mu, a_lu, x )
%*****************************************************************************80
%
%% R8PBU_ML multiplies a vector times a matrix that was factored by R8PBU_FA.
%
% Discussion:
%
% The R8PBU storage format is for a symmetric positive definite band matrix.
%
% To save storage, only the diagonal and upper triangle of A is stored,
% in a compact diagonal format that preserves columns.
%
% The diagonal is stored in row MU+1 of the array.
% The first superdiagonal in row MU, columns 2 through N.
% The second superdiagonal in row MU-1, columns 3 through N.
% The MU-th superdiagonal in row 1, columns MU+1 through N.
%
% Licensing:
%
% This code is distributed under the GNU LGPL license.
%
% Modified:
%
% 21 February 2004
%
% Author:
%
% John Burkardt
%
% Parameters:
%
% Input, integer N, the order of the matrix.
% N must be positive.
%
% Input, integer MU, the number of superdiagonals of the matrix.
% MU must be at least 0 and no more than N-1.
%
% Input, real A_LU(MU+1,N), the matrix, as factored by R8PBU_FA.
%
% Input, real X(N), the vector to be multiplied by A.
%
% Output, real B(N), the product A * x.
%
b(1:n) = x(1:n);
%
% Multiply U * X = Y.
%
for k = 1 : n
ilo = max ( 1, k - mu );
for i = ilo : k - 1
b(i) = b(i) + a_lu(mu+1+i-k,k) * b(k);
end
b(k) = a_lu(mu+1,k) * b(k);
end
%
% Multiply L * Y = B.
%
for k = n : -1 : 1
jhi = min ( k + mu, n );
for j = k + 1 : jhi
b(j) = b(j) + a_lu(mu+1+k-j,j) * b(k);
end
b(k) = a_lu(mu+1,k) * b(k);
end
return
end
|
#ifndef OPENMC_TALLIES_FILTER_DELAYEDGROUP_H
#define OPENMC_TALLIES_FILTER_DELAYEDGROUP_H
#include <vector>
#include <gsl/gsl>
#include "openmc/tallies/filter.h"
namespace openmc {
//==============================================================================
//! Bins outgoing fission neutrons in their delayed groups.
//!
//! The get_all_bins functionality is not actually used. The bins are manually
//! iterated over in the scoring subroutines.
//==============================================================================
class DelayedGroupFilter : public Filter
{
public:
//----------------------------------------------------------------------------
// Constructors, destructors
~DelayedGroupFilter() = default;
//----------------------------------------------------------------------------
// Methods
std::string type() const override {return "delayedgroup";}
void from_xml(pugi::xml_node node) override;
void get_all_bins(const Particle& p, TallyEstimator estimator, FilterMatch& match)
const override;
void to_statepoint(hid_t filter_group) const override;
std::string text_label(int bin) const override;
//----------------------------------------------------------------------------
// Accessors
const std::vector<int>& groups() const { return groups_; }
void set_groups(gsl::span<int> groups);
private:
//----------------------------------------------------------------------------
// Data members
std::vector<int> groups_;
};
} // namespace openmc
#endif // OPENMC_TALLIES_FILTER_DELAYEDGROUP_H
|
\index{numbering}
The UFC specification dictates a certain numbering of the vertices,
edges etc. of the cells of a finite element mesh. First, an \emph{ad
hoc} numbering is picked for the vertices of each cell. Then, the
remaining entities are ordered based on a simple rule, as described in
detail below.
\section{Basic concepts}
\index{mesh entity}
\index{topological dimension}
The topological entities of a cell (or mesh) are referred to as
\emph{mesh entities}. A mesh entity can be identified by a pair
$(d, i)$, where $d$ is the topological dimension of the mesh entity and $i$
is a unique index of the mesh entity. Mesh entities are numbered
within each topological dimension from $0$ to $n_d-1$, where $n_d$ is
the number of mesh entities of topological dimension $d$.
For convenience, mesh entities of topological dimension $0$ are
referred to as \emph{vertices}, entities of dimension $1$
as \emph{edges}, entities of dimension $2$ as \emph{faces}, entities of
\emph{codimension} $1$ as \emph{facets} and entities of codimension
$0$ as \emph{cells}. These concepts are summarized in
Table~\ref{tab:entities}.
Thus, the vertices of a tetrahedron are identified as
$v_0 = (0, 0)$, $v_1 = (0, 1)$, $v_2 = (0, 2)$ and $v_3 = (0, 3)$,
the edges are
$e_0 = (1, 0)$, $e_1 = (1, 1)$, $e_2 = (1, 2)$,
$e_3 = (1, 3)$, $e_4 = (1, 4)$ and $e_5 = (1, 5)$,
the faces (facets) are
$f_0 = (2, 0)$, $f_1 = (2, 1)$, $f_2 = (2, 2)$ and $f_3 = (2, 3)$,
and the cell itself is
$c_0 = (3, 0)$.
\begin{table}
\linespread{1.2}\selectfont
\begin{center}
\begin{tabular}{|l|c|c|}
\hline
Entity & Dimension & Codimension \\
\hline
Vertex & $0$ & -- \\
Edge & $1$ & -- \\
Face & $2$ & -- \\
& & \\
Facet & -- & $1$ \\
Cell & -- & $0$ \\
\hline
\end{tabular}
\caption{Named mesh entities.}
\label{tab:entities}
\end{center}
\end{table}
\section{Numbering of vertices}
\index{vertex numbering}
For simplicial cells (intervals, triangles and tetrahedra) of a finite
element mesh, the vertices are numbered locally based on the
corresponding global vertex numbers. In particular, a tuple of
increasing local vertex numbers corresponds to a tuple of increasing
global vertex numbers. This is illustrated in
Figure~\ref{fig:numbering_example_triangles} for a mesh consisting of
two triangles.
\begin{figure}[htbp]
\begin{center}
\psfrag{v0}{$v_0$}
\psfrag{v1}{$v_1$}
\psfrag{v2}{$v_2$}
\psfrag{0}{$0$}
\psfrag{1}{$1$}
\psfrag{2}{$2$}
\psfrag{3}{$3$}
\includegraphics[width=8cm]{eps/numbering_example_triangles.eps}
\caption{The vertices of a simplicial mesh are numbered locally
based on the corresponding global vertex numbers.}
\label{fig:numbering_example_triangles}
\end{center}
\end{figure}
For non-simplicial cells (quadrilaterals and hexahedra), the numbering
is arbitrary, as long as each cell is isomorphic to the corresponding
reference cell by matching each vertex with the corresponding vertex
in the reference cell. This is illustrated in
Figure~\ref{fig:numbering_example_quadrilaterals} for a mesh
consisting of two quadrilaterals.
\begin{figure}[htbp]
\begin{center}
\psfrag{v0}{$v_0$}
\psfrag{v1}{$v_1$}
\psfrag{v2}{$v_2$}
\psfrag{v3}{$v_3$}
\psfrag{0}{$0$}
\psfrag{1}{$1$}
\psfrag{2}{$2$}
\psfrag{3}{$3$}
\psfrag{4}{$4$}
\psfrag{5}{$5$}
\includegraphics[width=8cm]{eps/numbering_example_quadrilaterals.eps}
\caption{The local numbering of vertices of a non-simplicial mesh
is arbitrary, as long as each cell is isomorphic to the
reference cell by matching each vertex to the corresponding
vertex of the reference cell.}
\label{fig:numbering_example_quadrilaterals}
\end{center}
\end{figure}
\section{Numbering of other mesh entities}
When the vertices have been numbered, the remaining mesh entities are
numbered within each topological dimension based on a
\emph{lexicographical ordering} of the corresponding ordered tuples of
\emph{non-incident vertices}.
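For concreteness, the rule can be sketched in a few lines of Python (an
illustration only; the specification itself is language-neutral):
\begin{verbatim}
from itertools import combinations

def simplex_entities(num_vertices, dim):
    # Entities of dimension dim in a simplex correspond to tuples of
    # non-incident vertices of size num_vertices - dim - 1. Sort those
    # tuples lexicographically, then return the incident vertices.
    vertices = set(range(num_vertices))
    non_incident = sorted(combinations(sorted(vertices),
                                       num_vertices - dim - 1))
    return [tuple(sorted(vertices - set(t))) for t in non_incident]

# Edges of the reference tetrahedron:
# [(2, 3), (1, 3), (1, 2), (0, 3), (0, 2), (0, 1)]
print(simplex_entities(4, 1))
\end{verbatim}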
As an illustration, consider the numbering of edges (the mesh entities
of topological dimension one) on the reference triangle in
Figure~\ref{fig:orderingexample,triangle}. To number the edges of the
reference triangle, we identify for each edge the corresponding
non-incident vertices. For each edge, there is only one such vertex
(the vertex opposite to the edge). We thus identify the three edges in
the reference triangle with the tuples $(v_0)$, $(v_1)$ and $(v_2)$. The
first of these is edge $e_0$ between vertices $v_1$ and $v_2$ opposite
to vertex $v_0$, the second is edge $e_1$ between vertices $v_0$ and
$v_2$ opposite to vertex $v_1$, and the third is edge $e_2$ between
vertices $v_0$ and $v_1$ opposite to vertex $v_2$.
Similarly, we identify the six edges of the reference tetrahedron with
the corresponding non-incident tuples $(v_0, v_1)$, $(v_0, v_2)$,
$(v_0, v_3)$, $(v_1, v_2)$, $(v_1, v_3)$ and $(v_2, v_3)$. The first of these is
edge $e_0$ between vertices $v_2$ and $v_3$ opposite to vertices $v_0$
and $v_1$ as shown in Figure~\ref{fig:orderingexample,tetrahedron}.
\begin{figure}[htbp]
\begin{center}
\psfrag{v0}{$v_0$}
\psfrag{v1}{$v_1$}
\psfrag{v2}{$v_2$}
\psfrag{e0}{$e_0$}
\includegraphics[width=5cm]{eps/ordering_example_triangle.eps}
\caption{Mesh entities are ordered based on a lexicographical ordering
of the corresponding ordered tuples of non-incident vertices.
The first edge $e_0$ is non-incident to vertex $v_0$.}
\label{fig:orderingexample,triangle}
\end{center}
\end{figure}
\begin{figure}[htbp]
\begin{center}
\psfrag{v0}{$v_0$}
\psfrag{v1}{$v_1$}
\psfrag{v2}{$v_2$}
\psfrag{v3}{$v_3$}
\psfrag{e0}{$e_0$}
\includegraphics[width=5cm]{eps/ordering_example_tetrahedron.eps}
\caption{Mesh entities are ordered based on a lexicographical ordering
of the corresponding ordered tuples of non-incident vertices.
The first edge $e_0$ is non-incident to vertices $v_0$ and $v_1$.}
\label{fig:orderingexample,tetrahedron}
\end{center}
\end{figure}
\subsection{Relative ordering}
The relative ordering of mesh entities with respect to other incident
mesh entities follows by sorting the entities by their (global)
indices. Thus, the pair of vertices incident to the first edge $e_0$
of a triangular cell is $(v_1, v_2)$, not $(v_2, v_1)$. Similarly, the
first face $f_0$ of a tetrahedral cell is incident to vertices $(v_1,
v_2, v_3)$.
For simplicial cells, the relative ordering in combination with the
convention of numbering the vertices locally based on global vertex
indices means that two incident cells will always agree on the
orientation of incident subsimplices. Thus, two incident triangles
will agree on the orientation of the common edge and two incident
tetrahedra will agree on the orientation of the common edge(s) and the
orientation of the common face (if any). This is illustrated in
Figure~\ref{fig:orientation_example_triangles} for two incident
triangles sharing a common edge.
\begin{figure}[htbp]
\begin{center}
\psfrag{v0}{$v_0$}
\psfrag{v1}{$v_1$}
\psfrag{v2}{$v_2$}
\psfrag{v3}{$v_3$}
\includegraphics[width=9cm]{eps/orientation_example_triangles.eps}
\caption{Two incident triangles will always agree on the
orientation of the common edge.}
\label{fig:orientation_example_triangles}
\end{center}
\end{figure}
\subsection{Limitations}
The UFC specification is only concerned with the ordering of mesh
entities with respect to entities of larger topological dimension. In
other words, the UFC specification is only concerned with the ordering
of incidence relations of the class $d - d'$ where $d > d'$. For
example, the UFC specification is not concerned with the ordering of
incidence relations of the class $0 - 1$, that is, the ordering of
edges incident to vertices.
\newpage
\section{Numbering schemes for reference cells}
The numbering scheme is demonstrated below for cells
isomorphic to each of the five reference cells.
\subsection{Numbering of mesh entities on intervals}
\begin{minipage}{\textwidth}
\linespread{1.2}\selectfont
\begin{center}
\begin{tabular}{|c|c|c|}
\hline
Entity & Incident vertices & Non-incident vertices \\
\hline
\hline
$v_0 = (0, 0)$ & $(v_0)$ & $(v_1)$ \\
\hline
$v_1 = (0, 1)$ & $(v_1)$ & $(v_0)$ \\
\hline
$c_0 = (1, 0)$ & $(v_0, v_1)$ & $\emptyset$ \\
\hline
\end{tabular}
\end{center}
\end{minipage}
\subsection{Numbering of mesh entities on triangular cells}
%
\begin{minipage}{\textwidth}
\linespread{1.2}\selectfont
\begin{center}
\begin{tabular}{|c|c|c|}
\hline
Entity & Incident vertices & Non-incident vertices \\
\hline
\hline
$v_0 = (0, 0)$ & $(v_0)$ & $(v_1, v_2)$ \\
\hline
$v_1 = (0, 1)$ & $(v_1)$ & $(v_0, v_2)$ \\
\hline
$v_2 = (0, 2)$ & $(v_2)$ & $(v_0, v_1)$ \\
\hline
$e_0 = (1, 0)$ & $(v_1, v_2)$ & $(v_0)$ \\
\hline
$e_1 = (1, 1)$ & $(v_0, v_2)$ & $(v_1)$ \\
\hline
$e_2 = (1, 2)$ & $(v_0, v_1)$ & $(v_2)$ \\
\hline
$c_0 = (2, 0)$ & $(v_0, v_1, v_2)$ & $\emptyset$ \\
\hline
\end{tabular}
\end{center}
\end{minipage}
\subsection{Numbering of mesh entities on quadrilateral cells}
%
\begin{minipage}{\textwidth}
\linespread{1.1}\selectfont
\begin{center}
\begin{tabular}{|c|c|c|}
\hline
Entity & Incident vertices & Non-incident vertices \\
\hline
\hline
$v_0 = (0, 0)$ & $(v_0)$ & $(v_1, v_2, v_3)$ \\
\hline
$v_1 = (0, 1)$ & $(v_1)$ & $(v_0, v_2, v_3)$ \\
\hline
$v_2 = (0, 2)$ & $(v_2)$ & $(v_0, v_1, v_3)$ \\
\hline
$v_3 = (0, 3)$ & $(v_3)$ & $(v_0, v_1, v_2)$ \\
\hline
$e_0 = (1, 0)$ & $(v_2, v_3)$ & $(v_0, v_1)$ \\
\hline
$e_1 = (1, 1)$ & $(v_1, v_2)$ & $(v_0, v_3)$ \\
\hline
$e_2 = (1, 2)$ & $(v_0, v_3)$ & $(v_1, v_2)$ \\
\hline
$e_3 = (1, 3)$ & $(v_0, v_1)$ & $(v_2, v_3)$ \\
\hline
$c_0 = (2, 0)$ & $(v_0, v_1, v_2, v_3)$ & $\emptyset$ \\
\hline
\end{tabular}
\end{center}
\end{minipage}
\subsection{Numbering of mesh entities on tetrahedral cells}
%
\begin{minipage}{\textwidth}
\linespread{1.1}\selectfont
\begin{center}
\begin{tabular}{|c|c|c|}
\hline
Entity & Incident vertices & Non-incident vertices \\
\hline
\hline
$v_0 = (0, 0)$ & $(v_0)$ & $(v_1, v_2, v_3)$ \\
\hline
$v_1 = (0, 1)$ & $(v_1)$ & $(v_0, v_2, v_3)$ \\
\hline
$v_2 = (0, 2)$ & $(v_2)$ & $(v_0, v_1, v_3)$ \\
\hline
$v_3 = (0, 3)$ & $(v_3)$ & $(v_0, v_1, v_2)$ \\
\hline
$e_0 = (1, 0)$ & $(v_2, v_3)$ & $(v_0, v_1)$ \\
\hline
$e_1 = (1, 1)$ & $(v_1, v_3)$ & $(v_0, v_2)$ \\
\hline
$e_2 = (1, 2)$ & $(v_1, v_2)$ & $(v_0, v_3)$ \\
\hline
$e_3 = (1, 3)$ & $(v_0, v_3)$ & $(v_1, v_2)$ \\
\hline
$e_4 = (1, 4)$ & $(v_0, v_2)$ & $(v_1, v_3)$ \\
\hline
$e_5 = (1, 5)$ & $(v_0, v_1)$ & $(v_2, v_3)$ \\
\hline
$f_0 = (2, 0)$ & $(v_1, v_2, v_3)$ & $(v_0)$ \\
\hline
$f_1 = (2, 1)$ & $(v_0, v_2, v_3)$ & $(v_1)$ \\
\hline
$f_2 = (2, 2)$ & $(v_0, v_1, v_3)$ & $(v_2)$ \\
\hline
$f_3 = (2, 3)$ & $(v_0, v_1, v_2)$ & $(v_3)$ \\
\hline
$c_0 = (3, 0)$ & $(v_0, v_1, v_2, v_3)$ & $\emptyset$ \\
\hline
\end{tabular}
\end{center}
\end{minipage}
\vfill
\newpage
\subsection{Numbering of mesh entities on hexahedral cells}
\begin{minipage}{\textwidth}
\small
\linespread{1.2}\selectfont
\begin{center}
\begin{tabular}{|c|c|c|}
\hline
Entity & Incident vertices & Non-incident vertices \\
\hline
\hline
$v_0 = (0, 0)$ & $(v_0)$ & $(v_1, v_2, v_3, v_4, v_5, v_6, v_7)$ \\
\hline
$v_1 = (0, 1)$ & $(v_1)$ & $(v_0, v_2, v_3, v_4, v_5, v_6, v_7)$ \\
\hline
$v_2 = (0, 2)$ & $(v_2)$ & $(v_0, v_1, v_3, v_4, v_5, v_6, v_7)$ \\
\hline
$v_3 = (0, 3)$ & $(v_3)$ & $(v_0, v_1, v_2, v_4, v_5, v_6, v_7)$ \\
\hline
$v_4 = (0, 4)$ & $(v_4)$ & $(v_0, v_1, v_2, v_3, v_5, v_6, v_7)$ \\
\hline
$v_5 = (0, 5)$ & $(v_5)$ & $(v_0, v_1, v_2, v_3, v_4, v_6, v_7)$ \\
\hline
$v_6 = (0, 6)$ & $(v_6)$ & $(v_0, v_1, v_2, v_3, v_4, v_5, v_7)$ \\
\hline
$v_7 = (0, 7)$ & $(v_7)$ & $(v_0, v_1, v_2, v_3, v_4, v_5, v_6)$ \\
\hline
$e_0 = (1, 0)$ & $(v_6, v_7)$ & $(v_0, v_1, v_2, v_3, v_4, v_5)$ \\
\hline
$e_1 = (1, 1)$ & $(v_5, v_6)$ & $(v_0, v_1, v_2, v_3, v_4, v_7)$ \\
\hline
$e_2 = (1, 2)$ & $(v_4, v_7)$ & $(v_0, v_1, v_2, v_3, v_5, v_6)$ \\
\hline
$e_3 = (1, 3)$ & $(v_4, v_5)$ & $(v_0, v_1, v_2, v_3, v_6, v_7)$ \\
\hline
$e_4 = (1, 4)$ & $(v_3, v_7)$ & $(v_0, v_1, v_2, v_4, v_5, v_6)$ \\
\hline
$e_5 = (1, 5)$ & $(v_2, v_6)$ & $(v_0, v_1, v_3, v_4, v_5, v_7)$ \\
\hline
$e_6 = (1, 6)$ & $(v_2, v_3)$ & $(v_0, v_1, v_4, v_5, v_6, v_7)$ \\
\hline
$e_7 = (1, 7)$ & $(v_1, v_5)$ & $(v_0, v_2, v_3, v_4, v_6, v_7)$ \\
\hline
$e_8 = (1, 8)$ & $(v_1, v_2)$ & $(v_0, v_3, v_4, v_5, v_6, v_7)$ \\
\hline
$e_9 = (1, 9)$ & $(v_0, v_4)$ & $(v_1, v_2, v_3, v_5, v_6, v_7)$ \\
\hline
$e_{10} = (1, 10)$ & $(v_0, v_3)$ & $(v_1, v_2, v_4, v_5, v_6, v_7)$ \\
\hline
$e_{11} = (1, 11)$ & $(v_0, v_1)$ & $(v_2, v_3, v_4, v_5, v_6, v_7)$ \\
\hline
$f_0 = (2, 0)$ & $(v_4, v_5, v_6, v_7)$ & $(v_0, v_1, v_2, v_3)$ \\
\hline
$f_1 = (2, 1)$ & $(v_2, v_3, v_6, v_7)$ & $(v_0, v_1, v_4, v_5)$ \\
\hline
$f_2 = (2, 2)$ & $(v_1, v_2, v_5, v_6)$ & $(v_0, v_3, v_4, v_7)$ \\
\hline
$f_3 = (2, 3)$ & $(v_0, v_3, v_4, v_7)$ & $(v_1, v_2, v_5, v_6)$ \\
\hline
$f_4 = (2, 4)$ & $(v_0, v_1, v_4, v_5)$ & $(v_2, v_3, v_6, v_7)$ \\
\hline
$f_5 = (2, 5)$ & $(v_0, v_1, v_2, v_3)$ & $(v_4, v_5, v_6, v_7)$ \\
\hline
$c_0 = (3, 0)$ & $(v_0, v_1, v_2, v_3, v_4, v_5, v_6, v_7)$ & $\emptyset$ \\
\hline
\end{tabular}
\end{center}
\end{minipage}
|
lemma ball_divide_subset: "d \<ge> 1 \<Longrightarrow> ball x (e/d) \<subseteq> ball x e" |
cc ------------ dpmjet3.4 - authors: S.Roesler, R.Engel, J.Ranft -------
cc -------- phojet1.12-40 - authors: S.Roesler, R.Engel, J.Ranft -------
cc - oct'13 -------
cc ----------- pythia-6.4 - authors: Torbjorn Sjostrand, Lund'10 -------
cc ---------------------------------------------------------------------
cc converted for use with FLUKA -------
cc - oct'13 -------
C...PYTUNE
C...Presets for a few specific underlying-event and min-bias tunes
C...Note some tunes require external pdfs to be linked (e.g. 105:QW),
C...others require particular versions of pythia (e.g. the SCI and GAL
C...models). See below for details.
      SUBROUTINE PYTUNE(MYTUNE)
C
C ITUNE NAME (detailed descriptions below)
C 0 Default : No settings changed => defaults.
C
C ====== Old UE, Q2-ordered showers ====================================
C 100 A : Rick Field's CDF Tune A (Oct 2002)
C 101 AW : Rick Field's CDF Tune AW (Apr 2006)
C 102 BW : Rick Field's CDF Tune BW (Apr 2006)
C 103 DW : Rick Field's CDF Tune DW (Apr 2006)
C 104 DWT : As DW but with slower UE ECM-scaling (Apr 2006)
C 105 QW : Rick Field's CDF Tune QW using CTEQ6.1M (?)
C 106 ATLAS-DC2: Arthur Moraes' (old) ATLAS tune ("Rome") (?)
C 107 ACR : Tune A modified with new CR model (Mar 2007)
C 108 D6 : Rick Field's CDF Tune D6 using CTEQ6L1 (?)
C 109 D6T : Rick Field's CDF Tune D6T using CTEQ6L1 (?)
C ---- Professor Tunes : 110+ (= 100+ with Professor's tune to LEP) ----
C 110 A-Pro : Tune A, with LEP tune from Professor (Oct 2008)
C 111 AW-Pro : Tune AW, -"- (Oct 2008)
C 112 BW-Pro : Tune BW, -"- (Oct 2008)
C 113 DW-Pro : Tune DW, -"- (Oct 2008)
C 114 DWT-Pro : Tune DWT, -"- (Oct 2008)
C 115 QW-Pro : Tune QW, -"- (Oct 2008)
C 116 ATLAS-DC2-Pro: ATLAS-DC2 / Rome, -"- (Oct 2008)
C 117 ACR-Pro : Tune ACR, -"- (Oct 2008)
C 118 D6-Pro : Tune D6, -"- (Oct 2008)
C 119 D6T-Pro : Tune D6T, -"- (Oct 2008)
C ---- Professor's Q2-ordered Perugia Tune : 129 -----------------------
C 129 Pro-Q2O : Professor Q2-ordered tune (Feb 2009)
C ---- LHC tune variations on Pro-Q2O
C 136 Q12-F1 : Variation with wide fragmentation function (Mar 2012)
C 137 Q12-F2 : Variation with narrow fragmentation function (Mar 2012)
C
C ====== Intermediate and Hybrid Models ================================
C 200 IM 1 : Intermediate model: new UE, Q2-ord. showers, new CR
C 201 APT : Tune A w. pT-ordered FSR (Mar 2007)
C 211 APT-Pro : Tune APT, with LEP tune from Professor (Oct 2008)
C 221 Perugia APT : "Perugia" update of APT-Pro (Feb 2009)
C 226 Perugia APT6 : "Perugia" update of APT-Pro w. CTEQ6L1 (Feb 2009)
C
C ====== New UE, interleaved pT-ordered showers, annealing CR ==========
C 300 S0 : Sandhoff-Skands Tune using the S0 CR model (Apr 2006)
C 301 S1 : Sandhoff-Skands Tune using the S1 CR model (Apr 2006)
C 302 S2 : Sandhoff-Skands Tune using the S2 CR model (Apr 2006)
C 303 S0A : S0 with "Tune A" UE energy scaling (Apr 2006)
C 304 NOCR : New UE "best try" without col. rec. (Apr 2006)
C 305 Old : New UE, original (primitive) col. rec. (Aug 2004)
C 306 ATLAS-CSC: Arthur Moraes' (new) ATLAS tune w. CTEQ6L1 (?)
C ---- Professor Tunes : 310+ (= 300+ with Professor's tune to LEP)
C 310 S0-Pro : S0 with updated LEP pars from Professor (Oct 2008)
C 311 S1-Pro : S1 -"- (Oct 2008)
C 312 S2-Pro : S2 -"- (Oct 2008)
C 313 S0A-Pro : S0A -"- (Oct 2008)
C 314 NOCR-Pro : NOCR -"- (Oct 2008)
C 315 Old-Pro : Old -"- (Oct 2008)
C 316 ATLAS MC08 : pT-ordered showers, CTEQ6L1 (2008)
C ---- Peter's Perugia Tunes : 320+ ------------------------------------
C 320 Perugia 0 : "Perugia" update of S0-Pro (Feb 2009)
C 321 Perugia HARD : More ISR, More FSR, Less MPI, Less BR, Less HAD
C 322 Perugia SOFT : Less ISR, Less FSR, More MPI, More BR, More HAD
C 323 Perugia 3 : Alternative to Perugia 0, with different ISR/MPI
C balance & different scaling to LHC & RHIC (Feb 2009)
C 324 Perugia NOCR : "Perugia" update of NOCR-Pro (Feb 2009)
C 325 Perugia * : "Perugia" Tune w. (external) MRSTLO* PDFs (Feb 2009)
C 326 Perugia 6 : "Perugia" Tune w. (external) CTEQ6L1 PDFs (Feb 2009)
C 327 Perugia 10: Alternative to Perugia 0, with more FSR (May 2010)
C off ISR, more BR breakup, more strangeness
C 328 Perugia K : Alternative to Perugia 2010, with a (May 2010)
C K-factor applied to MPI cross sections
C ---- Professor's pT-ordered Perugia Tune : 329 -----------------------
C 329 Pro-pTO : Professor pT-ordered tune w. S0 CR model (Feb 2009)
C ---- Tunes introduced in 6.4.23:
C 330 ATLAS MC09 : pT-ordered showers, LO* PDFs (2009)
C 331 ATLAS MC09c : pT-ordered showers, LO* PDFs, better CR (2009)
C 334 Perugia 10 NOCR : Perugia 2010 with no CR, less MPI (Oct 2010)
C 335 Pro-pT* : Professor Tune with LO* (Mar 2009)
C 336 Pro-pT6 : Professor Tune with CTEQ6L1 (Mar 2009)
C 339 Pro-pT** : Professor Tune with LO** (Mar 2009)
C 340 AMBT1 : First ATLAS tune including 7 TeV data (May 2010)
C 341 Z1 : First CMS tune including 7 TeV data (Aug 2010)
C 342 Z1-LEP : CMS tune Z1, with improved LEP parameters (Oct 2010)
C 343 Z2 : Retune of Z1 by Field w CTEQ6L1 PDFs (2010)
C 344 Z2-LEP : Retune of Z1 by Skands w CTEQ6L1 PDFs (Feb 2011)
C 345 AMBT2B-CT6L : 2nd ATLAS MB tune, vers 'B', w CTEQ6L1 (Jul 2011)
C 346 AUET2B-CT6L : UE tune accompanying AMBT2B (Jul 2011)
C 347 AUET2B-CT66 : AUET2 with CTEQ 6.6 NLO PDFs (Nov 2011)
C 348 AUET2B-CT10 : AUET2 with CTEQ 10 NLO PDFs (Nov 2011)
C 349 AUET2B-NN21 : AUET2 with NNPDF 2.1 NLO PDFs (Nov 2011)
C 350 Perugia 2011 : Retune of Perugia 2010 incl 7-TeV data (Mar 2011)
C 351 P2011 radHi : Variation with alphaS(pT/2)
C 352 P2011 radLo : Variation with alphaS(2pT)
C 353 P2011 mpiHi : Variation with more semi-hard MPI
C 354 P2011 noCR : Variation without color reconnections
C 355 P2011 LO** : Perugia 2011 using MSTW LO** PDFs (Mar 2011)
C 356 P2011 C6 : Perugia 2011 using CTEQ6L1 PDFs (Mar 2011)
C 357 P2011 T16 : Variation with PARP(90)=0.16 away from 7 TeV
C 358 P2011 T32 : Variation with PARP(90)=0.32 away from 7 TeV
C 359 P2011 TeV : Perugia 2011 optimized for Tevatron (Mar 2011)
C 360 S Global : Schulz-Skands Global fit (Mar 2011)
C 361 S 7000 : Schulz-Skands at 7000 GeV (Mar 2011)
C 362 S 1960 : Schulz-Skands at 1960 GeV (Mar 2011)
C 363 S 1800 : Schulz-Skands at 1800 GeV (Mar 2011)
C 364 S 900 : Schulz-Skands at 900 GeV (Mar 2011)
C 365 S 630 : Schulz-Skands at 630 GeV (Mar 2011)
C
C 370 P12 : Retune of Perugia 2011 w CTEQ6L1 (Oct 2012)
C 371 P12-radHi : Variation with alphaS(pT/2)
C 372 P12-radLo : Variation with alphaS(2pT)
C 373 P12-mpiHi : Variation with more semi-hard MPI -> more UE
C 374 P12-loCR : Variation using lower CR strength -> more Nch
C 375 P12-noCR : Variation without any color reconnections
C 376 P12-FL : Variation with more longitudinal fragmentation
C 377 P12-FT : Variation with more transverse fragmentation
C 378 P12-M8LO : Variation using MSTW 2008 LO PDFs
C 379 P12-LO** : Variation using MRST LO** PDFs
C ======= The Uppsala models ===========================================
C 1201 SCI 0 : Soft-Colour-Interaction model. Org pars (Dec 1998)
C 1202 SCI 1 : SCI 0. Tevatron MB retuned (Skands) (Oct 2006)
C 1401 GAL 0 : Generalized area-law model. Org pars (Dec 1998)
C 1402 GAL 1 : GAL 0. Tevatron MB retuned (Skands) (Oct 2006)
C
C More details:
C
C Quick Dictionary:
C BE : Bose-Einstein
C BR : Beam Remnants
C CR : Colour Reconnections
C HAD: Hadronization
C ISR/FSR: Initial-State Radiation / Final-State Radiation
C FSI: Final-State Interactions (=CR+BE)
C MB : Minimum-bias
C MI : Multiple Interactions
C UE : Underlying Event
C
C=======================================================================
C TUNES OF OLD FRAMEWORK (Q2-ORDERED ISR AND FSR, NON-INTERLEAVED UE)
C=======================================================================
C
C A (100) and AW (101). CTEQ5L parton distributions
C...*** NB : SHOULD BE RUN WITH PYTHIA 6.2 (e.g. 6.228) ***
C...*** CAN ALSO BE RUN WITH PYTHIA 6.406+
C...Key feature: extensively compared to CDF data (R.D. Field).
C...* Large starting scale for ISR (PARP(67)=4)
C...* AW has even more radiation due to smaller mu_R choice in alpha_s.
C...* See: http://www.phys.ufl.edu/~rfield/cdf/
C
C BW (102). CTEQ5L parton distributions
C...*** NB : SHOULD BE RUN WITH PYTHIA 6.2 (e.g. 6.228) ***
C...*** CAN ALSO BE RUN WITH PYTHIA 6.406+
C...Key feature: extensively compared to CDF data (R.D. Field).
C...NB: Can also be run with Pythia 6.2 or 6.312+
C...* Small starting scale for ISR (PARP(67)=1)
C...* BW has more radiation due to smaller mu_R choice in alpha_s.
C...* See: http://www.phys.ufl.edu/~rfield/cdf/
C
C DW (103) and DWT (104). CTEQ5L parton distributions
C...*** NB : SHOULD BE RUN WITH PYTHIA 6.2 (e.g. 6.228) ***
C...*** CAN ALSO BE RUN WITH PYTHIA 6.406+
C...Key feature: extensively compared to CDF data (R.D. Field).
C...NB: Can also be run with Pythia 6.2 or 6.312+
C...* Intermediate starting scale for ISR (PARP(67)=2.5)
C...* DWT has a different reference energy, the same as the "S" models
C... below, leading to more UE activity at the LHC, but less at RHIC.
C...* See: http://www.phys.ufl.edu/~rfield/cdf/
C
C QW (105). CTEQ61 parton distributions
C...*** NB : SHOULD BE RUN WITH PYTHIA 6.2 (e.g. 6.228) ***
C...*** CAN ALSO BE RUN WITH PYTHIA 6.406+
C...Key feature: uses CTEQ61 (external pdf library must be linked)
C
C ATLAS-DC2 (106). CTEQ5L parton distributions
C...*** NB : SHOULD BE RUN WITH PYTHIA 6.2 (e.g. 6.228) ***
C...*** CAN ALSO BE RUN WITH PYTHIA 6.406+
C...Key feature: tune used by the ATLAS collaboration.
C
C ACR (107). CTEQ5L parton distributions
C...*** NB : SHOULD BE RUN WITH PYTHIA 6.412+ ***
C...Key feature: Tune A modified to use annealing CR.
C...NB: PARP(85)=0D0 and amount of CR is regulated by PARP(78).
C
C D6 (108) and D6T (109). CTEQ6L parton distributions
C...Key feature: Like DW and DWT but retuned to use CTEQ6L PDFs.
C
C A-Pro, BW-Pro, etc (111, 112, etc). CTEQ5L parton distributions
C Old UE model, Q2-ordered showers.
C...Key feature: Rick Field's family of tunes revamped with the
C...Professor Q2-ordered final-state shower and fragmentation tunes
C...presented by Hendrik Hoeth at the Perugia MPI workshop in Oct 2008.
C...Key feature: improved descriptions of LEP data.
C
C Pro-Q2O (129). CTEQ5L parton distributions
C Old UE model, Q2-ordered showers.
C...Key feature: Complete retune of old model by Professor, including
C...large amounts of both LEP and Tevatron data.
C...Note that PARP(64) (ISR renormalization scale pre-factor) is quite
C...extreme in this tune, corresponding to using mu_R = pT/3 .
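C...(For orientation, a sketch inferred from the parameter description
C..."ISR renormalization scale prefactor" used below: PARP(64)
C...multiplies the ISR muR^2, i.e. muR^2 = PARP(64)*pT^2, so
C...PARP(64) = 1/9 corresponds to mu_R = pT/3.)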
C
C=======================================================================
C INTERMEDIATE/HYBRID TUNES (MIX OF NEW AND OLD SHOWER AND UE MODELS)
C=======================================================================
C
C IM1 (200). Intermediate model, Q2-ordered showers,
C CTEQ5L parton distributions
C...Key feature: new UE model w Q2-ordered showers and no interleaving.
C...* "Rap" tune of hep-ph/0402078, modified with new annealing CR.
C...* See: Sjostrand & Skands: JHEP 03(2004)053, hep-ph/0402078.
C
C APT (201). Old UE model, pT-ordered final-state showers,
C CTEQ5L parton distributions
C...Key feature: Rick Field's Tune A, but with new final-state showers
C
C APT-Pro (211). Old UE model, pT-ordered final-state showers,
C CTEQ5L parton distributions
C...Key feature: APT revamped with the Professor pT-ordered final-state
C...shower and fragmentation tunes presented by Hendrik Hoeth at the
C...Perugia MPI workshop in October 2008.
C
C Perugia-APT (221). Old UE model, pT-ordered final-state showers,
C CTEQ5L parton distributions
C...Key feature: APT-Pro with final-state showers off the MPI,
C...lower ISR renormalization scale to improve agreement with the
C...Tevatron Drell-Yan pT measurements and with improved energy scaling
C...to min-bias at 630 GeV.
C
C Perugia-APT6 (226). Old UE model, pT-ordered final-state showers,
C CTEQ6L1 parton distributions.
C...Key feature: uses CTEQ6L1 (external pdf library must be linked),
C...with a slightly lower pT0 (2.0 instead of 2.05) due to the smaller
C...UE activity obtained with CTEQ6L1 relative to CTEQ5L.
C
C=======================================================================
C TUNES OF NEW FRAMEWORK (PT-ORDERED ISR AND FSR, INTERLEAVED UE)
C=======================================================================
C
C S0 (300) and S0A (303). CTEQ5L parton distributions
C...Key feature: large amount of multiple interactions
C...* Somewhat faster than the other colour annealing scenarios.
C...* S0A has a faster energy scaling of the UE IR cutoff, borrowed
C... from Tune A, leading to less UE at the LHC, but more at RHIC.
C...* Small amount of radiation.
C...* Large amount of low-pT MI
C...* Low degree of proton lumpiness (broad matter dist.)
C...* CR Type S (driven by free triplets), of medium strength.
C...* See: Pythia6402 update notes or later.
C
C S1 (301). CTEQ5L parton distributions
C...Key feature: large amount of radiation.
C...* Large amount of low-pT perturbative ISR
C...* Large amount of FSR off ISR partons
C...* Small amount of low-pT multiple interactions
C...* Moderate degree of proton lumpiness
C...* Least aggressive CR type (S+S Type I), but with large strength
C...* See: Sandhoff & Skands: FERMILAB-CONF-05-518-T, in hep-ph/0604120.
C
C S2 (302). CTEQ5L parton distributions
C...Key feature: very lumpy proton + gg string cluster formation allowed
C...* Small amount of radiation
C...* Moderate amount of low-pT MI
C...* High degree of proton lumpiness (more spiky matter distribution)
C...* Most aggressive CR type (S+S Type II), but with small strength
C...* See: Sandhoff & Skands: FERMILAB-CONF-05-518-T, in hep-ph/0604120.
C
C NOCR (304). CTEQ5L parton distributions
C...Key feature: no colour reconnections (NB: "Best fit" only).
C...* NB: <pT>(Nch) problematic in this tune.
C...* Small amount of radiation
C...* Small amount of low-pT MI
C...* Low degree of proton lumpiness
C...* Large BR composite x enhancement factor
C...* Most clever colour flow without CR ("Lambda ordering")
C
C ATLAS-CSC (306). CTEQ6L parton distributions
C...Key feature: 11-parameter ATLAS tune of the new framework.
C...* Old (pre-annealing) colour reconnections a la 305.
C...* Uses CTEQ6 Leading Order PDFs (must be interfaced externally)
C
C S0-Pro, S1-Pro, etc (310, 311, etc). CTEQ5L parton distributions.
C...Key feature: the S0 family of tunes revamped with the Professor
C...pT-ordered final-state shower and fragmentation tunes presented by
C...Hendrik Hoeth at the Perugia MPI workshop in October 2008.
C...Key feature: improved descriptions of LEP data.
C
C ATLAS MC08 (316). CTEQ6L1 parton distributions
C...Key feature: ATLAS tune of the new framework using CTEQ6L1 PDFs
C...* Warning: uses Peterson fragmentation function for heavy quarks
C...* Uses CTEQ6 Leading Order PDFs (must be interfaced externally)
C
C Perugia-0 (320). CTEQ5L parton distributions.
C...Key feature: S0-Pro retuned to more Tevatron data. Better Drell-Yan
C...pT spectrum, better <pT>(Nch) in min-bias, and better scaling to
C...630 GeV than S0-Pro. Also has a slightly smoother mass profile, more
C...beam-remnant breakup (more baryon number transport), and suppression
C...of CR in high-pT string pieces.
C
C Perugia-HARD (321). CTEQ5L parton distributions.
C...Key feature: More ISR, More FSR, Less MPI, Less BR
C...Uses pT/2 as argument of alpha_s for ISR, and a higher Lambda_FSR.
C...Has higher pT0, less intrinsic kT, less beam remnant breakup (less
C...baryon number transport), and more fragmentation pT.
C...Multiplicity in min-bias is LOW, <pT>(Nch) is HIGH,
C...DY pT spectrum is HARD.
C
C Perugia-SOFT (322). CTEQ5L parton distributions.
C...Key feature: Less ISR, Less FSR, More MPI, More BR
C...Uses sqrt(2)*pT as argument of alpha_s for ISR, and a lower
C...Lambda_FSR. Has lower pT0, more beam remnant breakup (more baryon
C...number transport), and less fragmentation pT.
C...Multiplicity in min-bias is HIGH, <pT>(Nch) is LOW,
C...DY pT spectrum is SOFT
C
C Perugia-3 (323). CTEQ5L parton distributions.
C...Key feature: variant of Perugia-0 with more extreme energy scaling
C...properties while still agreeing with Tevatron data from 630 to 1960.
C...More ISR and less MPI than Perugia-0 at the Tevatron and above and
C...allows FSR off the active end of dipoles stretched to the remnant.
C
C Perugia-NOCR (324). CTEQ5L parton distributions.
C...Key feature: Retune of NOCR-Pro with better scaling properties to
C...lower energies and somewhat better agreement with Tevatron data
C...at 1800/1960.
C
C Perugia-* (325). MRST LO* parton distributions for generators
C...Key feature: first attempt at using the LO* distributions
C...(external pdf library must be linked).
C
C Perugia-6 (326). CTEQ6L1 parton distributions
C...Key feature: uses CTEQ6L1 (external pdf library must be linked).
C
C Perugia-2010 (327). CTEQ5L parton distributions
C...Key feature: Retune of Perugia 0 to attempt to better describe
C...strangeness yields at RHIC and at LEP. Also increased the amount
C...of FSR off ISR following the conclusions in arXiv:1001.4082.
C...Increased the amount of beam blowup, causing more baryon transport
C...into the detector, to further explore this possibility. Using
C...a new color-reconnection model that relies on determining a thrust
C...axis for the events and then computing reconnection probabilities for
C...the individual string pieces based on the actual string densities
C...per rapidity interval along that thrust direction.
C
C Perugia-K (328). CTEQ5L parton distributions
C...Key feature: uses a ``K'' factor on the MPI cross sections
C...This gives a larger rate of minijets and pushes the underlying-event
C...activity towards higher pT. To compensate for the increased activity
C...at higher pT, the infrared regularization scale is larger for this tune.
C
C Pro-pTO (329). CTEQ5L parton distributions
C...Key feature: Complete retune of new model by Professor, including
C...large amounts of both LEP and Tevatron data. Similar to S0A-Pro.
C
C ATLAS MC09 (330). LO* parton distributions
C...Key feature: Good overall agreement with Tevatron and early LHC data.
C...Similar to Perugia *.
C
C ATLAS MC09c (331). LO* parton distributions
C...Key feature: Good overall agreement with Tevatron and 900-GeV LHC data.
C...Similar to Perugia *. Retuned CR model with respect to MC09.
C
C Pro-pT* (335) LO* parton distributions
C...Key feature: Retune of Pro-PTO with MRST LO* PDFs.
C
C Pro-pT6 (336). CTEQ6L1 parton distributions
C...Key feature: Retune of Pro-PTO with CTEQ6L1 PDFs.
C
C Pro-pT** (339). LO** parton distributions
C...Key feature: Retune of Pro-PTO with MRST LO** PDFs.
C
C AMBT1 (340). LO* parton distributions
C...Key feature: First ATLAS tune including 7-TeV LHC data.
C...Mainly retuned CR and mass distribution with respect to MC09c.
C...Note: cannot be run standalone since it uses external PDFs.
C
C CMS Z1 (341). CTEQ5L parton distributions
C...Key feature: First CMS tune including 7-TeV LHC data.
C...Uses many of the features of AMBT1, but uses CTEQ5L PDFs,
C...has a lower pT0 at the Tevatron, which scales faster with energy.
C
C Z1-LEP (342). CTEQ5L parton distributions
C...Key feature: CMS tune Z1 with improved LEP parameters, mostly
C...taken from the Professor/Perugia tunes, with a few minor updates.
C
C=======================================================================
C OTHER TUNES
C=======================================================================
C
C...The GAL and SCI models (400+) are special and *SHOULD NOT* be run
C...with an unmodified Pythia distribution.
C...See http://www.isv.uu.se/thep/MC/scigal/ for more information.
C
C ::: + Future improvements?
C Include also QCD K-factor a la M. Heinz / ATLAS TDR ? RDF's QK?
C (problem: K-factor affects everything so only works as
C intended for min-bias, not for UE ... probably need a
C better long-term solution to handle UE as well. Anyway,
C Mark uses MSTP(33) and PARP(31)-PARP(33).)
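C...(Illustrative only, an assumed sketch of the standard PYTHIA 6
C...K-factor switches mentioned above, not settings made by PYTUNE:
C...    MSTP(33) = 1       ! common K factor on hard cross sections
C...    PARP(31) = 1.5D0   ! illustrative K-factor value
C...This routine itself only sets MSTP(33)=10 with PARP(32), cf. the
C...Perugia K tune (328) below.)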
C...Global statements
IMPLICIT DOUBLE PRECISION(A-H, O-Z)
C...Commonblocks.
include 'inc/pydat1'
include 'inc/pypars'
C...Internal parameters
PARAMETER(MXTUNS=500)
CHARACTER*8 CHDOC
PARAMETER (CHDOC='Oct 2012')
CHARACTER*16 CHNAMS(0:MXTUNS), CHNAME
C unvar CHARACTER*42 CHMSTJ(50), CHMSTP(100), CHPARP(100),
C unvar & CHPARJ(100), CHMSTU(101:121), CHPARU(101:121), CH40
CHARACTER*42 CHMSTJ(50), CHMSTP(100), CHPARP(100),
& CHPARJ(100), CHMSTU(101:121), CHPARU(101:121)
CHARACTER*60 CH60
CHARACTER*70 CH70
DATA (CHNAMS(I),I=0,1)/'Default',' '/
DATA (CHNAMS(I),I=100,119)/
& 'Tune A','Tune AW','Tune BW','Tune DW','Tune DWT','Tune QW',
& 'ATLAS DC2','Tune ACR','Tune D6','Tune D6T',
1 'Tune A-Pro','Tune AW-Pro','Tune BW-Pro','Tune DW-Pro',
1 'Tune DWT-Pro','Tune QW-Pro','ATLAS DC2-Pro','Tune ACR-Pro',
1 'Tune D6-Pro','Tune D6T-Pro'/
DATA (CHNAMS(I),I=120,129)/
& 9*' ','Pro-Q2O'/
DATA (CHNAMS(I),I=130,139)/
& 'Q12','Q12-radHi','Q12-radLo','Q12-mpiHi','Q12-noCR',
& 'Q12-M','Q12-F1','Q12-F2','Q12-LE','Q12-TeV'/
DATA (CHNAMS(I),I=300,309)/
& 'Tune S0','Tune S1','Tune S2','Tune S0A','NOCR','Old',
5 'ATLAS-CSC Tune','Yale Tune','Yale-K Tune',' '/
DATA (CHNAMS(I),I=310,316)/
& 'Tune S0-Pro','Tune S1-Pro','Tune S2-Pro','Tune S0A-Pro',
& 'NOCR-Pro','Old-Pro','ATLAS MC08'/
DATA (CHNAMS(I),I=320,329)/
& 'Perugia 0','Perugia HARD','Perugia SOFT',
& 'Perugia 3','Perugia NOCR','Perugia LO*',
& 'Perugia 6','Perugia 10','Perugia K','Pro-pTO'/
DATA (CHNAMS(I),I=330,349)/
& 'ATLAS MC09','ATLAS MC09c',2*' ','Perugia 10 NOCR','Pro-PT*',
& 'Pro-PT6',' ',' ','Pro-PT**',
4 'Tune AMBT1','Tune Z1','Tune Z1-LEP','Tune Z2','Tune Z2-LEP',
4 'AMBT2B-CT6L1','AUET2B-CT6L1','AUET2B-CT66','AUET2B-CT10',
4 'AUET2B-NN21'/
DATA (CHNAMS(I),I=350,359)/
& 'Perugia 2011','P2011 radHi','P2011 radLo','P2011 mpiHi',
& 'P2011 noCR','P2011 M(LO**)', 'P2011 CTEQ6L1',
& 'P2011 T16','P2011 T32','P2011 Tevatron'/
DATA (CHNAMS(I),I=360,369)/
& 'S Global','S 7000','S 1960','S 1800',
& 'S 900','S 630', 4*' '/
DATA (CHNAMS(I),I=370,379)/
& 'P12','P12-radHi','P12-radLo','P12-mpiHi','P12-loCR',
& 'P12-noCR','P12-FL','P12-FT','P12-M8LO','P12-LO**'/
DATA (CHNAMS(I),I=200,229)/
& 'IM Tune 1','Tune APT',8*' ',
& ' ','Tune APT-Pro',8*' ',
& ' ','Perugia APT',4*' ','Perugia APT6',3*' '/
DATA (CHNAMS(I),I=400,409)/
& 'GAL Tune 0','SCI Tune 0','GAL Tune 1','SCI Tune 1',6*' '/
DATA (CHMSTJ(I),I=11,20)/
& 'HAD choice of fragmentation function(s)',4*' ',
& 'HAD treatment of small-mass systems',4*' '/
DATA (CHMSTJ(I),I=41,50)/
& 'FSR type (Q2 or pT) for old framework',9*' '/
DATA (CHMSTP(I),I=1,10)/
& 2*' ','INT switch for choice of LambdaQCD',7*' '/
DATA (CHMSTP(I),I=31,40)/
& 2*' ','"K" switch for K-factor on/off & type',7*' '/
DATA (CHMSTP(I),I=51,100)/
5 'PDF set','PDF set internal (=1) or pdflib (=2)',8*' ',
6 'ISR master switch',2*' ','ISR alphaS type',2*' ',
6 'ISR coherence option for 1st emission',
6 'ISR phase space choice & ME corrections',' ',
7 'ISR IR regularization scheme',' ',
7 'IFSR scheme for non-decay FSR',8*' ',
8 'UE model',
8 'UE hadron transverse mass distribution',5*' ',
8 'BR composite scheme','BR color scheme',
9 'BR primordial kT compensation',
9 'BR primordial kT distribution',
9 'BR energy partitioning scheme',2*' ',
9 'FSI color (re-)connection model',5*' '/
DATA (CHPARP(I),I=1,10)/
& 'ME/UE LambdaQCD',9*' '/
DATA (CHPARP(I),I=31,40)/
& ' ','"K" K-factor',8*' '/
DATA (CHPARP(I),I=61,100)/
6 'ISR LambdaQCD','ISR IR cutoff',' ',
6 'ISR renormalization scale prefactor',
6 2*' ','ISR Q2max factor',3*' ',
7 'IFSR Q2max factor in non-s-channel procs',
7 'IFSR LambdaQCD (outside resonance decays)',4*' ',
7 'FSI color reco high-pT damping strength',
7 'FSI color reconnection strength',
7 'BR composite x enhancement','BR breakup suppression',
8 2*'UE IR cutoff at reference ecm',
8 2*'UE mass distribution parameter',
8 'UE gg color correlated fraction','UE total gg fraction',
8 2*' ',
8 'UE IR cutoff reference ecm',
8 'UE IR cutoff ecm scaling power',
9 'BR primordial kT width <|kT|>',' ',
9 'BR primordial kT UV cutoff',7*' '/
DATA (CHPARJ(I),I=1,30)/
& 'HAD diquark suppression','HAD strangeness suppression',
& 'HAD strange diquark suppression',
& 'HAD vector diquark suppression','HAD P(popcorn)',
& 'HAD extra popcorn B(s)-M-B(s) supp',
& 'HAD extra popcorn B-M(s)-B supp',
& 3*' ',
1 'HAD P(vector meson), u and d only',
1 'HAD P(vector meson), contains s',
1 'HAD P(vector meson), heavy quarks',7*' ',
2 'HAD fragmentation pT',' ',' ',' ',
2 'HAD eta0 suppression',"HAD eta0' suppression",4*' '/
DATA (CHPARJ(I),I=41,90)/
4 'HAD string parameter a(Meson)','HAD string parameter b',
4 2*' ','HAD string a(Baryon)-a(Meson)',
4 'HAD Lund(=0)-Bowler(=1) rQ (rc)',
4 'HAD Lund(=0)-Bowler(=1) rb',3*' ',
5 3*' ', 'HAD charm parameter','HAD bottom parameter',5*' ',
6 10*' ',10*' ',
8 'FSR LambdaQCD (inside resonance decays)',
& 'FSR IR cutoff',8*' '/
DATA (CHMSTU(I),I=111,120)/
1 ' ','INT n(flavors) for LambdaQCD',8*' '/
DATA (CHPARU(I),I=111,120)/
1 ' ','INT LambdaQCD',8*' '/
C...1) Shorthand notation
M13=MSTU(13)
M11=MSTU(11)
IF (MYTUNE.LE.MXTUNS.AND.MYTUNE.GE.0) THEN
CHNAME=CHNAMS(MYTUNE)
IF (MYTUNE.EQ.0) GOTO 9999
ELSE
CALL PYERRM(9,'(PYTUNE:) Tune number > max. Using defaults.')
GOTO 9999
ENDIF
C...2) Hello World
IF (M13.GE.1) WRITE(M11,5000) CHDOC
C...Hardcode some defaults
C...Get Lambda from PDF
MSTP(3) = 2
C...CTEQ5L PDFs
MSTP(52) = 1
MSTP(51) = 7
C... No K-factor
MSTP(33) = 0
C...3) Tune parameters
ITUNE = MYTUNE
C=======================================================================
C...ATLAS MC08
IF (ITUNE.EQ.316) THEN
IF (M13.GE.1) WRITE(M11,5010) ITUNE, CHNAME
IF (MSTP(181).LE.5.OR.(MSTP(181).EQ.6.AND.MSTP(182).LE.405))THEN
CALL PYERRM(9,'(PYTUNE:) linked PYTHIA version incompatible'//
& ' with tune.')
ENDIF
C...First set some explicit defaults from 6.4.20
C...# Old defaults
MSTJ(11) = 4
C...# Old default flavour parameters
PARJ(1) = 0.1
PARJ(2) = 0.3
PARJ(3) = 0.40
PARJ(4) = 0.05
PARJ(11) = 0.5
PARJ(12) = 0.6
PARJ(21) = 0.36
PARJ(41) = 0.30
PARJ(42) = 0.58
PARJ(46) = 1.0
PARJ(82) = 1.0
C...PDFs: CTEQ6L1 for MC08
MSTP(52)=2
MSTP(51)=10042
C...UE and ISR switches
MSTP(81)=21
MSTP(82)=4
MSTP(70)=0
MSTP(72)=1
C...CR:
MSTP(95)=2
PARP(78)=0.3
PARP(77)=0.0
PARP(80)=0.1
C...Primordial kT
PARP(91)=2.0D0
PARP(93)=5.0D0
C...MPI:
PARP(82)=2.1
PARP(83)=0.8
PARP(84)=0.7
PARP(89)=1800.0
PARP(90)=0.16
C...FSR inside resonance decays
PARJ(81)=0.29
C...Fragmentation (warning: uses Peterson)
MSTJ(11)=3
PARJ(54)=-0.07
PARJ(55)=-0.006
IF (M13.GE.1) THEN
CH60='Tuned by ATLAS, ATL-PHYS-PUB-2010-002'
WRITE(M11,5030) CH60
CH60='Physics model: '//
& 'T. Sjostrand & P. Skands, hep-ph/0408302'
WRITE(M11,5030) CH60
CH60='CR by P. Skands & D. Wicke, hep-ph/0703081'
WRITE(M11,5030) CH60
C...Output
WRITE(M11,5030) ' '
WRITE(M11,5040) 51, MSTP(51), CHMSTP(51)
WRITE(M11,5040) 52, MSTP(52), CHMSTP(52)
WRITE(M11,5040) 3, MSTP( 3), CHMSTP( 3)
IF (MSTP(70).EQ.0) THEN
WRITE(M11,5050) 62, PARP(62), CHPARP(62)
ENDIF
WRITE(M11,5040) 64, MSTP(64), CHMSTP(64)
WRITE(M11,5050) 64, PARP(64), CHPARP(64)
WRITE(M11,5040) 67, MSTP(67), CHMSTP(67)
WRITE(M11,5050) 67, PARP(67), CHPARP(67)
WRITE(M11,5040) 68, MSTP(68), CHMSTP(68)
CH60='(Note: MSTP(68) is not explicitly (re-)set by PYTUNE)'
WRITE(M11,5030) CH60
WRITE(M11,5040) 70, MSTP(70), CHMSTP(70)
WRITE(M11,5040) 72, MSTP(72), CHMSTP(72)
WRITE(M11,5050) 71, PARP(71), CHPARP(71)
WRITE(M11,5060) 81, PARJ(81), CHPARJ(81)
WRITE(M11,5060) 82, PARJ(82), CHPARJ(82)
WRITE(M11,5040) 33, MSTP(33), CHMSTP(33)
WRITE(M11,5040) 81, MSTP(81), CHMSTP(81)
WRITE(M11,5050) 82, PARP(82), CHPARP(82)
WRITE(M11,5050) 89, PARP(89), CHPARP(89)
WRITE(M11,5050) 90, PARP(90), CHPARP(90)
WRITE(M11,5040) 82, MSTP(82), CHMSTP(82)
WRITE(M11,5050) 83, PARP(83), CHPARP(83)
WRITE(M11,5050) 84, PARP(84), CHPARP(84)
WRITE(M11,5040) 88, MSTP(88), CHMSTP(88)
WRITE(M11,5040) 89, MSTP(89), CHMSTP(89)
WRITE(M11,5050) 79, PARP(79), CHPARP(79)
WRITE(M11,5050) 80, PARP(80), CHPARP(80)
WRITE(M11,5040) 91, MSTP(91), CHMSTP(91)
WRITE(M11,5050) 91, PARP(91), CHPARP(91)
WRITE(M11,5050) 93, PARP(93), CHPARP(93)
WRITE(M11,5040) 95, MSTP(95), CHMSTP(95)
IF (MSTP(95).GE.1) THEN
WRITE(M11,5050) 78, PARP(78), CHPARP(78)
IF (MSTP(95).GE.2) WRITE(M11,5050) 77, PARP(77), CHPARP(77)
ENDIF
ENDIF
C=======================================================================
C...ATLAS MC09, MC09c, AMBT1, AMBT2B, AUET2B + NLO PDF vars
C...CMS Z1 (R. Field), Z1-LEP
ELSEIF (ITUNE.EQ.330.OR.ITUNE.EQ.331.OR.ITUNE.EQ.340.OR.
& ITUNE.GE.341.AND.ITUNE.LE.349) THEN
IF (M13.GE.1) WRITE(M11,5010) ITUNE, CHNAME
IF (MSTP(181).LE.5.OR.(MSTP(181).EQ.6.AND.MSTP(182).LE.405))THEN
CALL PYERRM(9,'(PYTUNE:) linked PYTHIA version incompatible'//
& ' with tune.')
ENDIF
C...pT-ordered shower default for everything
MSTJ(41) = 12
C...FSR inside resonance decays, base value (modified by individual tunes)
PARJ(81) = 0.29
C...First set some explicit defaults from 6.4.20
IF (ITUNE.LE.341.OR.ITUNE.EQ.343) THEN
C... # Old defaults
MSTJ(11) = 4
C...# Old default flavour parameters
PARJ(1) = 0.1
PARJ(2) = 0.3
PARJ(3) = 0.40
PARJ(4) = 0.05
PARJ(11) = 0.5
PARJ(12) = 0.6
PARJ(21) = 0.36
PARJ(41) = 0.30
PARJ(42) = 0.58
PARJ(46) = 1.0
PARJ(82) = 1.0
ELSE IF (ITUNE.LE.344) THEN
C...# For Zn-LEP tunes, use tuned flavour parameters from Professor/Perugia
PARJ( 1) = 0.08D0
PARJ( 2) = 0.21D0
PARJ( 3) = 0.94
PARJ( 4) = 0.04D0
PARJ(11) = 0.35D0
PARJ(12) = 0.35D0
PARJ(13) = 0.54
PARJ(25) = 0.63
PARJ(26) = 0.12
C...# Switch on Bowler:
MSTJ(11) = 5
C...# Fragmentation
PARJ(21) = 0.34D0
PARJ(41) = 0.35D0
PARJ(42) = 0.80D0
PARJ(47) = 1.0
PARJ(81) = 0.26D0
PARJ(82) = 1.0D0
ELSE
C... A*T2 tunes, from ATL-PHYS-PUB-2011-008
PARJ( 1) = 0.073
PARJ( 2) = 0.202
PARJ( 3) = 0.950
PARJ( 4) = 0.033
PARJ(11) = 0.309
PARJ(12) = 0.402
PARJ(13) = 0.544
PARJ(25) = 0.628
PARJ(26) = 0.129
C...# Switch on Bowler:
MSTJ(11) = 5
C... # Fragmentation
PARJ(21) = 0.30
PARJ(41) = 0.368
PARJ(42) = 1.004
PARJ(47) = 0.873
PARJ(81) = 0.256
PARJ(82) = 0.830
ENDIF
C...Default scales and alphaS choices
IF (ITUNE.GE.345) THEN
MSTP(3) = 1
PARU(112) = 0.192
PARP(1) = 0.192
PARP(61) = 0.192
ENDIF
C...PDFs: MRST LO*
MSTP(52) = 2
MSTP(51) = 20650
IF (ITUNE.EQ.341.OR.ITUNE.EQ.342) THEN
C...Z1 uses CTEQ5L
MSTP(52) = 1
MSTP(51) = 7
ELSEIF (ITUNE.EQ.343.OR.ITUNE.EQ.344) THEN
C...Z2 uses CTEQ6L
MSTP(52) = 2
MSTP(51) = 10042
ELSEIF (ITUNE.EQ.345.OR.ITUNE.EQ.346) THEN
C...AMBT2B, AUET2B use CTEQ6L1
MSTP(52) = 2
MSTP(51) = 10042
ELSEIF (ITUNE.EQ.347) THEN
C...AUET2B-CT66 uses CTEQ66 NLO PDFs
MSTP(52) = 2
MSTP(51) = 10550
ELSEIF (ITUNE.EQ.348) THEN
C...AUET2B-CT10 uses CTEQ10 NLO PDFs
MSTP(52) = 2
MSTP(51) = 10800
ELSEIF (ITUNE.EQ.349) THEN
C...AUET2B-NN21 uses NNPDF 2.1 NLO PDF
MSTP(52) = 2
MSTP(51) = 192800
ENDIF
C...UE and ISR switches
MSTP(81) = 21
MSTP(82) = 4
MSTP(70) = 0
MSTP(72) = 1
C...CR:
MSTP(95) = 6
PARP(78) = 0.3
PARP(77) = 0.0
PARP(80) = 0.1
IF (ITUNE.EQ.331) THEN
PARP(78) = 0.224
ELSEIF (ITUNE.EQ.340) THEN
C...AMBT1
PARP(77) = 1.016D0
PARP(78) = 0.538D0
ELSEIF (ITUNE.GE.341.AND.ITUNE.LE.344) THEN
C...Z1 and Z2 use the AMBT1 CR values
PARP(77) = 1.016D0
PARP(78) = 0.538D0
ELSEIF (ITUNE.EQ.345) THEN
C...AMBT2B
PARP(77) = 0.357D0
PARP(78) = 0.235D0
ELSEIF (ITUNE.EQ.346) THEN
C...AUET2B
PARP(77) = 0.491D0
PARP(78) = 0.311D0
ELSEIF (ITUNE.EQ.347) THEN
C...AUET2B-CT66
PARP(77) = 0.505D0
PARP(78) = 0.385D0
ELSEIF (ITUNE.EQ.348) THEN
C...AUET2B-CT10
PARP(77) = 0.125D0
PARP(78) = 0.309D0
ELSEIF (ITUNE.EQ.349) THEN
C...AUET2B-NN21
PARP(77) = 0.498D0
PARP(78) = 0.354D0
ENDIF
C...MPI:
PARP(82) = 2.3
PARP(83) = 0.8
PARP(84) = 0.7
PARP(89) = 1800.0
PARP(90) = 0.25
IF (ITUNE.EQ.331) THEN
PARP(82) = 2.315
PARP(90) = 0.2487
ELSEIF (ITUNE.EQ.340) THEN
PARP(82) = 2.292D0
PARP(83) = 0.356D0
PARP(84) = 0.651
PARP(90) = 0.25D0
ELSEIF (ITUNE.EQ.341.OR.ITUNE.EQ.342) THEN
PARP(82) = 1.932D0
PARP(83) = 0.356D0
PARP(84) = 0.651
PARP(90) = 0.275D0
ELSEIF (ITUNE.EQ.343.OR.ITUNE.EQ.344) THEN
PARP(82) = 1.832D0
PARP(83) = 0.356D0
PARP(84) = 0.651
PARP(90) = 0.275D0
ELSEIF (ITUNE.EQ.345) THEN
PARP(82) = 2.34
PARP(83) = 0.356
PARP(84) = 0.605
PARP(90) = 0.246
ELSEIF (ITUNE.EQ.346) THEN
PARP(82) = 2.26
PARP(83) = 0.356
PARP(84) = 0.443
PARP(90) = 0.249
ELSEIF (ITUNE.EQ.347) THEN
PARP(82) = 1.87
PARP(83) = 0.356
PARP(84) = 0.561
PARP(90) = 0.189
ELSEIF (ITUNE.EQ.348) THEN
PARP(82) = 1.89
PARP(83) = 0.356
PARP(84) = 0.415
PARP(90) = 0.182
ELSEIF (ITUNE.EQ.349) THEN
PARP(82) = 1.86
PARP(83) = 0.356
PARP(84) = 0.588
PARP(90) = 0.177
ENDIF
C...Primordial kT
PARP(91) = 2.0D0
PARP(93) = 5D0
IF (ITUNE.GE.340) THEN
PARP(93) = 10D0
ENDIF
IF (ITUNE.GE.345) THEN
PARP(91) = 2.0
ENDIF
C...ISR
IF (ITUNE.EQ.345.OR.ITUNE.EQ.346) THEN
MSTP(64) = 2
PARP(62) = 1.13
PARP(64) = 0.68
PARP(67) = 1.0
ELSE IF (ITUNE.EQ.347) THEN
MSTP(64) = 2
PARP(62) = 0.946
PARP(64) = 1.032
PARP(67) = 1.0
ELSE IF (ITUNE.EQ.348) THEN
MSTP(64) = 2
PARP(62) = 0.312
PARP(64) = 0.939
PARP(67) = 1.0
ELSE IF (ITUNE.EQ.349) THEN
MSTP(64) = 2
PARP(62) = 1.246
PARP(64) = 0.771
PARP(67) = 1.0
ELSE IF (ITUNE.GE.340) THEN
PARP(62) = 1.025
ENDIF
C...FSR off ISR (LambdaQCD) for A*ET2B tunes
IF (ITUNE.GE.345) THEN
MSTP(72) = 2
PARP(72) = 0.527
IF (ITUNE.EQ.348) THEN
PARP(72) = 0.537
ENDIF
ENDIF
IF (M13.GE.1) THEN
IF (ITUNE.LT.340) THEN
CH60='Tuned by ATLAS, ATL-PHYS-PUB-2010-002'
ELSEIF (ITUNE.EQ.340) THEN
CH60='Tuned by ATLAS, ATLAS-CONF-2010-031'
ELSEIF (ITUNE.EQ.341) THEN
CH60='AMBT1 Tuned by ATLAS, ATLAS-CONF-2010-031'
WRITE(M11,5030) CH60
CH60='Z1 variation tuned by R. D. Field (CMS)'
ELSEIF (ITUNE.EQ.342) THEN
CH60='AMBT1 Tuned by ATLAS, ATLAS-CONF-2010-031'
WRITE(M11,5030) CH60
CH60='Z1 variation retuned by R. D. Field (CMS)'
WRITE(M11,5030) CH60
CH60='Z1-LEP variation retuned by Professor / P. Skands'
ELSEIF (ITUNE.EQ.343) THEN
CH60='AMBT1 Tuned by ATLAS, ATLAS-CONF-2010-031'
WRITE(M11,5030) CH60
CH60='Z2 variation retuned by R. D. Field (CMS)'
ELSEIF (ITUNE.EQ.344) THEN
CH60='AMBT1 Tuned by ATLAS, ATLAS-CONF-2010-031'
WRITE(M11,5030) CH60
CH60='Z2 variation retuned by R. D. Field (CMS)'
WRITE(M11,5030) CH60
CH60='Z2-LEP variation retuned by Professor / P. Skands'
ELSEIF (ITUNE.EQ.345.OR.ITUNE.EQ.346) THEN
CH60='A*T2B tunes by ATLAS, ATL-PHYS-PUB-2011-009'
ELSEIF (ITUNE.GE.347) THEN
CH60='A*T2B-NLO tunes by ATLAS, ATL-PHYS-PUB-2011-014'
WRITE(M11,5030) CH60
CH60='Warning: NLO PDFs are NOT recommended!'
ENDIF
WRITE(M11,5030) CH60
CH60='Physics Model: '//
& 'T. Sjostrand & P. Skands, hep-ph/0408302'
WRITE(M11,5030) CH60
CH60='CR by P. Skands & D. Wicke, hep-ph/0703081'
WRITE(M11,5030) CH60
C...Output
WRITE(M11,5030) ' '
WRITE(M11,5040) 51, MSTP(51), CHMSTP(51)
WRITE(M11,5040) 52, MSTP(52), CHMSTP(52)
WRITE(M11,5040) 3, MSTP( 3), CHMSTP( 3)
IF (MSTP(3).EQ.1) THEN
WRITE(M11,6100) 112, MSTU(112), CHMSTU(112)
WRITE(M11,6110) 112, PARU(112), CHPARU(112)
WRITE(M11,5050) 1, PARP(1) , CHPARP( 1)
ENDIF
WRITE(M11,5060) 81, PARJ(81), CHPARJ(81)
IF (MSTP(3).EQ.1) THEN
WRITE(M11,5050) 72, PARP(72) , CHPARP( 72)
WRITE(M11,5050) 61, PARP(61) , CHPARP( 61)
ENDIF
WRITE(M11,5040) 64, MSTP(64), CHMSTP(64)
WRITE(M11,5050) 64, PARP(64), CHPARP(64)
WRITE(M11,5040) 67, MSTP(67), CHMSTP(67)
WRITE(M11,5050) 67, PARP(67), CHPARP(67)
WRITE(M11,5040) 68, MSTP(68), CHMSTP(68)
CH60='(Note: MSTP(68) is not explicitly (re-)set by PYTUNE)'
WRITE(M11,5030) CH60
WRITE(M11,5040) 70, MSTP(70), CHMSTP(70)
IF (MSTP(70).EQ.0) THEN
WRITE(M11,5050) 62, PARP(62), CHPARP(62)
ENDIF
WRITE(M11,5040) 72, MSTP(72), CHMSTP(72)
WRITE(M11,5050) 71, PARP(71), CHPARP(71)
WRITE(M11,5050) 72, PARP(72), CHPARP(72)
WRITE(M11,5060) 82, PARJ(82), CHPARJ(82)
WRITE(M11,5040) 33, MSTP(33), CHMSTP(33)
WRITE(M11,5040) 81, MSTP(81), CHMSTP(81)
WRITE(M11,5050) 82, PARP(82), CHPARP(82)
WRITE(M11,5050) 89, PARP(89), CHPARP(89)
WRITE(M11,5050) 90, PARP(90), CHPARP(90)
WRITE(M11,5040) 82, MSTP(82), CHMSTP(82)
WRITE(M11,5050) 83, PARP(83), CHPARP(83)
WRITE(M11,5050) 84, PARP(84), CHPARP(84)
WRITE(M11,5040) 88, MSTP(88), CHMSTP(88)
WRITE(M11,5040) 89, MSTP(89), CHMSTP(89)
WRITE(M11,5050) 79, PARP(79), CHPARP(79)
WRITE(M11,5050) 80, PARP(80), CHPARP(80)
WRITE(M11,5040) 91, MSTP(91), CHMSTP(91)
WRITE(M11,5050) 91, PARP(91), CHPARP(91)
WRITE(M11,5050) 93, PARP(93), CHPARP(93)
WRITE(M11,5040) 95, MSTP(95), CHMSTP(95)
IF (MSTP(95).GE.1) THEN
WRITE(M11,5050) 78, PARP(78), CHPARP(78)
IF (MSTP(95).GE.2) WRITE(M11,5050) 77, PARP(77), CHPARP(77)
ENDIF
ENDIF
C=======================================================================
C...S0, S1, S2, S0A, NOCR, Rap,
C...S0-Pro, S1-Pro, S2-Pro, S0A-Pro, NOCR-Pro, Rap-Pro
C...Perugia 0, HARD, SOFT, 3, LO*, 6, 2010, K
C...Pro-pTO, Pro-PT*, Pro-PT6, Pro-PT**
C...Perugia 2011 (incl variations)
C...Schulz-Skands tunes
ELSEIF ((ITUNE.GE.300.AND.ITUNE.LE.305)
& .OR.(ITUNE.GE.310.AND.ITUNE.LE.315)
& .OR.(ITUNE.GE.320.AND.ITUNE.LE.329)
& .OR.(ITUNE.GE.334.AND.ITUNE.LE.336).OR.ITUNE.EQ.339
& .OR.(ITUNE.GE.350.AND.ITUNE.LE.379)) THEN
IF (M13.GE.1) WRITE(M11,5010) ITUNE, CHNAME
IF (MSTP(181).LE.5.OR.(MSTP(181).EQ.6.AND.MSTP(182).LE.405))THEN
CALL PYERRM(9,'(PYTUNE:) linked PYTHIA version incompatible'//
& ' with tune.')
ELSEIF(ITUNE.GE.320.AND.ITUNE.LE.339.AND.ITUNE.NE.324.AND.
& ITUNE.NE.334.AND.
& (MSTP(181).LE.5.OR.(MSTP(181).EQ.6.AND.MSTP(182).LE.419)))
& THEN
CALL PYERRM(9,'(PYTUNE:) linked PYTHIA version incompatible'//
& ' with tune.')
ELSEIF((ITUNE.EQ.327.OR.ITUNE.EQ.328.OR.ITUNE.GE.350).AND.
& (MSTP(181).LE.5.OR.
& (MSTP(181).EQ.6.AND.MSTP(182).LE.422)))
& THEN
CALL PYERRM(9,'(PYTUNE:) linked PYTHIA version incompatible'//
& ' with tune.')
ENDIF
C...Use 327 as base tune for 350-359 and 370-379 (Perugia 2011 and 2012)
ITUNSV = ITUNE
IF (ITUNE.GE.350.AND.ITUNE.LE.359) ITUNE = 327
IF (ITUNE.GE.370.AND.ITUNE.LE.379) ITUNE = 327
C...Use 320 as base tune for 360+ (Schulz-Skands)
IF (ITUNE.GE.360) ITUNE = 320
C...HAD: Use Professor's LEP pars if ITUNE >= 310
C...(i.e., for S0-Pro, S1-Pro etc, and for Perugia tunes)
IF (ITUNE.LT.310) THEN
C...# Old defaults
MSTJ(11) = 4
C...# Old default flavour parameters
PARJ(1) = 0.1
PARJ(2) = 0.3
PARJ(3) = 0.40
PARJ(4) = 0.05
PARJ(11) = 0.5
PARJ(12) = 0.6
PARJ(21) = 0.36
PARJ(41) = 0.30
PARJ(42) = 0.58
PARJ(46) = 1.0
PARJ(82) = 1.0
ELSEIF (ITUNE.GE.310) THEN
C...# Tuned flavour parameters:
PARJ(1) = 0.073
PARJ(2) = 0.2
PARJ(3) = 0.94
PARJ(4) = 0.032
PARJ(11) = 0.31
PARJ(12) = 0.4
PARJ(13) = 0.54
PARJ(25) = 0.63
PARJ(26) = 0.12
C...# Always use pT-ordered shower:
MSTJ(41) = 12
C...# Switch on Bowler:
MSTJ(11) = 5
C...# Fragmentation
PARJ(21) = 0.313
PARJ(41) = 0.49
PARJ(42) = 1.2
PARJ(47) = 1.0
PARJ(81) = 0.257
PARJ(82) = 0.8
C...HAD: fragmentation pT (only if not using professor) - HARD and SOFT
IF (ITUNE.EQ.321) PARJ(21) = 0.34D0
IF (ITUNE.EQ.322) PARJ(21) = 0.28D0
C...HAD: P-2010 and P-K use different strangeness parameters
C... indicated by LEP and RHIC yields.
C...(only 5% different from Professor values, so should be within acceptable
C...theoretical uncertainty range)
C...(No attempt made to retune other flavor parameters post facto)
IF (ITUNE.EQ.327.OR.ITUNE.EQ.328.OR.ITUNE.EQ.334) THEN
PARJ( 1) = 0.08D0
PARJ( 2) = 0.21D0
PARJ( 4) = 0.04D0
PARJ(11) = 0.35D0
PARJ(12) = 0.35D0
PARJ(21) = 0.36D0
PARJ(41) = 0.35D0
PARJ(42) = 0.90D0
PARJ(81) = 0.26D0
PARJ(82) = 1.0D0
ENDIF
ENDIF
C...Remove middle digit now for Professor variants, since identical pars
ITUNEB=ITUNE
IF (ITUNE.GE.310.AND.ITUNE.LE.319) THEN
ITUNEB=(ITUNE/100)*100+MOD(ITUNE,10)
ENDIF
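C...(Worked example: ITUNE=313 (S0A-Pro) gives
C...ITUNEB = (313/100)*100 + MOD(313,10) = 300 + 3 = 303, i.e. S0A.)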
C...PDFs: all use CTEQ5L as starting point
MSTP(52) = 1
MSTP(51) = 7
IF (ITUNE.EQ.325.OR.ITUNE.EQ.335) THEN
C...MRST LO* for 325 and 335
MSTP(52) = 2
MSTP(51) = 20650
ELSEIF (ITUNE.EQ.326.OR.ITUNE.EQ.336) THEN
C...CTEQ6L1 for 326 and 336
MSTP(52) = 2
MSTP(51) = 10042
ELSEIF (ITUNE.EQ.339) THEN
C...MRST LO** for 339
MSTP(52) = 2
MSTP(51) = 20651
ENDIF
C...LambdaQCD choice: 327, 328 and 334 use hardcoded, others get from PDF
MSTP(3) = 2
IF (ITUNE.EQ.327.OR.ITUNE.EQ.328.OR.ITUNE.EQ.334) THEN
MSTP(3) = 1
C...Hardcode CTEQ5L values for ME and ISR
MSTU(112) = 4
PARU(112) = 0.192D0
PARP(61) = 0.192D0
PARP( 1) = 0.192D0
C...but use LEP value also for non-res FSR
PARP(72) = 0.260D0
ENDIF
C...ISR: use Lambda_MSbar with default scale for S0(A)
MSTP(64) = 2
PARP(64) = 1D0
IF (ITUNE.EQ.320.OR.ITUNE.EQ.323.OR.ITUNE.EQ.324.OR.ITUNE.EQ.334
& .OR.ITUNE.EQ.326.OR.ITUNE.EQ.327.OR.ITUNE.EQ.328) THEN
C...Use Lambda_MC with muR^2=pT^2 for most central Perugia tunes
MSTP(64) = 3
PARP(64) = 1D0
ELSEIF (ITUNE.EQ.321) THEN
C...Use Lambda_MC with muR^2=(1/2pT)^2 for Perugia HARD
MSTP(64) = 3
PARP(64) = 0.25D0
ELSEIF (ITUNE.EQ.322) THEN
C...Use Lambda_MSbar with muR^2=2pT^2 for Perugia SOFT
MSTP(64) = 2
PARP(64) = 2D0
ELSEIF (ITUNE.EQ.325) THEN
C...Use Lambda_MC with muR^2=2pT^2 for Perugia LO*
MSTP(64) = 3
PARP(64) = 2D0
ELSEIF (ITUNE.EQ.329.OR.ITUNE.EQ.335.OR.ITUNE.EQ.336.OR.
& ITUNE.EQ.339) THEN
C...Use Lambda_MSbar with P64=1.3 for Pro-pT0
MSTP(64) = 2
PARP(64) = 1.3D0
IF (ITUNE.EQ.335) PARP(64) = 0.92D0
IF (ITUNE.EQ.336) PARP(64) = 0.89D0
IF (ITUNE.EQ.339) PARP(64) = 0.97D0
ENDIF
C...ISR : power-suppressed power showers above s_color (since 6.4.19)
MSTP(67) = 2
PARP(67) = 4D0
C...Perugia tunes have stronger suppression, except HARD
IF ((ITUNE.GE.320.AND.ITUNE.LE.328).OR.ITUNE.EQ.334) THEN
PARP(67) = 1D0
IF (ITUNE.EQ.321) PARP(67) = 4D0
IF (ITUNE.EQ.322) PARP(67) = 0.25D0
ENDIF
C...ISR IR cutoff type and FSR off ISR setting:
C...Smooth ISR, low FSR-off-ISR
MSTP(70) = 2
MSTP(72) = 0
IF (ITUNEB.EQ.301) THEN
C...S1, S1-Pro: sharp ISR, high FSR
MSTP(70) = 0
MSTP(72) = 1
ELSEIF (ITUNE.EQ.320.OR.ITUNE.EQ.324.OR.ITUNE.EQ.326
& .OR.ITUNE.EQ.325) THEN
C...Perugia default is smooth ISR, high FSR-off-ISR
MSTP(70) = 2
MSTP(72) = 1
ELSEIF (ITUNE.EQ.321) THEN
C...Perugia HARD: sharp ISR, high FSR-off-ISR (but no dip-to-BR rad)
MSTP(70) = 0
PARP(62) = 1.25D0
MSTP(72) = 1
ELSEIF (ITUNE.EQ.322) THEN
C...Perugia SOFT: scaling sharp ISR, low FSR-off-ISR
MSTP(70) = 1
PARP(81) = 1.5D0
MSTP(72) = 0
ELSEIF (ITUNE.EQ.323) THEN
C...Perugia 3: sharp ISR, high FSR-off-ISR (with dipole-to-BR radiating)
MSTP(70) = 0
PARP(62) = 1.25D0
MSTP(72) = 2
ELSEIF (ITUNE.EQ.327.OR.ITUNE.EQ.328.OR.ITUNE.EQ.334) THEN
C...Perugia 2010/K: smooth ISR, high FSR-off-ISR (with dipole-to-BR radiating)
MSTP(70) = 2
MSTP(72) = 2
ENDIF
C...FSR activity: Perugia tunes use a lower PARP(71) as indicated
C...by Professor tunes (with HARD and SOFT variations)
PARP(71) = 4D0
IF ((ITUNE.GE.320.AND.ITUNE.LE.328).OR.ITUNE.EQ.334) THEN
PARP(71) = 2D0
IF (ITUNE.EQ.321) PARP(71) = 4D0
IF (ITUNE.EQ.322) PARP(71) = 1D0
ENDIF
IF (ITUNE.EQ.329) PARP(71) = 2D0
IF (ITUNE.EQ.335) PARP(71) = 1.29D0
IF (ITUNE.EQ.336) PARP(71) = 1.72D0
IF (ITUNE.EQ.339) PARP(71) = 1.20D0
C...FSR: Lambda_FSR scale (only if not using professor)
IF (ITUNE.LT.310) PARJ(81) = 0.23D0
IF (ITUNE.EQ.321) PARJ(81) = 0.30D0
IF (ITUNE.EQ.322) PARJ(81) = 0.20D0
C...K-factor : only 328 uses a K-factor on the UE cross sections
MSTP(33) = 0
IF (ITUNE.EQ.328) THEN
MSTP(33) = 10
PARP(32) = 1.5
ENDIF
C...UE on, new model
MSTP(81) = 21
C...UE: hadron-hadron overlap profile (expOfPow for all)
MSTP(82) = 5
C...UE: Overlap smoothness (1.0 = exponential; 2.0 = gaussian)
PARP(83) = 1.6D0
IF (ITUNEB.EQ.301) PARP(83) = 1.4D0
IF (ITUNEB.EQ.302) PARP(83) = 1.2D0
C...NOCR variants have very smooth distributions
IF (ITUNEB.EQ.304) PARP(83) = 1.8D0
IF (ITUNEB.EQ.305) PARP(83) = 2.0D0
IF ((ITUNE.GE.320.AND.ITUNE.LE.328).OR.ITUNE.EQ.334) THEN
C...Perugia variants have slightly smoother profiles by default
C...(to compensate for more tail by added radiation)
C...Perugia-SOFT has more peaked distribution, NOCR less peaked
PARP(83) = 1.7D0
IF (ITUNE.EQ.322) PARP(83) = 1.5D0
IF (ITUNE.EQ.327) PARP(83) = 1.5D0
IF (ITUNE.EQ.328) PARP(83) = 1.5D0
C...NOCR variants have smoother mass profiles
IF (ITUNE.EQ.324) PARP(83) = 1.8D0
IF (ITUNE.EQ.334) PARP(83) = 1.8D0
ENDIF
C...Professor-pT0 also has very smooth distribution
IF (ITUNE.EQ.329) PARP(83) = 1.8
IF (ITUNE.EQ.335) PARP(83) = 1.68
IF (ITUNE.EQ.336) PARP(83) = 1.72
IF (ITUNE.EQ.339) PARP(83) = 1.67
C...UE: pT0 = 1.85 for S0, S0A, 2.0 for Perugia version
PARP(82) = 1.85D0
IF (ITUNEB.EQ.301) PARP(82) = 2.1D0
IF (ITUNEB.EQ.302) PARP(82) = 1.9D0
IF (ITUNEB.EQ.304) PARP(82) = 2.05D0
IF (ITUNEB.EQ.305) PARP(82) = 1.9D0
IF ((ITUNE.GE.320.AND.ITUNE.LE.328).OR.ITUNE.EQ.334) THEN
C...Perugia tunes (def is 2.0 GeV, HARD has higher, SOFT has lower,
C...Perugia-3 has more ISR, so higher pT0, NOCR can be slightly lower,
C...CTEQ6L1 slightly lower, due to less activity, and LO* needs to be
C...slightly higher, due to increased activity.
PARP(82) = 2.0D0
IF (ITUNE.EQ.321) PARP(82) = 2.3D0
IF (ITUNE.EQ.322) PARP(82) = 1.9D0
IF (ITUNE.EQ.323) PARP(82) = 2.2D0
IF (ITUNE.EQ.324) PARP(82) = 1.95D0
IF (ITUNE.EQ.325) PARP(82) = 2.2D0
IF (ITUNE.EQ.326) PARP(82) = 1.95D0
IF (ITUNE.EQ.327) PARP(82) = 2.05D0
IF (ITUNE.EQ.328) PARP(82) = 2.45D0
IF (ITUNE.EQ.334) PARP(82) = 2.15D0
ENDIF
C...Professor-pT0 maintains low pT0 value
IF (ITUNE.EQ.329) PARP(82) = 1.85D0
IF (ITUNE.EQ.335) PARP(82) = 2.10D0
IF (ITUNE.EQ.336) PARP(82) = 1.83D0
IF (ITUNE.EQ.339) PARP(82) = 2.28D0
C...UE: IR cutoff reference energy and default energy scaling pace
PARP(89) = 1800D0
PARP(90) = 0.16D0
C...S0A, S0A-Pro have tune A energy scaling
IF (ITUNEB.EQ.303) PARP(90) = 0.25D0
IF ((ITUNE.GE.320.AND.ITUNE.LE.328).OR.ITUNE.EQ.334) THEN
C...Perugia tunes explicitly include MB at 630 to fix energy scaling
PARP(90) = 0.26
IF (ITUNE.EQ.321) PARP(90) = 0.30D0
IF (ITUNE.EQ.322) PARP(90) = 0.24D0
IF (ITUNE.EQ.323) PARP(90) = 0.32D0
IF (ITUNE.EQ.324) PARP(90) = 0.24D0
C...LO* and CTEQ6L1 tunes have slower energy scaling
IF (ITUNE.EQ.325) PARP(90) = 0.23D0
IF (ITUNE.EQ.326) PARP(90) = 0.22D0
ENDIF
C...Professor-pT0 has intermediate scaling
IF (ITUNE.EQ.329) PARP(90) = 0.22D0
IF (ITUNE.EQ.335) PARP(90) = 0.20D0
IF (ITUNE.EQ.336) PARP(90) = 0.20D0
IF (ITUNE.EQ.339) PARP(90) = 0.21D0
C...BR: MPI initiator color connections rap-ordered by default
C...NOCR variants are Lambda-ordered, Perugia SOFT & 2010 random-ordered
MSTP(89) = 1
IF (ITUNEB.EQ.304.OR.ITUNE.EQ.324) MSTP(89) = 2
IF (ITUNE.EQ.322) MSTP(89) = 0
IF (ITUNE.EQ.327) MSTP(89) = 0
IF (ITUNE.EQ.328) MSTP(89) = 0
C...BR: BR-g-BR suppression factor (higher values -> more beam blowup)
PARP(80) = 0.01D0
IF (ITUNE.GE.320.AND.ITUNE.LE.328) THEN
C...Perugia tunes have more beam blowup by default
PARP(80) = 0.05D0
IF (ITUNE.EQ.321) PARP(80) = 0.01
IF (ITUNE.EQ.323) PARP(80) = 0.03
IF (ITUNE.EQ.324) PARP(80) = 0.01
IF (ITUNE.EQ.327) PARP(80) = 0.1
IF (ITUNE.EQ.328) PARP(80) = 0.1
ENDIF
C...BR: diquarks (def = valence qq and moderate diquark x enhancement)
MSTP(88) = 0
PARP(79) = 2D0
IF (ITUNEB.EQ.304) PARP(79) = 3D0
IF (ITUNE.EQ.329) PARP(79) = 1.18
IF (ITUNE.EQ.335) PARP(79) = 1.11
IF (ITUNE.EQ.336) PARP(79) = 1.10
IF (ITUNE.EQ.339) PARP(79) = 3.69
C...BR: Primordial kT, parametrization and cutoff, default is 2 GeV
MSTP(91) = 1
PARP(91) = 2D0
PARP(93) = 10D0
C...Perugia-HARD only uses 1.0 GeV
IF (ITUNE.EQ.321) PARP(91) = 1.0D0
C...Perugia-3 only uses 1.5 GeV
IF (ITUNE.EQ.323) PARP(91) = 1.5D0
C...Professor-pT0 uses 7-GeV cutoff
IF (ITUNE.EQ.329) PARP(93) = 7.0
IF (ITUNE.EQ.335) THEN
PARP(91) = 2.15
PARP(93) = 6.79
ELSEIF (ITUNE.EQ.336) THEN
PARP(91) = 1.85
PARP(93) = 6.86
ELSEIF (ITUNE.EQ.339) THEN
PARP(91) = 2.11
PARP(93) = 5.08
ENDIF
C...FSI: Colour Reconnections - Seattle algorithm is default (S0)
MSTP(95) = 6
C...S1, S1-Pro: use S1
IF (ITUNEB.EQ.301) MSTP(95) = 2
C...S2, S2-Pro: use S2
IF (ITUNEB.EQ.302) MSTP(95) = 4
C...NOCR, NOCR-Pro, Perugia-NOCR: use no CR
IF (ITUNE.EQ.304.OR.ITUNE.EQ.314.OR.ITUNE.EQ.324.OR.
& ITUNE.EQ.334) MSTP(95) = 0
C..."Old" and "Old"-Pro: use old CR
IF (ITUNEB.EQ.305) MSTP(95) = 1
C...Perugia 2010 and K use Paquis model
IF (ITUNE.EQ.327.OR.ITUNE.EQ.328) MSTP(95) = 8
C...FSI: CR strength and high-pT dampening, default is S0
PARP(77) = 0D0
IF (ITUNE.LT.320.OR.ITUNE.EQ.329.OR.ITUNE.GE.335) THEN
PARP(78) = 0.2D0
IF (ITUNEB.EQ.301) PARP(78) = 0.35D0
IF (ITUNEB.EQ.302) PARP(78) = 0.15D0
IF (ITUNEB.EQ.304) PARP(78) = 0.0D0
IF (ITUNEB.EQ.305) PARP(78) = 1.0D0
IF (ITUNE.EQ.329) PARP(78) = 0.17D0
IF (ITUNE.EQ.335) PARP(78) = 0.14D0
IF (ITUNE.EQ.336) PARP(78) = 0.17D0
IF (ITUNE.EQ.339) PARP(78) = 0.13D0
ELSE
C...Perugia tunes also use high-pT dampening : default is Perugia 0,*,6
PARP(78) = 0.33
PARP(77) = 0.9D0
IF (ITUNE.EQ.321) THEN
C...HARD has HIGH amount of CR
PARP(78) = 0.37D0
PARP(77) = 0.4D0
ELSEIF (ITUNE.EQ.322) THEN
C...SOFT has LOW amount of CR
PARP(78) = 0.15D0
PARP(77) = 0.5D0
ELSEIF (ITUNE.EQ.323) THEN
C...Scaling variant appears to need slightly more than default
PARP(78) = 0.35D0
PARP(77) = 0.6D0
ELSEIF (ITUNE.EQ.324.OR.ITUNE.EQ.334) THEN
C...NOCR has no CR
PARP(78) = 0D0
PARP(77) = 0D0
ELSEIF (ITUNE.EQ.327) THEN
C...2010
PARP(78) = 0.035D0
PARP(77) = 1D0
ELSEIF (ITUNE.EQ.328) THEN
C...K
PARP(78) = 0.033D0
PARP(77) = 1D0
ENDIF
ENDIF
C================
C...Perugia 2011 and 2012 tunes
C...(written as modifications on top of Perugia 2010)
C================
IF ( (ITUNSV.GE.350.AND.ITUNSV.LE.359)
& .OR.(ITUNSV.GE.370.AND.ITUNSV.LE.379) ) THEN
ITUNE = ITUNSV
C... Scale setting for matching applications.
C... Switch to 5-flavor CMW LambdaQCD = 0.26 for all shower activity
C... (equivalent to a 5-flavor MSbar LambdaQCD = 0.26/1.6 = 0.16)
MSTP(64) = 2
MSTU(112) = 5
C... This sets the Lambda scale for ISR, IFSR, and FSR
PARP(61) = 0.26D0
PARP(72) = 0.26D0
PARJ(81) = 0.26D0
C... This sets the Lambda scale for QCD hard interactions (important for the
C... UE dijet cross sections. Here we still use an MSbar value, rather than
C... a CMW one, in order not to hugely increase the UE jettiness. The CTEQ5L
C... value corresponds to a Lambda5 of 0.146 for comparison, so quite close.)
PARP(1) = 0.16D0
PARU(112) = 0.16D0
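C...(The factor 1.6 used above approximates the CMW rescaling,
C...commonly quoted as Lambda_CMW/Lambda_MSbar ~ 1.57 for five
C...flavours.)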
C... For matching applications, PARP(71) and PARP(67) = 1
PARP(67) = 1D0
PARP(71) = 1D0
C... Primordial kT: only use 1 GeV
MSTP(91) = 1
PARP(91) = 1D0
C... ADDITIONAL LESSONS WRT PERUGIA 2010
C... ALICE taught us: need less baryon transport than SOFT
MSTP(89) = 0
PARP(80) = 0.015
C... Small adjustments at LEP (slightly softer frag functions, esp for baryons)
PARJ(21) = 0.33
PARJ(41) = 0.35
PARJ(42) = 0.8
PARJ(45) = 0.55
C... Increase Lambda/K ratio and other strange baryon yields
PARJ(1) = 0.087D0
PARJ(3) = 0.95D0
PARJ(4) = 0.043D0
PARJ(6) = 1.0D0
PARJ(7) = 1.0D0
C... Also reduce total strangeness yield a bit, with higher K*/K
PARJ(2) = 0.19D0
PARJ(12) = 0.40D0
C... Perugia 2011 default is sharp ISR, dipoles to BR radiating, pTmax individual
MSTP(70) = 0
MSTP(72) = 2
PARP(62) = 1.5D0
C... Holger taught us a smoother proton is preferred at high energies
C... Just use a simple Gaussian
MSTP(82) = 3
C... Scaling of pt0 cutoff
PARP(90) = 0.265
C... Now retune pT0 to give right UE activity.
C... Low CR strength indicated by LHC tunes
C... (also keep low to get <pT>(Nch) a bit down for pT>100MeV samples)
PARP(78) = 0.036D0
C... Choose 7 TeV as new reference scale
PARP(89) = 7000.0D0
PARP(82) = 2.93D0
C================
C... P2011 Variations
C================
IF (ITUNE.EQ.351) THEN
C... radHi: high Lambda scale for ISR, IFSR, and FSR
C... ( ca 10% more particles at LEP after retune )
PARP(61) = 0.52D0
PARP(72) = 0.52D0
PARJ(81) = 0.52D0
C... Retune cutoff scales to compensate partially
C... (though higher cutoff causes faster multiplicity drop at low energies)
PARP(62) = 1.75D0
PARJ(82) = 1.75D0
PARP(82) = 3.00D0
C... Needs faster cutoff scaling than nominal variant for same <Nch> scaling
C... (since more radiation otherwise generates faster mult growth)
PARP(90) = 0.28
ELSEIF (ITUNE.EQ.352) THEN
C... radLo: low Lambda scale for ISR, IFSR, and FSR
C... ( ca 10% less particles at LEP after retune )
PARP(61) = 0.13D0
PARP(72) = 0.13D0
PARJ(81) = 0.13D0
C... Retune cutoff scales to compensate partially
PARP(62) = 1.00D0
PARJ(82) = 0.75D0
PARP(82) = 2.95D0
C... Needs slower cutoff scaling than nominal variant for same <Nch> scaling
C... (since less radiation otherwise generates slower mult growth)
PARP(90) = 0.24
ELSEIF (ITUNE.EQ.353) THEN
C... mpiHi: high Lambda scale for MPI
PARP(1) = 0.26D0
PARU(112) = 0.26D0
PARP(82) = 3.35D0
PARP(90) = 0.26D0
ELSEIF (ITUNE.EQ.354) THEN
MSTP(95) = 0
PARP(82) = 3.05D0
ELSEIF (ITUNE.EQ.355) THEN
C... LO**
MSTP(52) = 2
MSTP(51) = 20651
PARP(62) = 1.5D0
C... Compensate for higher <pT> with less CR
PARP(78) = 0.034
PARP(82) = 3.40D0
C... Need slower energy scaling than CTEQ5L
PARP(90) = 0.23D0
ELSEIF (ITUNE.EQ.356) THEN
C... CTEQ6L1
MSTP(52) = 2
MSTP(51) = 10042
PARP(82) = 2.65D0
C... Need slower cutoff scaling than CTEQ5L
PARP(90) = 0.22D0
ELSEIF (ITUNE.EQ.357) THEN
C... T16
PARP(90) = 0.16
ELSEIF (ITUNE.EQ.358) THEN
C... T32
PARP(90) = 0.32
ELSEIF (ITUNE.EQ.359) THEN
C... Tevatron
PARP(89) = 1800D0
PARP(90) = 0.28
PARP(82) = 2.10
PARP(78) = 0.05
ENDIF
C================
C... Perugia 2012 Variations
C================
IF (ITUNE.GE.370) THEN
C... CTEQ6L1 Baseline
MSTP(52) = 2
MSTP(51) = 10042
PARP(82) = 2.65D0
C... Needs slower cutoff scaling than CTEQ5L
PARP(90) = 0.24D0
C... Slightly lower CR strength than Perugia 2011
PARP(78) = 0.035D0
C... Adjusted fragmentation parameters wrt 2011
PARJ(1) = 0.085D0
PARJ(2) = 0.2
PARJ(3) = 0.92
PARJ(25) = 0.70
PARJ(26) = 0.135
PARJ(41) = 0.45
PARJ(42) = 1.0
PARJ(45) = 0.86
ENDIF
C... Variations
IF (ITUNE.EQ.371) THEN
C... radHi: high Lambda scale for ISR, IFSR, and FSR
C... ( ca 10% more particles at LEP after retune )
PARP(61) = 0.52D0
PARP(72) = 0.52D0
PARJ(81) = 0.52D0
C... Retune cutoff scales to compensate partially
C... (though higher cutoff causes faster multiplicity drop at low energies)
PARP(62) = 1.75D0
PARJ(82) = 1.75D0
PARP(82) = 2.725D0
C... Needs faster cutoff scaling than nominal variant for same <Nch> scaling
C... (since more radiation otherwise generates faster mult growth)
PARP(90) = 0.25
ELSEIF (ITUNE.EQ.372) THEN
C... radLo: low Lambda scale for ISR, IFSR, and FSR
C... ( ca 10% less particles at LEP after retune )
PARP(61) = 0.13D0
PARP(72) = 0.13D0
PARJ(81) = 0.13D0
C... Retune cutoff scales to compensate partially
PARP(62) = 1.00D0
PARJ(82) = 0.75D0
PARP(82) = 2.6D0
C... Needs slower cutoff scaling than nominal variant for same <Nch> scaling
C... (since less radiation otherwise generates slower mult growth)
PARP(90) = 0.23
ELSEIF (ITUNE.EQ.373) THEN
C... mpiHi: high Lambda scale for MPI
PARP(1) = 0.26D0
PARU(112) = 0.26D0
PARP(82) = 3.0D0
PARP(90) = 0.24D0
ELSEIF (ITUNE.EQ.374) THEN
C... LOCR : uses global CR model. Less extreme alternative to noCR.
MSTP(95) = 6
PARP(78) = 0.25D0
PARP(82) = 2.7D0
PARP(83) = 1.50D0
PARP(90) = 0.24
ELSEIF (ITUNE.EQ.375) THEN
C... NOCR : with higher pT0
MSTP(95) = 0
PARP(82) = 2.80D0
ELSEIF (ITUNE.EQ.376) THEN
C... hadF1 (harder frag function, smaller n.p. pT)
PARJ(21) = 0.30
PARJ(41) = 0.36
PARJ(42) = 1.0
PARJ(45) = 0.75
ELSEIF (ITUNE.EQ.377) THEN
C... hadF2 (softer frag function, larger n.p. pT)
PARJ(21) = 0.36
PARJ(41) = 0.45
PARJ(42) = 0.75
PARJ(45) = 0.9
ELSEIF (ITUNE.EQ.378) THEN
C... MSTW08LO
MSTP(52) = 2
MSTP(51) = 21000
PARP(82) = 2.9D0
C...Uses a large LambdaQCD MSbar value (close to CMW one)
C...(Nominally, MSTW 2008 alphaS(mZ) = 0.139)
PARP(1) = 0.26D0
PARU(112) = 0.26D0
C...Tentative (fast) energy scaling
PARP(90) = 0.29
ELSEIF (ITUNE.EQ.379) THEN
C... MSTW LO**
MSTP(52) = 2
MSTP(51) = 20651
PARP(62) = 1.5D0
C... Use a smaller LambdaQCD MSbar than with CTEQ
PARP(1) = 0.14D0
PARU(112) = 0.14D0
C... Compensate for higher <pT> with less CR
PARP(78) = 0.034
PARP(82) = 3.25D0
C...Tentative scaling
PARP(90) = 0.25
ENDIF
C================
C...Schulz-Skands 2011 tunes
C...(written as modifications on top of Perugia 0)
C================
ELSEIF (ITUNSV.GE.360.AND.ITUNSV.LE.365) THEN
ITUNE = ITUNSV
IF (ITUNE.EQ.360) THEN
PARP(78) = 0.40D0
PARP(82) = 2.19D0
PARP(83) = 1.45D0
PARP(89) = 1800.0D0
PARP(90) = 0.27D0
ELSEIF (ITUNE.EQ.361) THEN
PARP(78) = 0.20D0
PARP(82) = 2.75D0
PARP(83) = 1.73D0
PARP(89) = 7000.0D0
ELSEIF (ITUNE.EQ.362) THEN
PARP(78) = 0.31D0
PARP(82) = 1.97D0
PARP(83) = 1.98D0
PARP(89) = 1960.0D0
ELSEIF (ITUNE.EQ.363) THEN
PARP(78) = 0.35D0
PARP(82) = 1.91D0
PARP(83) = 2.02D0
PARP(89) = 1800.0D0
ELSEIF (ITUNE.EQ.364) THEN
PARP(78) = 0.33D0
PARP(82) = 1.69D0
PARP(83) = 1.92D0
PARP(89) = 900.0D0
ELSEIF (ITUNE.EQ.365) THEN
PARP(78) = 0.47D0
PARP(82) = 1.61D0
PARP(83) = 1.50D0
PARP(89) = 630.0D0
ENDIF
ENDIF
C...Switch off trial joinings
MSTP(96) = 0
C...S0 (300), S0A (303)
IF (ITUNEB.EQ.300.OR.ITUNEB.EQ.303) THEN
IF (M13.GE.1) THEN
CH60='see P. Skands & D. Wicke, hep-ph/0703081'
WRITE(M11,5030) CH60
CH60='M. Sandhoff & P. Skands, in hep-ph/0604120'
WRITE(M11,5030) CH60
CH60='and T. Sjostrand & P. Skands, hep-ph/0408302'
WRITE(M11,5030) CH60
IF (ITUNE.GE.310) THEN
CH60='LEP parameters tuned by Professor,'//
& ' hep-ph/0907.2973'
WRITE(M11,5030) CH60
ENDIF
ENDIF
C...S1 (301)
ELSEIF(ITUNEB.EQ.301) THEN
IF (M13.GE.1) THEN
CH60='see M. Sandhoff & P. Skands, in hep-ph/0604120'
WRITE(M11,5030) CH60
CH60='and T. Sjostrand & P. Skands, hep-ph/0408302'
WRITE(M11,5030) CH60
IF (ITUNE.GE.310) THEN
CH60='LEP parameters tuned by Professor,'//
& ' hep-ph/0907.2973'
WRITE(M11,5030) CH60
ENDIF
ENDIF
C...S2 (302)
ELSEIF(ITUNEB.EQ.302) THEN
IF (M13.GE.1) THEN
CH60='see M. Sandhoff & P. Skands, in hep-ph/0604120'
WRITE(M11,5030) CH60
CH60='and T. Sjostrand & P. Skands, hep-ph/0408302'
WRITE(M11,5030) CH60
IF (ITUNE.GE.310) THEN
CH60='LEP parameters tuned by Professor,'//
& ' hep-ph/0907.2973'
WRITE(M11,5030) CH60
ENDIF
ENDIF
C...NOCR (304)
ELSEIF(ITUNEB.EQ.304) THEN
IF (M13.GE.1) THEN
CH60='"best try" without colour reconnections'
WRITE(M11,5030) CH60
CH60='see P. Skands & D. Wicke, hep-ph/0703081'
WRITE(M11,5030) CH60
CH60='and T. Sjostrand & P. Skands, hep-ph/0408302'
WRITE(M11,5030) CH60
IF (ITUNE.GE.310) THEN
CH60='LEP parameters tuned by Professor,'//
& ' hep-ph/0907.2973'
WRITE(M11,5030) CH60
ENDIF
ENDIF
C..."Lo FSR" retune (305)
ELSEIF(ITUNEB.EQ.305) THEN
IF (M13.GE.1) THEN
CH60='"Lo FSR retune" with primitive colour reconnections'
WRITE(M11,5030) CH60
CH60='see T. Sjostrand & P. Skands, hep-ph/0408302'
WRITE(M11,5030) CH60
IF (ITUNE.GE.310) THEN
CH60='LEP parameters tuned by Professor,'//
& ' hep-ph/0907.2973'
WRITE(M11,5030) CH60
ENDIF
ENDIF
C...Perugia Tunes (320-328 and 334)
ELSEIF((ITUNE.GE.320.AND.ITUNE.LE.328).OR.ITUNE.EQ.334) THEN
IF (M13.GE.1) THEN
CH60='Tuned by P. Skands, hep-ph/1005.3457'
WRITE(M11,5030) CH60
CH60='Physics Model: '//
& 'T. Sjostrand & P. Skands, hep-ph/0408302'
WRITE(M11,5030) CH60
IF (ITUNE.LE.326) THEN
CH60='CR by P. Skands & D. Wicke, hep-ph/0703081'
WRITE(M11,5030) CH60
CH60='LEP parameters tuned by Professor, hep-ph/0907.2973'
WRITE(M11,5030) CH60
ENDIF
IF (ITUNE.EQ.325) THEN
CH70='NB! This tune requires MRST LO* pdfs to be '//
& 'externally linked'
WRITE(M11,5035) CH70
ELSEIF (ITUNE.EQ.326) THEN
CH70='NB! This tune requires CTEQ6L1 pdfs to be '//
& 'externally linked'
WRITE(M11,5035) CH70
ELSEIF (ITUNE.EQ.321) THEN
CH60='NB! This tune has MORE ISR & FSR / LESS UE & BR'
WRITE(M11,5030) CH60
ELSEIF (ITUNE.EQ.322) THEN
CH60='NB! This tune has LESS ISR & FSR / MORE UE & BR'
WRITE(M11,5030) CH60
ENDIF
ENDIF
C...Professor-pTO (329)
ELSEIF(ITUNE.EQ.329.OR.ITUNE.EQ.335.OR.ITUNE.EQ.336.OR.
& ITUNE.EQ.339) THEN
IF (M13.GE.1) THEN
CH60='Tuned by Professor, hep-ph/0907.2973'
WRITE(M11,5030) CH60
CH60='Physics Model: '//
& 'T. Sjostrand & P. Skands, hep-ph/0408302'
WRITE(M11,5030) CH60
CH60='CR by P. Skands & D. Wicke, hep-ph/0703081'
WRITE(M11,5030) CH60
ENDIF
C...Perugia 2011 Tunes (350-359)
ELSEIF(ITUNE.GE.350.AND.ITUNE.LE.359) THEN
IF (M13.GE.1) THEN
CH60='Tuned by P. Skands, hep-ph/1005.3457'
WRITE(M11,5030) CH60
CH60='Physics Model: '//
& 'T. Sjostrand & P. Skands, hep-ph/0408302'
WRITE(M11,5030) CH60
CH60='CR by P. Skands & D. Wicke, hep-ph/0703081'
WRITE(M11,5030) CH60
IF (ITUNE.EQ.355) THEN
CH70='NB! This tune requires MRST LO** pdfs to be '//
& 'externally linked'
WRITE(M11,5035) CH70
ELSEIF (ITUNE.EQ.356) THEN
CH70='NB! This tune requires CTEQ6L1 pdfs to be '//
& 'externally linked'
WRITE(M11,5035) CH70
ENDIF
ENDIF
C...Schulz-Skands Tunes (360-365)
ELSEIF(ITUNE.GE.360.AND.ITUNE.LE.365) THEN
IF (M13.GE.1) THEN
CH60='Tuned by H. Schulz & P. Skands, MCNET-11-07'
WRITE(M11,5030) CH60
CH60='Based on Perugia 0, hep-ph/1005.3457'
WRITE(M11,5030) CH60
CH60='Physics Model: '//
& 'T. Sjostrand & P. Skands, hep-ph/0408302'
WRITE(M11,5030) CH60
CH60='CR by P. Skands & D. Wicke, hep-ph/0703081'
WRITE(M11,5030) CH60
ENDIF
ENDIF
C...Output
IF (M13.GE.1) THEN
WRITE(M11,5030) ' '
WRITE(M11,5040) 51, MSTP(51), CHMSTP(51)
WRITE(M11,5040) 52, MSTP(52), CHMSTP(52)
IF (MSTP(33).GE.10) THEN
WRITE(M11,5050) 32, PARP(32), CHPARP(32)
ENDIF
WRITE(M11,5040) 3, MSTP( 3), CHMSTP( 3)
IF (MSTP(3).EQ.1) THEN
WRITE(M11,6100) 112, MSTU(112), CHMSTU(112)
WRITE(M11,6110) 112, PARU(112), CHPARU(112)
WRITE(M11,5050) 1, PARP(1) , CHPARP( 1)
ENDIF
WRITE(M11,5060) 81, PARJ(81), CHPARJ(81)
IF (MSTP(3).EQ.1) THEN
WRITE(M11,5050) 72, PARP(72) , CHPARP( 72)
WRITE(M11,5050) 61, PARP(61) , CHPARP( 61)
ENDIF
WRITE(M11,5040) 64, MSTP(64), CHMSTP(64)
WRITE(M11,5050) 64, PARP(64), CHPARP(64)
WRITE(M11,5040) 67, MSTP(67), CHMSTP(67)
WRITE(M11,5040) 68, MSTP(68), CHMSTP(68)
CH60='(Note: MSTP(68) is not explicitly (re-)set by PYTUNE)'
WRITE(M11,5030) CH60
WRITE(M11,5050) 67, PARP(67), CHPARP(67)
WRITE(M11,5040) 72, MSTP(72), CHMSTP(72)
WRITE(M11,5050) 71, PARP(71), CHPARP(71)
WRITE(M11,5040) 70, MSTP(70), CHMSTP(70)
IF (MSTP(70).EQ.0) THEN
WRITE(M11,5050) 62, PARP(62), CHPARP(62)
ELSEIF (MSTP(70).EQ.1) THEN
WRITE(M11,5050) 81, PARP(81), CHPARP(62)
CH60='(Note: PARP(81) replaces PARP(62).)'
WRITE(M11,5030) CH60
ENDIF
WRITE(M11,5060) 82, PARJ(82), CHPARJ(82)
WRITE(M11,5040) 33, MSTP(33), CHMSTP(33)
WRITE(M11,5040) 81, MSTP(81), CHMSTP(81)
WRITE(M11,5050) 82, PARP(82), CHPARP(82)
IF (MSTP(70).EQ.2) THEN
CH60='(Note: PARP(82) replaces PARP(62).)'
WRITE(M11,5030) CH60
ENDIF
WRITE(M11,5050) 89, PARP(89), CHPARP(89)
WRITE(M11,5050) 90, PARP(90), CHPARP(90)
WRITE(M11,5040) 82, MSTP(82), CHMSTP(82)
IF (MSTP(82).EQ.5) THEN
WRITE(M11,5050) 83, PARP(83), CHPARP(83)
ELSEIF (MSTP(82).EQ.4) THEN
WRITE(M11,5050) 83, PARP(83), CHPARP(83)
WRITE(M11,5050) 84, PARP(84), CHPARP(84)
ENDIF
WRITE(M11,5040) 88, MSTP(88), CHMSTP(88)
WRITE(M11,5040) 89, MSTP(89), CHMSTP(89)
WRITE(M11,5050) 79, PARP(79), CHPARP(79)
WRITE(M11,5050) 80, PARP(80), CHPARP(80)
WRITE(M11,5040) 91, MSTP(91), CHMSTP(91)
WRITE(M11,5050) 91, PARP(91), CHPARP(91)
WRITE(M11,5050) 93, PARP(93), CHPARP(93)
WRITE(M11,5040) 95, MSTP(95), CHMSTP(95)
IF (MSTP(95).GE.1) THEN
WRITE(M11,5050) 78, PARP(78), CHPARP(78)
IF (MSTP(95).GE.2) WRITE(M11,5050) 77, PARP(77), CHPARP(77)
ENDIF
ENDIF
C=======================================================================
C...ATLAS-CSC 11-parameter tune (By A. Moraes)
ELSEIF (ITUNE.EQ.306) THEN
IF (M13.GE.1) WRITE(M11,5010) ITUNE, CHNAME
IF (MSTP(181).LE.5.OR.(MSTP(181).EQ.6.AND.MSTP(182).LE.405))THEN
CALL PYERRM(9,'(PYTUNE:) linked PYTHIA version incompatible'//
& ' with tune.')
ENDIF
C...PDFs
MSTP(52) = 2
MSTP(54) = 2
MSTP(51) = 10042
MSTP(53) = 10042
C...ISR
C PARP(64) = 1D0
C...UE on, new model.
MSTP(81) = 21
C...Energy scaling
PARP(89) = 1800D0
PARP(90) = 0.22D0
C...Switch off trial joinings
MSTP(96) = 0
C...Primordial kT cutoff
IF (M13.GE.1) THEN
CH60='see presentations by A. Moraes (ATLAS),'
WRITE(M11,5030) CH60
CH60='and T. Sjostrand & P. Skands, hep-ph/0408302'
WRITE(M11,5030) CH60
WRITE(M11,5030) ' '
CH70='NB! This tune requires CTEQ6.1 pdfs to be '//
& 'externally linked'
WRITE(M11,5035) CH70
ENDIF
C...Smooth ISR, low FSR
MSTP(70) = 2
MSTP(72) = 0
C...pT0
PARP(82) = 1.9D0
C...Transverse density profile.
MSTP(82) = 4
PARP(83) = 0.3D0
PARP(84) = 0.5D0
C...ISR & FSR in interactions after the first (default)
MSTP(84) = 1
MSTP(85) = 1
C...No double-counting (default)
MSTP(86) = 2
C...Companion quark parent gluon (1-x) power
MSTP(87) = 4
C...Primordial kT compensation along chains (default = 0 : uniform)
MSTP(90) = 1
C...Colour Reconnections
MSTP(95) = 1
PARP(78) = 0.2D0
C...Lambda_FSR scale.
PARJ(81) = 0.23D0
C...Rap order, Valence qq, qq x enhc, BR-g-BR supp
MSTP(89) = 1
MSTP(88) = 0
C PARP(79) = 2D0
PARP(80) = 0.01D0
C...Peterson charm frag, and c and b hadr parameters
MSTJ(11) = 3
PARJ(54) = -0.07
PARJ(55) = -0.006
C... Output
IF (M13.GE.1) THEN
WRITE(M11,5030) ' '
WRITE(M11,5040) 51, MSTP(51), CHMSTP(51)
WRITE(M11,5040) 52, MSTP(52), CHMSTP(52)
WRITE(M11,5040) 3, MSTP( 3), CHMSTP( 3)
WRITE(M11,5050) 64, PARP(64), CHPARP(64)
WRITE(M11,5040) 68, MSTP(68), CHMSTP(68)
CH60='(Note: MSTP(68) is not explicitly (re-)set by PYTUNE)'
WRITE(M11,5030) CH60
WRITE(M11,5040) 70, MSTP(70), CHMSTP(70)
WRITE(M11,5040) 72, MSTP(72), CHMSTP(72)
WRITE(M11,5050) 71, PARP(71), CHPARP(71)
WRITE(M11,5060) 81, PARJ(81), CHPARJ(81)
CH60='(Note: PARJ(81) changed from 0.14! See update notes)'
WRITE(M11,5030) CH60
WRITE(M11,5040) 33, MSTP(33), CHMSTP(33)
WRITE(M11,5040) 81, MSTP(81), CHMSTP(81)
WRITE(M11,5050) 82, PARP(82), CHPARP(82)
WRITE(M11,5050) 89, PARP(89), CHPARP(89)
WRITE(M11,5050) 90, PARP(90), CHPARP(90)
WRITE(M11,5040) 82, MSTP(82), CHMSTP(82)
WRITE(M11,5050) 83, PARP(83), CHPARP(83)
WRITE(M11,5050) 84, PARP(84), CHPARP(84)
WRITE(M11,5040) 88, MSTP(88), CHMSTP(88)
WRITE(M11,5040) 89, MSTP(89), CHMSTP(89)
WRITE(M11,5040) 90, MSTP(90), CHMSTP(90)
WRITE(M11,5050) 79, PARP(79), CHPARP(79)
WRITE(M11,5050) 80, PARP(80), CHPARP(80)
WRITE(M11,5050) 93, PARP(93), CHPARP(93)
WRITE(M11,5040) 95, MSTP(95), CHMSTP(95)
WRITE(M11,5050) 78, PARP(78), CHPARP(78)
ENDIF
C=======================================================================
C...Tunes A, AW, BW, DW, DWT, QW, D6, D6T (by R.D. Field, CDF)
C...(100-105,108-109), ATLAS-DC2 Tune (by A. Moraes, ATLAS) (106)
C...A-Pro, DW-Pro, etc (100-119), and Pro-Q2O (129)
ELSEIF ((ITUNE.GE.100.AND.ITUNE.LE.106).OR.ITUNE.EQ.108.OR.
& ITUNE.EQ.109.OR.(ITUNE.GE.110.AND.ITUNE.LE.116).OR.
& ITUNE.EQ.118.OR.ITUNE.EQ.119.OR.ITUNE.EQ.129) THEN
IF (M13.GE.1.AND.ITUNE.NE.106.AND.ITUNE.NE.129) THEN
WRITE(M11,5010) ITUNE, CHNAME
CH60='see R.D. Field, in hep-ph/0610012'
WRITE(M11,5030) CH60
CH60='and T. Sjostrand & M. v. Zijl, PRD36(1987)2019'
WRITE(M11,5030) CH60
IF (ITUNE.GE.110.AND.ITUNE.LE.119) THEN
CH60='LEP parameters tuned by Professor, hep-ph/0907.2973'
WRITE(M11,5030) CH60
ENDIF
ELSEIF (M13.GE.1.AND.ITUNE.EQ.129) THEN
WRITE(M11,5010) ITUNE, CHNAME
CH60='Tuned by Professor, hep-ph/0907.2973'
WRITE(M11,5030) CH60
CH60='Physics Model: '//
& 'T. Sjostrand & M. v. Zijl, PRD36(1987)2019'
WRITE(M11,5030) CH60
ENDIF
C...Make sure we start from old default fragmentation parameters
PARJ(81) = 0.29
PARJ(82) = 1.0
C...Use Professor's LEP pars if ITUNE >= 110
C...(i.e., for A-Pro, DW-Pro etc)
IF (ITUNE.LT.110) THEN
C...# Old defaults
MSTJ(11) = 4
PARJ(1) = 0.1
PARJ(2) = 0.3
PARJ(3) = 0.40
PARJ(4) = 0.05
PARJ(11) = 0.5
PARJ(12) = 0.6
PARJ(21) = 0.36
PARJ(41) = 0.30
PARJ(42) = 0.58
PARJ(46) = 1.0
PARJ(81) = 0.29
PARJ(82) = 1.0
ELSE
C...# Tuned flavour parameters:
PARJ(1) = 0.073
PARJ(2) = 0.2
PARJ(3) = 0.94
PARJ(4) = 0.032
PARJ(11) = 0.31
PARJ(12) = 0.4
PARJ(13) = 0.54
PARJ(25) = 0.63
PARJ(26) = 0.12
C...# Switch on Bowler:
MSTJ(11) = 5
C...# Fragmentation
PARJ(21) = 0.325
PARJ(41) = 0.5
PARJ(42) = 0.6
PARJ(47) = 0.67
PARJ(81) = 0.29
PARJ(82) = 1.65
ENDIF
C...Remove middle digit now for Professor variants, since identical pars
ITUNEB=ITUNE
IF (ITUNE.GE.110.AND.ITUNE.LE.119) THEN
ITUNEB=(ITUNE/100)*100+MOD(ITUNE,10)
ENDIF
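C...(Example: ITUNE=113 gives ITUNEB=(113/100)*100+MOD(113,10)=103,
C...so the Professor variant 113 shares these parameters with 103)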
C...Multiple interactions on, old framework
MSTP(81) = 1
C...Fast IR cutoff energy scaling by default
PARP(89) = 1800D0
PARP(90) = 0.25D0
C...Default CTEQ5L (internal), except for QW: CTEQ61 (external)
MSTP(51) = 7
MSTP(52) = 1
IF (ITUNEB.EQ.105) THEN
MSTP(51) = 10150
MSTP(52) = 2
ELSEIF(ITUNEB.EQ.108.OR.ITUNEB.EQ.109) THEN
MSTP(52) = 2
MSTP(54) = 2
MSTP(51) = 10042
MSTP(53) = 10042
ENDIF
C...Double Gaussian matter distribution.
MSTP(82) = 4
PARP(83) = 0.5D0
PARP(84) = 0.4D0
C...FSR activity.
PARP(71) = 4D0
C...Fragmentation functions and c and b parameters
C...(only if not using Professor)
IF (ITUNE.LE.109) THEN
MSTJ(11) = 4
PARJ(54) = -0.05
PARJ(55) = -0.005
ENDIF
C...Tune A and AW
IF(ITUNEB.EQ.100.OR.ITUNEB.EQ.101) THEN
C...pT0.
PARP(82) = 2.0D0
c...String drawing almost completely minimizes string length.
PARP(85) = 0.9D0
PARP(86) = 0.95D0
C...ISR cutoff, muR scale factor, and phase space size
PARP(62) = 1D0
PARP(64) = 1D0
PARP(67) = 4D0
C...Intrinsic kT, size, and max
MSTP(91) = 1
PARP(91) = 1D0
PARP(93) = 5D0
C...AW : higher ISR IR cutoff, but also larger alphaS, more intrinsic kT
IF (ITUNEB.EQ.101) THEN
PARP(62) = 1.25D0
PARP(64) = 0.2D0
PARP(91) = 2.1D0
PARP(92) = 15.0D0
ENDIF
C...Tune BW (larger alphaS, more intrinsic kT. Smaller ISR phase space)
ELSEIF (ITUNEB.EQ.102) THEN
C...pT0.
PARP(82) = 1.9D0
c...String drawing completely minimizes string length.
PARP(85) = 1.0D0
PARP(86) = 1.0D0
C...ISR cutoff, muR scale factor, and phase space size
PARP(62) = 1.25D0
PARP(64) = 0.2D0
PARP(67) = 1D0
C...Intrinsic kT, size, and max
MSTP(91) = 1
PARP(91) = 2.1D0
PARP(93) = 15D0
C...Tune DW
ELSEIF (ITUNEB.EQ.103) THEN
C...pT0.
PARP(82) = 1.9D0
c...String drawing completely minimizes string length.
PARP(85) = 1.0D0
PARP(86) = 1.0D0
C...ISR cutoff, muR scale factor, and phase space size
PARP(62) = 1.25D0
PARP(64) = 0.2D0
PARP(67) = 2.5D0
C...Intrinsic kT, size, and max
MSTP(91) = 1
PARP(91) = 2.1D0
PARP(93) = 15D0
C...Tune DWT
ELSEIF (ITUNEB.EQ.104) THEN
C...pT0.
PARP(82) = 1.9409D0
C...Run II ref scale and slow scaling
PARP(89) = 1960D0
PARP(90) = 0.16D0
c...String drawing completely minimizes string length.
PARP(85) = 1.0D0
PARP(86) = 1.0D0
C...ISR cutoff, muR scale factor, and phase space size
PARP(62) = 1.25D0
PARP(64) = 0.2D0
PARP(67) = 2.5D0
C...Intrinsic kT, size, and max
MSTP(91) = 1
PARP(91) = 2.1D0
PARP(93) = 15D0
C...Tune QW
ELSEIF(ITUNEB.EQ.105) THEN
IF (M13.GE.1) THEN
WRITE(M11,5030) ' '
CH70='NB! This tune requires CTEQ6.1 pdfs to be '//
& 'externally linked'
WRITE(M11,5035) CH70
ENDIF
C...pT0.
PARP(82) = 1.1D0
c...String drawing completely minimizes string length.
PARP(85) = 1.0D0
PARP(86) = 1.0D0
C...ISR cutoff, muR scale factor, and phase space size
PARP(62) = 1.25D0
PARP(64) = 0.2D0
PARP(67) = 2.5D0
C...Intrinsic kT, size, and max
MSTP(91) = 1
PARP(91) = 2.1D0
PARP(93) = 15D0
C...Tune D6 and D6T
ELSEIF(ITUNEB.EQ.108.OR.ITUNEB.EQ.109) THEN
IF (M13.GE.1) THEN
WRITE(M11,5030) ' '
CH70='NB! This tune requires CTEQ6L pdfs to be '//
& 'externally linked'
WRITE(M11,5035) CH70
ENDIF
C...The "Rick" proton, double gauss with 0.5/0.4
MSTP(82) = 4
PARP(83) = 0.5D0
PARP(84) = 0.4D0
c...String drawing completely minimizes string length.
PARP(85) = 1.0D0
PARP(86) = 1.0D0
IF (ITUNEB.EQ.108) THEN
C...D6: pT0, Run I ref scale, and fast energy scaling
PARP(82) = 1.8D0
PARP(89) = 1800D0
PARP(90) = 0.25D0
ELSE
C...D6T: pT0, Run II ref scale, and slow energy scaling
PARP(82) = 1.8387D0
PARP(89) = 1960D0
PARP(90) = 0.16D0
ENDIF
C...ISR cutoff, muR scale factor, and phase space size
PARP(62) = 1.25D0
PARP(64) = 0.2D0
PARP(67) = 2.5D0
C...Intrinsic kT, size, and max
MSTP(91) = 1
PARP(91) = 2.1D0
PARP(93) = 15D0
C...Old ATLAS-DC2 5-parameter tune
ELSEIF(ITUNEB.EQ.106) THEN
IF (M13.GE.1) THEN
WRITE(M11,5010) ITUNE, CHNAME
CH60='see A. Moraes et al., SN-ATLAS-2006-057,'
WRITE(M11,5030) CH60
CH60=' R. Field in hep-ph/0610012,'
WRITE(M11,5030) CH60
CH60='and T. Sjostrand & M. v. Zijl, PRD36(1987)2019'
WRITE(M11,5030) CH60
ENDIF
C... pT0.
PARP(82) = 1.8D0
C... Different ref scale and rescaling pace
PARP(89) = 1000D0
PARP(90) = 0.16D0
C... Parameters of mass distribution
PARP(83) = 0.5D0
PARP(84) = 0.5D0
C... Old default string drawing
PARP(85) = 0.33D0
PARP(86) = 0.66D0
C... ISR, phase space equivalent to Tune B
PARP(62) = 1D0
PARP(64) = 1D0
PARP(67) = 1D0
C... FSR
PARP(71) = 4D0
C... Intrinsic kT
MSTP(91) = 1
PARP(91) = 1D0
PARP(93) = 5D0
C...Professor's Pro-Q2O Tune
ELSEIF(ITUNE.EQ.129) THEN
PARP(62) = 2.9
PARP(64) = 0.14
PARP(67) = 2.65
PARP(82) = 1.9
PARP(83) = 0.83
PARP(84) = 0.6
PARP(85) = 0.86
PARP(86) = 0.93
PARP(89) = 1800D0
PARP(90) = 0.22
MSTP(91) = 1
PARP(91) = 2.1
PARP(93) = 5.0
ENDIF
C... Output
IF (M13.GE.1) THEN
WRITE(M11,5030) ' '
WRITE(M11,5040) 51, MSTP(51), CHMSTP(51)
WRITE(M11,5040) 52, MSTP(52), CHMSTP(52)
WRITE(M11,5040) 3, MSTP( 3), CHMSTP( 3)
WRITE(M11,5050) 62, PARP(62), CHPARP(62)
WRITE(M11,5050) 64, PARP(64), CHPARP(64)
WRITE(M11,5050) 67, PARP(67), CHPARP(67)
WRITE(M11,5040) 68, MSTP(68), CHMSTP(68)
CH60='(Note: MSTP(68) is not explicitly (re-)set by PYTUNE)'
WRITE(M11,5030) CH60
WRITE(M11,5050) 71, PARP(71), CHPARP(71)
WRITE(M11,5060) 81, PARJ(81), CHPARJ(81)
WRITE(M11,5060) 82, PARJ(82), CHPARJ(82)
WRITE(M11,5040) 33, MSTP(33), CHMSTP(33)
WRITE(M11,5040) 81, MSTP(81), CHMSTP(81)
WRITE(M11,5050) 82, PARP(82), CHPARP(82)
WRITE(M11,5050) 89, PARP(89), CHPARP(89)
WRITE(M11,5050) 90, PARP(90), CHPARP(90)
WRITE(M11,5040) 82, MSTP(82), CHMSTP(82)
WRITE(M11,5050) 83, PARP(83), CHPARP(83)
WRITE(M11,5050) 84, PARP(84), CHPARP(84)
WRITE(M11,5050) 85, PARP(85), CHPARP(85)
WRITE(M11,5050) 86, PARP(86), CHPARP(86)
WRITE(M11,5040) 91, MSTP(91), CHMSTP(91)
WRITE(M11,5050) 91, PARP(91), CHPARP(91)
WRITE(M11,5050) 93, PARP(93), CHPARP(93)
ENDIF
C=======================================================================
C... ACR, tune A with new CR (107)
ELSEIF(ITUNE.EQ.107.OR.ITUNE.EQ.117) THEN
IF (M13.GE.1) THEN
WRITE(M11,5010) ITUNE, CHNAME
CH60='Tune A modified with new colour reconnections'
WRITE(M11,5030) CH60
CH60='PARP(85)=0D0 and amount of CR is regulated by PARP(78)'
WRITE(M11,5030) CH60
CH60='see P. Skands & D. Wicke, hep-ph/0703081,'
WRITE(M11,5030) CH60
CH60=' R. Field, in hep-ph/0610012 (Tune A),'
WRITE(M11,5030) CH60
CH60='and T. Sjostrand & M. v. Zijl, PRD36(1987)2019'
WRITE(M11,5030) CH60
IF (ITUNE.EQ.117) THEN
CH60='LEP parameters tuned by Professor, hep-ph/0907.2973'
WRITE(M11,5030) CH60
ENDIF
ENDIF
IF (MSTP(181).LE.5.OR.(MSTP(181).EQ.6.AND.MSTP(182).LE.406))THEN
CALL PYERRM(9,'(PYTUNE:) linked PYTHIA version incompatible'//
& ' with tune. Using defaults.')
GOTO 100
ENDIF
C...Make sure we start from old default fragmentation parameters
PARJ(81) = 0.29
PARJ(82) = 1.0
C...Use Professor's LEP pars if ITUNE >= 110
C...(i.e., for A-Pro, DW-Pro etc)
IF (ITUNE.LT.110) THEN
C...# Old defaults
MSTJ(11) = 4
C...# Old default flavour parameters
PARJ(21) = 0.36
PARJ(41) = 0.30
PARJ(42) = 0.58
PARJ(46) = 1.0
PARJ(82) = 1.0
ELSE
C...# Tuned flavour parameters:
PARJ(1) = 0.073
PARJ(2) = 0.2
PARJ(3) = 0.94
PARJ(4) = 0.032
PARJ(11) = 0.31
PARJ(12) = 0.4
PARJ(13) = 0.54
PARJ(25) = 0.63
PARJ(26) = 0.12
C...# Switch on Bowler:
MSTJ(11) = 5
C...# Fragmentation
PARJ(21) = 0.325
PARJ(41) = 0.5
PARJ(42) = 0.6
PARJ(47) = 0.67
PARJ(81) = 0.29
PARJ(82) = 1.65
ENDIF
MSTP(81) = 1
PARP(89) = 1800D0
PARP(90) = 0.25D0
MSTP(82) = 4
PARP(83) = 0.5D0
PARP(84) = 0.4D0
MSTP(51) = 7
MSTP(52) = 1
PARP(71) = 4D0
PARP(82) = 2.0D0
PARP(85) = 0.0D0
PARP(86) = 0.66D0
PARP(62) = 1D0
PARP(64) = 1D0
PARP(67) = 4D0
MSTP(91) = 1
PARP(91) = 1D0
PARP(93) = 5D0
MSTP(95) = 6
C...P78 changed from 0.12 to 0.09 in 6.4.19 to improve <pT>(Nch)
PARP(78) = 0.09D0
C...Frag functions (only if not using Professor)
IF (ITUNE.LE.109) THEN
MSTJ(11) = 4
PARJ(54) = -0.05
PARJ(55) = -0.005
ENDIF
C...Output
IF (M13.GE.1) THEN
WRITE(M11,5030) ' '
WRITE(M11,5040) 51, MSTP(51), CHMSTP(51)
WRITE(M11,5040) 52, MSTP(52), CHMSTP(52)
WRITE(M11,5040) 3, MSTP( 3), CHMSTP( 3)
WRITE(M11,5050) 62, PARP(62), CHPARP(62)
WRITE(M11,5050) 64, PARP(64), CHPARP(64)
WRITE(M11,5050) 67, PARP(67), CHPARP(67)
WRITE(M11,5040) 68, MSTP(68), CHMSTP(68)
CH60='(Note: MSTP(68) is not explicitly (re-)set by PYTUNE)'
WRITE(M11,5030) CH60
WRITE(M11,5050) 71, PARP(71), CHPARP(71)
WRITE(M11,5060) 81, PARJ(81), CHPARJ(81)
WRITE(M11,5060) 82, PARJ(82), CHPARJ(82)
WRITE(M11,5040) 33, MSTP(33), CHMSTP(33)
WRITE(M11,5040) 81, MSTP(81), CHMSTP(81)
WRITE(M11,5050) 82, PARP(82), CHPARP(82)
WRITE(M11,5050) 89, PARP(89), CHPARP(89)
WRITE(M11,5050) 90, PARP(90), CHPARP(90)
WRITE(M11,5040) 82, MSTP(82), CHMSTP(82)
WRITE(M11,5050) 83, PARP(83), CHPARP(83)
WRITE(M11,5050) 84, PARP(84), CHPARP(84)
WRITE(M11,5050) 85, PARP(85), CHPARP(85)
WRITE(M11,5050) 86, PARP(86), CHPARP(86)
WRITE(M11,5040) 91, MSTP(91), CHMSTP(91)
WRITE(M11,5050) 91, PARP(91), CHPARP(91)
WRITE(M11,5050) 93, PARP(93), CHPARP(93)
WRITE(M11,5040) 95, MSTP(95), CHMSTP(95)
WRITE(M11,5050) 78, PARP(78), CHPARP(78)
ENDIF
C=======================================================================
C...Intermediate model. Rap tune
C...(retuned to post-6.406 IR factorization)
ELSEIF(ITUNE.EQ.200) THEN
IF (M13.GE.1) THEN
WRITE(M11,5010) ITUNE, CHNAME
CH60='see T. Sjostrand & P. Skands, JHEP03(2004)053'
WRITE(M11,5030) CH60
ENDIF
IF (MSTP(181).LE.5.OR.(MSTP(181).EQ.6.AND.MSTP(182).LE.405))THEN
CALL PYERRM(9,'(PYTUNE:) linked PYTHIA version incompatible'//
& ' with tune.')
ENDIF
C...PDF
MSTP(51) = 7
MSTP(52) = 1
C...ISR
PARP(62) = 1D0
PARP(64) = 1D0
PARP(67) = 4D0
C...FSR
PARP(71) = 4D0
PARJ(81) = 0.29D0
C...UE
MSTP(81) = 11
PARP(82) = 2.25D0
PARP(89) = 1800D0
PARP(90) = 0.25D0
C... ExpOfPow(1.8) overlap profile
MSTP(82) = 5
PARP(83) = 1.8D0
C... Valence qq
MSTP(88) = 0
C... Rap Tune
MSTP(89) = 1
C... Default diquark, BR-g-BR supp
PARP(79) = 2D0
PARP(80) = 0.01D0
C... Final state reconnect.
MSTP(95) = 1
PARP(78) = 0.55D0
C...Fragmentation functions and c and b parameters
MSTJ(11) = 4
PARJ(54) = -0.05
PARJ(55) = -0.005
C... Output
IF (M13.GE.1) THEN
WRITE(M11,5030) ' '
WRITE(M11,5040) 51, MSTP(51), CHMSTP(51)
WRITE(M11,5040) 52, MSTP(52), CHMSTP(52)
WRITE(M11,5040) 3, MSTP( 3), CHMSTP( 3)
WRITE(M11,5050) 62, PARP(62), CHPARP(62)
WRITE(M11,5050) 64, PARP(64), CHPARP(64)
WRITE(M11,5050) 67, PARP(67), CHPARP(67)
WRITE(M11,5040) 68, MSTP(68), CHMSTP(68)
CH60='(Note: MSTP(68) is not explicitly (re-)set by PYTUNE)'
WRITE(M11,5030) CH60
WRITE(M11,5050) 71, PARP(71), CHPARP(71)
WRITE(M11,5060) 81, PARJ(81), CHPARJ(81)
WRITE(M11,5040) 33, MSTP(33), CHMSTP(33)
WRITE(M11,5040) 81, MSTP(81), CHMSTP(81)
WRITE(M11,5050) 82, PARP(82), CHPARP(82)
WRITE(M11,5050) 89, PARP(89), CHPARP(89)
WRITE(M11,5050) 90, PARP(90), CHPARP(90)
WRITE(M11,5040) 82, MSTP(82), CHMSTP(82)
WRITE(M11,5050) 83, PARP(83), CHPARP(83)
WRITE(M11,5040) 88, MSTP(88), CHMSTP(88)
WRITE(M11,5040) 89, MSTP(89), CHMSTP(89)
WRITE(M11,5050) 79, PARP(79), CHPARP(79)
WRITE(M11,5050) 80, PARP(80), CHPARP(80)
WRITE(M11,5050) 93, PARP(93), CHPARP(93)
WRITE(M11,5040) 95, MSTP(95), CHMSTP(95)
WRITE(M11,5050) 78, PARP(78), CHPARP(78)
ENDIF
C...APT(201), APT-Pro (211), Perugia-APT (221), Perugia-APT6 (226).
C...Old model for ISR and UE, new pT-ordered model for FSR
ELSEIF(ITUNE.EQ.201.OR.ITUNE.EQ.211.OR.ITUNE.EQ.221.OR
& .ITUNE.EQ.226) THEN
IF (M13.GE.1) THEN
WRITE(M11,5010) ITUNE, CHNAME
CH60='see P. Skands & D. Wicke, hep-ph/0703081 (Tune APT),'
WRITE(M11,5030) CH60
CH60=' R.D. Field, in hep-ph/0610012 (Tune A)'
WRITE(M11,5030) CH60
CH60=' T. Sjostrand & M. v. Zijl, PRD36(1987)2019'
WRITE(M11,5030) CH60
CH60='and T. Sjostrand & P. Skands, hep-ph/0408302'
WRITE(M11,5030) CH60
IF (ITUNE.EQ.211.OR.ITUNE.GE.221) THEN
CH60='LEP parameters tuned by Professor, hep-ph/0907.2973'
WRITE(M11,5030) CH60
ENDIF
ENDIF
IF (MSTP(181).LE.5.OR.(MSTP(181).EQ.6.AND.MSTP(182).LE.411))THEN
CALL PYERRM(9,'(PYTUNE:) linked PYTHIA version incompatible'//
& ' with tune.')
ENDIF
C...First set as if Pythia tune A
C...Multiple interactions on, old framework
MSTP(81) = 1
C...Fast IR cutoff energy scaling by default
PARP(89) = 1800D0
PARP(90) = 0.25D0
C...Default CTEQ5L (internal)
MSTP(51) = 7
MSTP(52) = 1
C...Double Gaussian matter distribution.
MSTP(82) = 4
PARP(83) = 0.5D0
PARP(84) = 0.4D0
C...FSR activity.
PARP(71) = 4D0
c...String drawing almost completely minimizes string length.
PARP(85) = 0.9D0
PARP(86) = 0.95D0
C...ISR cutoff, muR scale factor, and phase space size
PARP(62) = 1D0
PARP(64) = 1D0
PARP(67) = 4D0
C...Intrinsic kT, size, and max
MSTP(91) = 1
PARP(91) = 1D0
PARP(93) = 5D0
C...Use 2 GeV of primordial kT for "Perugia" version
IF (ITUNE.EQ.221) THEN
PARP(91) = 2D0
PARP(93) = 10D0
ENDIF
C...Use pT-ordered FSR
MSTJ(41) = 12
C...Lambda_FSR scale for pT-ordering
PARJ(81) = 0.23D0
C...Retune pT0 (changed from 2.1 to 2.05 in 6.4.20)
PARP(82) = 2.05D0
C...Fragmentation functions and c and b parameters
C...(overwritten for 211, i.e., if using Professor pars)
PARJ(54) = -0.05
PARJ(55) = -0.005
C...Use Professor's LEP pars if ITUNE == 211, 221, 226
IF (ITUNE.LT.210) THEN
C...# Old defaults
MSTJ(11) = 4
C...# Old default flavour parameters
PARJ(21) = 0.36
PARJ(41) = 0.30
PARJ(42) = 0.58
PARJ(46) = 1.0
PARJ(82) = 1.0
ELSE
C...# Tuned flavour parameters:
PARJ(1) = 0.073
PARJ(2) = 0.2
PARJ(3) = 0.94
PARJ(4) = 0.032
PARJ(11) = 0.31
PARJ(12) = 0.4
PARJ(13) = 0.54
PARJ(25) = 0.63
PARJ(26) = 0.12
C...# Always use pT-ordered shower:
MSTJ(41) = 12
C...# Switch on Bowler:
MSTJ(11) = 5
C...# Fragmentation
PARJ(21) = 3.1327e-01
PARJ(41) = 4.8989e-01
PARJ(42) = 1.2018e+00
PARJ(47) = 1.0000e+00
PARJ(81) = 2.5696e-01
PARJ(82) = 8.0000e-01
ENDIF
C...221, 226 : Perugia-APT and Perugia-APT6
IF (ITUNE.EQ.221.OR.ITUNE.EQ.226) THEN
PARP(64) = 0.5D0
PARP(82) = 2.05D0
PARP(90) = 0.26D0
PARP(91) = 2.0D0
C...The Perugia variants use Steve's showers off the old MPI
MSTP(152) = 1
C...And use a lower PARP(71) as suggested by Professor tunings
C...(although not certain that applies to Q2-pT2 hybrid)
PARP(71) = 2.5D0
C...Perugia-APT6 uses CTEQ6L1 and a slightly lower pT0
IF (ITUNE.EQ.226) THEN
CH70='NB! This tune requires CTEQ6L1 pdfs to be '//
& 'externally linked'
WRITE(M11,5035) CH70
MSTP(52) = 2
MSTP(51) = 10042
PARP(82) = 1.95D0
ENDIF
ENDIF
C... Output
IF (M13.GE.1) THEN
WRITE(M11,5030) ' '
WRITE(M11,5040) 51, MSTP(51), CHMSTP(51)
WRITE(M11,5040) 52, MSTP(52), CHMSTP(52)
WRITE(M11,5040) 3, MSTP( 3), CHMSTP( 3)
WRITE(M11,5050) 62, PARP(62), CHPARP(62)
WRITE(M11,5050) 64, PARP(64), CHPARP(64)
WRITE(M11,5050) 67, PARP(67), CHPARP(67)
WRITE(M11,5040) 68, MSTP(68), CHMSTP(68)
CH60='(Note: MSTP(68) is not explicitly (re-)set by PYTUNE)'
WRITE(M11,5030) CH60
WRITE(M11,5070) 41, MSTJ(41), CHMSTJ(41)
WRITE(M11,5050) 71, PARP(71), CHPARP(71)
WRITE(M11,5060) 81, PARJ(81), CHPARJ(81)
WRITE(M11,5040) 33, MSTP(33), CHMSTP(33)
WRITE(M11,5040) 81, MSTP(81), CHMSTP(81)
WRITE(M11,5050) 82, PARP(82), CHPARP(82)
WRITE(M11,5050) 89, PARP(89), CHPARP(89)
WRITE(M11,5050) 90, PARP(90), CHPARP(90)
WRITE(M11,5040) 82, MSTP(82), CHMSTP(82)
WRITE(M11,5050) 83, PARP(83), CHPARP(83)
WRITE(M11,5050) 84, PARP(84), CHPARP(84)
WRITE(M11,5050) 85, PARP(85), CHPARP(85)
WRITE(M11,5050) 86, PARP(86), CHPARP(86)
WRITE(M11,5040) 91, MSTP(91), CHMSTP(91)
WRITE(M11,5050) 91, PARP(91), CHPARP(91)
WRITE(M11,5050) 93, PARP(93), CHPARP(93)
ENDIF
C======================================================================
C...Uppsala models: Generalized Area Law and Soft Colour Interactions
ELSEIF(CHNAME.EQ.'GAL Tune 0'.OR.CHNAME.EQ.'GAL Tune 1') THEN
IF (M13.GE.1) THEN
WRITE(M11,5010) ITUNE, CHNAME
CH60='see J. Rathsman, PLB452(1999)364'
WRITE(M11,5030) CH60
CH60='and T. Sjostrand & M. v. Zijl, PRD36(1987)2019'
WRITE(M11,5030) CH60
ENDIF
C...GAL Recommended settings from Uppsala web page
MSTP(95) = 13
PARP(78) = 0.10
MSTJ(16) = 0
PARJ(42) = 0.45
PARJ(82) = 2.0
PARP(62) = 2.0
MSTP(81) = 1
MSTP(82) = 1
PARP(81) = 1.9
MSTP(92) = 1
IF(CHNAME.EQ.'GAL Tune 1') THEN
C...GAL retune (P. Skands) to get better min-bias <Nch> at Tevatron
MSTP(82) = 4
PARP(83) = 0.25D0
PARP(84) = 0.5D0
PARP(82) = 1.75
IF (M13.GE.1) THEN
WRITE(M11,5040) 81, MSTP(81), CHMSTP(81)
WRITE(M11,5050) 82, PARP(82), CHPARP(82)
WRITE(M11,5040) 82, MSTP(82), CHMSTP(82)
WRITE(M11,5050) 83, PARP(83), CHPARP(83)
WRITE(M11,5050) 84, PARP(84), CHPARP(84)
ENDIF
ELSE
IF (M13.GE.1) THEN
WRITE(M11,5040) 81, MSTP(81), CHMSTP(81)
WRITE(M11,5050) 81, PARP(81), CHPARP(81)
WRITE(M11,5040) 82, MSTP(82), CHMSTP(82)
ENDIF
ENDIF
C...Output
IF (M13.GE.1) THEN
WRITE(M11,5050) 62, PARP(62), CHPARP(62)
WRITE(M11,5060) 82, PARJ(82), CHPARJ(82)
WRITE(M11,5040) 92, MSTP(92), CHMSTP(92)
WRITE(M11,5040) 95, MSTP(95), CHMSTP(95)
WRITE(M11,5050) 78, PARP(78), CHPARP(78)
WRITE(M11,5060) 42, PARJ(42), CHPARJ(42)
WRITE(M11,5070) 16, MSTJ(16), CHMSTJ(16)
ENDIF
ELSEIF(CHNAME.EQ.'SCI Tune 0'.OR.CHNAME.EQ.'SCI Tune 1') THEN
IF (M13.GE.1) THEN
WRITE(M11,5010) ITUNE, CHNAME
CH60='see A.Edin et al, PLB366(1996)371, Z.Phys.C75(1997)57,'
WRITE(M11,5030) CH60
CH60='and T. Sjostrand & M. v. Zijl, PRD36(1987)2019'
WRITE(M11,5030) CH60
WRITE(M11,5030) ' '
CH70='NB! The SCI model must be run with modified '//
& 'Pythia v6.215:'
WRITE(M11,5035) CH70
CH70='available from http://www.isv.uu.se/thep/MC/scigal/'
WRITE(M11,5035) CH70
WRITE(M11,5030) ' '
ENDIF
C...SCI Recommended settings from Uppsala web page (as per 22/08 2006)
MSTP(81) = 1
MSTP(82) = 1
PARP(81) = 2.2
MSTP(92) = 1
MSTP(95) = 11
PARP(78) = 0.50
MSTJ(16) = 0
IF (CHNAME.EQ.'SCI Tune 1') THEN
C...SCI retune (P. Skands) to get better min-bias <Nch> at Tevatron
MSTP(81) = 1
MSTP(82) = 3
PARP(82) = 2.4
PARP(83) = 0.5D0
PARP(62) = 1.5
PARP(84) = 0.25D0
IF (M13.GE.1) THEN
WRITE(M11,5040) 81, MSTP(81), CHMSTP(81)
WRITE(M11,5050) 82, PARP(82), CHPARP(82)
WRITE(M11,5040) 82, MSTP(82), CHMSTP(82)
WRITE(M11,5050) 83, PARP(83), CHPARP(83)
WRITE(M11,5050) 62, PARP(62), CHPARP(62)
ENDIF
ELSE
IF (M13.GE.1) THEN
WRITE(M11,5040) 81, MSTP(81), CHMSTP(81)
WRITE(M11,5050) 81, PARP(81), CHPARP(81)
WRITE(M11,5040) 82, MSTP(82), CHMSTP(82)
ENDIF
ENDIF
C...Output
IF (M13.GE.1) THEN
WRITE(M11,5040) 92, MSTP(92), CHMSTP(92)
WRITE(M11,5040) 95, MSTP(95), CHMSTP(95)
WRITE(M11,5050) 78, PARP(78), CHPARP(78)
WRITE(M11,5070) 16, MSTJ(16), CHMSTJ(16)
ENDIF
ELSE
IF (MSTU(13).GE.1) WRITE(M11,5020) ITUNE
ENDIF
C...Output of LEP parameters, common to all models
IF (M13.GE.1) THEN
WRITE(M11,5080)
WRITE(M11,5070) 11, MSTJ(11), CHMSTJ(11)
IF (MSTJ(11).EQ.3) THEN
CH60='Warning: using Peterson fragmentation function'
WRITE(M11,5030) CH60
ENDIF
WRITE(M11,5060) 1, PARJ( 1), CHPARJ( 1)
WRITE(M11,5060) 2, PARJ( 2), CHPARJ( 2)
WRITE(M11,5060) 3, PARJ( 3), CHPARJ( 3)
WRITE(M11,5060) 4, PARJ( 4), CHPARJ( 4)
WRITE(M11,5060) 5, PARJ( 5), CHPARJ( 5)
WRITE(M11,5060) 6, PARJ( 6), CHPARJ( 6)
WRITE(M11,5060) 7, PARJ( 7), CHPARJ( 7)
WRITE(M11,5060) 11, PARJ(11), CHPARJ(11)
WRITE(M11,5060) 12, PARJ(12), CHPARJ(12)
WRITE(M11,5060) 13, PARJ(13), CHPARJ(13)
WRITE(M11,5060) 21, PARJ(21), CHPARJ(21)
WRITE(M11,5060) 25, PARJ(25), CHPARJ(25)
WRITE(M11,5060) 26, PARJ(26), CHPARJ(26)
WRITE(M11,5060) 41, PARJ(41), CHPARJ(41)
WRITE(M11,5060) 42, PARJ(42), CHPARJ(42)
WRITE(M11,5060) 45, PARJ(45), CHPARJ(45)
IF (MSTJ(11).LE.3) THEN
WRITE(M11,5060) 54, PARJ(54), CHPARJ(54)
WRITE(M11,5060) 55, PARJ(55), CHPARJ(55)
ELSE
WRITE(M11,5060) 46, PARJ(46), CHPARJ(46)
ENDIF
IF (MSTJ(11).EQ.5) WRITE(M11,5060) 47, PARJ(47), CHPARJ(47)
ENDIF
100 IF (MSTU(13).GE.1) WRITE(M11,6000)
9999 RETURN
5000 FORMAT(1x,78('*')/' *',76x,'*'/' *',3x,'PYTUNE : ',
& 'Presets for underlying-event (and min-bias)',21x,'*'/' *',
& 12x,'Last Change : ',A8,' - P. Skands',30x,'*'/' *',76x,'*')
5010 FORMAT(' *',3x,I4,1x,A16,52x,'*')
5020 FORMAT(' *',3x,'Tune ',I4, ' not recognized. Using defaults.')
5030 FORMAT(' *',3x,10x,A60,3x,'*')
5035 FORMAT(' *',3x,A70,3x,'*')
5040 FORMAT(' *',5x,'MSTP(',I2,') = ',I12,3x,A42,3x,'*')
5050 FORMAT(' *',5x,'PARP(',I2,') = ',F12.4,3x,A40,5x,'*')
5060 FORMAT(' *',5x,'PARJ(',I2,') = ',F12.4,3x,A40,5x,'*')
5070 FORMAT(' *',5x,'MSTJ(',I2,') = ',I12,3x,A40,5x,'*')
5080 FORMAT(' *',3x,'----------------------------',42('-'),3x,'*')
6100 FORMAT(' *',5x,'MSTU(',I3,')= ',I12,3x,A42,3x,'*')
6110 FORMAT(' *',5x,'PARU(',I3,')= ',F12.4,3x,A42,3x,'*')
C 5140 FORMAT(' *',5x,'MSTP(',I3,')= ',I12,3x,A40,5x,'*')
C 5150 FORMAT(' *',5x,'PARP(',I3,')= ',F12.4,3x,A40,5x,'*')
6000 FORMAT(' *',76x,'*'/1x,32('*'),1x,'END OF PYTUNE',1x,31('*'))
6040 FORMAT(' *',5x,'MSWI(',I1,') = ',I12,3x,A40,5x,'*')
6050 FORMAT(' *',5x,'PARSCI(',I1,')= ',F12.4,3x,A40,5x,'*')
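C...Usage sketch (assumption: standard PYTHIA 6.4 call sequence,
C...with the tune selected before initialization), e.g.
C...      CALL PYTUNE(320)
C...      CALL PYINIT('CMS','p','pbar',1960D0)
C...where 320 selects the first of the Perugia tunes listed above.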
END
|
{-
This second-order signature was created from the following second-order syntax description:
syntax Monad | M
type
T : 1-ary
term
ret : α -> T α
bind : T α α.(T β) -> T β | _>>=_ r10
theory
(LU) a : α b : α.(T β) |> bind (ret(a), x. b[x]) = b[a]
(RU) t : T α |> bind (t, x. ret(x)) = t
(AS) t : T α b : α.(T β) c : β.(T γ) |> bind (bind (t, x.b[x]), y.c[y]) = bind (t, x. bind (b[x], y.c[y]))
-}
module Monad.Signature where
open import SOAS.Context
-- Type declaration
data MT : Set where
T : MT → MT
open import SOAS.Syntax.Signature MT public
open import SOAS.Syntax.Build MT public
-- Operator symbols
data Mₒ : Set where
retₒ : {α : MT} → Mₒ
bindₒ : {α β : MT} → Mₒ
-- Term signature
M:Sig : Signature Mₒ
M:Sig = sig λ
{ (retₒ {α}) → (⊢₀ α) ⟼₁ T α
; (bindₒ {α}{β}) → (⊢₀ T α) , (α ⊢₁ T β) ⟼₂ T β
}
open Signature M:Sig public
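-- Reading the signature (sketch, grounded in the syntax description above):
-- `(⊢₀ α) ⟼₁ T α` says retₒ takes one closed argument of type α and returns
-- T α, while `(⊢₀ T α) , (α ⊢₁ T β) ⟼₂ T β` says bindₒ takes a closed T α
-- and a T β-term binding one variable of type α, matching
-- `bind : T α α.(T β) -> T β` in the description.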
|
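# Sketch of the model below (assuming the Sigma.jl API as imported here):
# draw n observations from Exponential(λreal), place a uniform(0,2) prior
# on the rate λ, condition the model x = mvexponential(λ, n) on equality
# with the observed data, and sample approximately from the posterior of λ.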
using Sigma
using Distributions
import Sigma: Exponential, uniform, mvexponential
# Data
λreal = 1.5
n = 10
data = rand(Exponential(λreal),n)
λ = uniform(0,2)
x = mvexponential(λ, n)
observations = x == data
posterior_samples = rand(λ, observations, 10)
|
State Before: 𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type ?u.127337
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type ?u.127432
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f f₀ f₁ g : E → F
f' f₀' f₁' g' e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
hf : HasFDerivAtFilter f f' x L
hg : HasFDerivAtFilter g g' x L
x✝ : E
⊢ f x✝ - f x - ↑f' (x✝ - x) + (g x✝ - g x - ↑g' (x✝ - x)) =
    (fun y => f y + g y) x✝ - (fun y => f y + g y) x - ↑(f' + g') (x✝ - x)
State After: 𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type ?u.127337
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type ?u.127432
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f f₀ f₁ g : E → F
f' f₀' f₁' g' e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
hf : HasFDerivAtFilter f f' x L
hg : HasFDerivAtFilter g g' x L
x✝ : E
⊢ f x✝ - f x - (↑f' x✝ - ↑f' x) + (g x✝ - g x - (↑g' x✝ - ↑g' x)) =
    f x✝ + g x✝ - (f x + g x) - (↑f' x✝ + ↑g' x✝ - (↑f' x + ↑g' x))
Tactic: simp only [LinearMap.sub_apply, LinearMap.add_apply, map_sub, map_add, add_apply]
State Before: 𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type ?u.127337
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type ?u.127432
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f f₀ f₁ g : E → F
f' f₀' f₁' g' e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
hf : HasFDerivAtFilter f f' x L
hg : HasFDerivAtFilter g g' x L
x✝ : E
⊢ f x✝ - f x - (↑f' x✝ - ↑f' x) + (g x✝ - g x - (↑g' x✝ - ↑g' x)) =
    f x✝ + g x✝ - (f x + g x) - (↑f' x✝ + ↑g' x✝ - (↑f' x + ↑g' x))
State After: no goals
Tactic: abel
|
-- ----------------------------------------------------------------- [ Set.idr ]
-- Module : Set.idr
-- Copyright : (c) 2015,2016 See CONTRIBUTORS.md
-- License : see LICENSE
-- --------------------------------------------------------------------- [ EOH ]
||| Implementation of a Set using an AVL Binary Search Tree.
module Data.AVL.Set
import Data.AVL
%access export
-- ------------------------------------------------------------- [ Definitions ]
||| An ordered set.
data Set : (a : Type) -> Type where
MkSet : {a : Type} -> AVLTree n a Unit -> Set a
||| Return an empty set.
empty : (Ord a) => Set a
empty = MkSet (Element Empty AVLEmpty)
||| Insert an element into a set.
insert : (Ord a) => a -> Set a -> Set a
insert a (MkSet m) = MkSet (snd $ AVL.API.insert a () m)
||| Check whether the set contains the given element.
contains : (Ord a) => a -> Set a -> Bool
contains a (MkSet m) = isJust (lookup a m)
||| Construct a set that contains all elements in both of the input sets.
union : (Ord a) => Set a -> Set a -> Set a
union (MkSet m1) (MkSet m2) = MkSet (snd $ AVL.API.foldr insertElement (_ ** m1) m2)
where
insertElement : (Ord a) => a
-> Unit
-> (h : Nat ** AVLTree h a Unit)
-> (h' : Nat ** AVLTree h' a Unit)
insertElement k v m' = AVL.API.insert k v (snd m')
||| Return the size of the set.
size : Set a -> Nat
size (MkSet m) = AVL.API.size m
||| Construct a set that contains the elements from the first input
||| set but not the second.
|||
||| *Note* Not an efficient operation, as we construct a new set
||| rather than modifying an existing one.
difference : (Ord a) => Set a -> Set a -> Set a
difference (MkSet m1) s2 = AVL.API.foldr (\e,_,t => if (contains e s2) then t else Set.insert e t) empty $ m1
||| Construct a set that contains common elements of the input sets.
intersection : (Ord a) => Set a -> Set a -> Set a
intersection s1 s2 = difference s1 (difference s1 s2)
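-- Note: this relies on the set identity s1 ∩ s2 = s1 \ (s1 \ s2), at the
-- cost of two `difference` traversals.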
||| Construct a list using the given set.
toList : Set a -> List a
toList (MkSet m) = map fst $ AVL.API.toList m
||| Construct a set from the given list.
fromList : (Ord a) => List a -> Set a
fromList xs = (foldl (\t,k => Set.insert k t) empty xs)
-- --------------------------------------------------------- [ Implementations ]
Foldable Set where
foldr f i (MkSet m) = foldr (\x,_,p => f x p) i m
Eq a => Eq (Set a) where
(==) (MkSet (Element t _)) (MkSet (Element t' _)) = t == t'
Show a => Show (Set a) where
show s = "{ " ++ (unwords . intersperse "," . map show . Set.toList $ s) ++ " }"
namespace Predicate
export
data Elem : (value : type) -> (set : Set type) -> Type where
IsElem : (prf : HasKey value tree)
-> Elem value (MkSet tree)
private
elemNotInSet : (prfIsNotElem : HasKey value tree -> Void) -> Elem value (MkSet tree) -> Void
elemNotInSet prfIsNotElem (IsElem prf) = prfIsNotElem prf
isElem : DecEq type
=> (value : type)
-> (set : Set type)
-> Dec (Elem value set)
isElem value (MkSet tree) with (isKey value tree)
isElem value (MkSet tree) | (Yes prf) = Yes (IsElem prf)
isElem value (MkSet tree) | (No prfIsNotElem) = No (elemNotInSet prfIsNotElem)
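  -- Usage sketch (hypothetical example): for an element type with Ord and
  -- DecEq, such as Nat, `isElem 3 (fromList [1,2,3])` decides membership,
  -- returning `Yes prf` with a `HasKey` proof for the underlying tree.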
namespace Quantifier
data All : (predicate : type -> Type) -> (set : Set type) -> Type where
Satisfies : (prf : AllKeys p tree) -> All p (MkSet tree)
-- --------------------------------------------------------------------- [ EOF ]
|
[STATEMENT]
lemma match_ex_table_stack_xliftD:
"match_ex_table P C pc (stack_xlift n xt) = \<lfloor>(pc', d)\<rfloor> \<Longrightarrow> d \<ge> n \<and> match_ex_table P C pc xt = \<lfloor>(pc', d - n)\<rfloor>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. match_ex_table P C pc (stack_xlift n xt) = \<lfloor>(pc', d)\<rfloor> \<Longrightarrow> n \<le> d \<and> match_ex_table P C pc xt = \<lfloor>(pc', d - n)\<rfloor>
[PROOF STEP]
by(simp)
|
||| WIP: same as Data.List.Quantifiers but for lazy lists
module Data.List.Lazy.Quantifiers
import Data.DPair
import Data.List.Lazy
%default total
namespace Any
-- Note: it is crucial here that we mark `xs` as `Lazy`, otherwise Idris
-- will happily use `Delay` in the return index and give us a badly-behaved
-- family!
public export
data Any : (p : a -> Type) -> (xs : LazyList a) -> Type where
Here : {0 xs : Lazy (LazyList a)} -> p x -> Any p (x :: xs)
There : {0 xs : Lazy (LazyList a)} -> Any p xs -> Any p (x :: xs)
public export
toExists : Any p xs -> Exists p
toExists (Here prf) = Evidence _ prf
toExists (There p) = toExists p
public export
toDPair : {xs : LazyList a} -> Any p xs -> DPair a p
toDPair (Here prf) = (_ ** prf)
toDPair (There p) = toDPair p
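  -- Usage sketch: from a proof `prf : Any p xs` over a concrete
  -- `xs : LazyList a`, `toDPair prf` recovers the witness together with its
  -- proof, while `toExists prf` keeps only an erased witness.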
|
/-
Copyright (c) 2021 Thomas Browning. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Thomas Browning
-/
import algebra.big_operators.order
import combinatorics.hall.basic
import data.fintype.card
import set_theory.cardinal.finite
/-!
# Configurations of Points and lines
This file introduces abstract configurations of points and lines, and proves some basic properties.
## Main definitions
* `configuration.nondegenerate`: Excludes certain degenerate configurations,
and imposes uniqueness of intersection points.
* `configuration.has_points`: A nondegenerate configuration in which
every pair of lines has an intersection point.
* `configuration.has_lines`: A nondegenerate configuration in which
every pair of points has a line through them.
* `configuration.line_count`: The number of lines through a given point.
* `configuration.point_count`: The number of lines through a given line.
## Main statements
* `configuration.has_lines.card_le`: `has_lines` implies `|P| ≤ |L|`.
* `configuration.has_points.card_le`: `has_points` implies `|L| ≤ |P|`.
* `configuration.has_lines.has_points`: `has_lines` and `|P| = |L|` implies `has_points`.
* `configuration.has_points.has_lines`: `has_points` and `|P| = |L|` implies `has_lines`.
Together, these four statements say that any two of the following properties imply the third:
(a) `has_lines`, (b) `has_points`, (c) `|P| = |L|`.
-/
open_locale big_operators
namespace configuration
universe u
variables (P L : Type u) [has_mem P L]
/-- A type synonym. -/
def dual := P
instance [this : inhabited P] : inhabited (dual P) := this
instance [this : fintype P] : fintype (dual P) := this
instance : has_mem (dual L) (dual P) :=
⟨function.swap (has_mem.mem : P → L → Prop)⟩
/-- A configuration is nondegenerate if:
1) there does not exist a line that passes through all of the points,
2) there does not exist a point that is on all of the lines,
3) there is at most one line through any two points,
4) any two lines have at most one intersection point.
Conditions 3 and 4 are equivalent. -/
class nondegenerate : Prop :=
(exists_point : ∀ l : L, ∃ p, p ∉ l)
(exists_line : ∀ p, ∃ l : L, p ∉ l)
(eq_or_eq : ∀ {p₁ p₂ : P} {l₁ l₂ : L}, p₁ ∈ l₁ → p₂ ∈ l₁ → p₁ ∈ l₂ → p₂ ∈ l₂ → p₁ = p₂ ∨ l₁ = l₂)
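/- Note: `eq_or_eq` packages conditions 3 and 4 at once: if two points lie on
two common lines, then either the points coincide (so two distinct lines meet
in at most one point, condition 4) or the lines coincide (so two distinct
points lie on at most one line, condition 3). -/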
/-- A nondegenerate configuration in which every pair of lines has an intersection point. -/
class has_points extends nondegenerate P L : Type u :=
(mk_point : ∀ {l₁ l₂ : L} (h : l₁ ≠ l₂), P)
(mk_point_ax : ∀ {l₁ l₂ : L} (h : l₁ ≠ l₂), mk_point h ∈ l₁ ∧ mk_point h ∈ l₂)
/-- A nondegenerate configuration in which every pair of points has a line through them. -/
class has_lines extends nondegenerate P L : Type u :=
(mk_line : ∀ {p₁ p₂ : P} (h : p₁ ≠ p₂), L)
(mk_line_ax : ∀ {p₁ p₂ : P} (h : p₁ ≠ p₂), p₁ ∈ mk_line h ∧ p₂ ∈ mk_line h)
open nondegenerate has_points has_lines
instance [nondegenerate P L] : nondegenerate (dual L) (dual P) :=
{ exists_point := @exists_line P L _ _,
exists_line := @exists_point P L _ _,
eq_or_eq := λ l₁ l₂ p₁ p₂ h₁ h₂ h₃ h₄, (@eq_or_eq P L _ _ p₁ p₂ l₁ l₂ h₁ h₃ h₂ h₄).symm }
instance [has_points P L] : has_lines (dual L) (dual P) :=
{ mk_line := @mk_point P L _ _,
mk_line_ax := λ _ _, mk_point_ax }
instance [has_lines P L] : has_points (dual L) (dual P) :=
{ mk_point := @mk_line P L _ _,
mk_point_ax := λ _ _, mk_line_ax }
lemma has_points.exists_unique_point [has_points P L] (l₁ l₂ : L) (hl : l₁ ≠ l₂) :
∃! p, p ∈ l₁ ∧ p ∈ l₂ :=
⟨mk_point hl, mk_point_ax hl,
λ p hp, (eq_or_eq hp.1 (mk_point_ax hl).1 hp.2 (mk_point_ax hl).2).resolve_right hl⟩
lemma has_lines.exists_unique_line [has_lines P L] (p₁ p₂ : P) (hp : p₁ ≠ p₂) :
∃! l : L, p₁ ∈ l ∧ p₂ ∈ l :=
has_points.exists_unique_point (dual L) (dual P) p₁ p₂ hp
variables {P L}
/-- If a nondegenerate configuration has at least as many points as lines, then there exists
an injective function `f` from lines to points, such that `f l` does not lie on `l`. -/
lemma nondegenerate.exists_injective_of_card_le [nondegenerate P L]
[fintype P] [fintype L] (h : fintype.card L ≤ fintype.card P) :
∃ f : L → P, function.injective f ∧ ∀ l, (f l) ∉ l :=
begin
classical,
let t : L → finset P := λ l, (set.to_finset {p | p ∉ l}),
suffices : ∀ s : finset L, s.card ≤ (s.bUnion t).card, -- Hall's marriage theorem
{ obtain ⟨f, hf1, hf2⟩ := (finset.all_card_le_bUnion_card_iff_exists_injective t).mp this,
exact ⟨f, hf1, λ l, set.mem_to_finset.mp (hf2 l)⟩ },
intro s,
by_cases hs₀ : s.card = 0, -- If `s = ∅`, then `s.card = 0 ≤ (s.bUnion t).card`
{ simp_rw [hs₀, zero_le] },
by_cases hs₁ : s.card = 1, -- If `s = {l}`, then pick a point `p ∉ l`
{ obtain ⟨l, rfl⟩ := finset.card_eq_one.mp hs₁,
obtain ⟨p, hl⟩ := exists_point l,
rw [finset.card_singleton, finset.singleton_bUnion, nat.one_le_iff_ne_zero],
exact finset.card_ne_zero_of_mem (set.mem_to_finset.mpr hl) },
suffices : (s.bUnion t)ᶜ.card ≤ sᶜ.card, -- Rephrase in terms of complements (uses `h`)
{ rw [finset.card_compl, finset.card_compl, tsub_le_iff_left] at this,
replace := h.trans this,
rwa [←add_tsub_assoc_of_le s.card_le_univ, le_tsub_iff_left
(le_add_left s.card_le_univ), add_le_add_iff_right] at this },
have hs₂ : (s.bUnion t)ᶜ.card ≤ 1, -- At most one line through two points of `s`
{ refine finset.card_le_one_iff.mpr (λ p₁ p₂ hp₁ hp₂, _),
simp_rw [finset.mem_compl, finset.mem_bUnion, exists_prop, not_exists, not_and,
set.mem_to_finset, set.mem_set_of_eq, not_not] at hp₁ hp₂,
obtain ⟨l₁, l₂, hl₁, hl₂, hl₃⟩ :=
finset.one_lt_card_iff.mp (nat.one_lt_iff_ne_zero_and_ne_one.mpr ⟨hs₀, hs₁⟩),
exact (eq_or_eq (hp₁ l₁ hl₁) (hp₂ l₁ hl₁) (hp₁ l₂ hl₂) (hp₂ l₂ hl₂)).resolve_right hl₃ },
by_cases hs₃ : sᶜ.card = 0,
{ rw [hs₃, nat.le_zero_iff],
rw [finset.card_compl, tsub_eq_zero_iff_le, has_le.le.le_iff_eq (finset.card_le_univ _),
eq_comm, finset.card_eq_iff_eq_univ] at hs₃ ⊢,
rw hs₃,
rw finset.eq_univ_iff_forall at hs₃ ⊢,
exact λ p, exists.elim (exists_line p) -- If `s = univ`, then show `s.bUnion t = univ`
(λ l hl, finset.mem_bUnion.mpr ⟨l, finset.mem_univ l, set.mem_to_finset.mpr hl⟩) },
{ exact hs₂.trans (nat.one_le_iff_ne_zero.mpr hs₃) }, -- If `s < univ`, then consequence of `hs₂`
end
variables {P} (L)
/-- Number of points on a given line. -/
noncomputable def line_count (p : P) : ℕ := nat.card {l : L // p ∈ l}
variables (P) {L}
/-- Number of lines through a given point. -/
noncomputable def point_count (l : L) : ℕ := nat.card {p : P // p ∈ l}
variables (P L)
lemma sum_line_count_eq_sum_point_count [fintype P] [fintype L] :
∑ p : P, line_count L p = ∑ l : L, point_count P l :=
begin
classical,
simp only [line_count, point_count, nat.card_eq_fintype_card, ←fintype.card_sigma],
apply fintype.card_congr,
calc (Σ p, {l : L // p ∈ l}) ≃ {x : P × L // x.1 ∈ x.2} :
(equiv.subtype_prod_equiv_sigma_subtype (∈)).symm
... ≃ {x : L × P // x.2 ∈ x.1} : (equiv.prod_comm P L).subtype_equiv (λ x, iff.rfl)
... ≃ (Σ l, {p // p ∈ l}) : equiv.subtype_prod_equiv_sigma_subtype (λ (l : L) (p : P), p ∈ l),
end
variables {P L}
lemma has_lines.point_count_le_line_count [has_lines P L] {p : P} {l : L} (h : p ∉ l)
[fintype {l : L // p ∈ l}] : point_count P l ≤ line_count L p :=
begin
by_cases hf : infinite {p : P // p ∈ l},
{ exactI (le_of_eq nat.card_eq_zero_of_infinite).trans (zero_le (line_count L p)) },
haveI := fintype_of_not_infinite hf,
rw [line_count, point_count, nat.card_eq_fintype_card, nat.card_eq_fintype_card],
have : ∀ p' : {p // p ∈ l}, p ≠ p' := λ p' hp', h ((congr_arg (∈ l) hp').mpr p'.2),
exact fintype.card_le_of_injective (λ p', ⟨mk_line (this p'), (mk_line_ax (this p')).1⟩)
(λ p₁ p₂ hp, subtype.ext ((eq_or_eq p₁.2 p₂.2 (mk_line_ax (this p₁)).2
((congr_arg _ (subtype.ext_iff.mp hp)).mpr (mk_line_ax (this p₂)).2)).resolve_right
(λ h', (congr_arg _ h').mp h (mk_line_ax (this p₁)).1))),
end
lemma has_points.line_count_le_point_count [has_points P L] {p : P} {l : L} (h : p ∉ l)
[hf : fintype {p : P // p ∈ l}] : line_count L p ≤ point_count P l :=
@has_lines.point_count_le_line_count (dual L) (dual P) _ _ l p h hf
variables (P L)
/-- If a nondegenerate configuration has a unique line through any two points, then `|P| ≤ |L|`. -/
lemma has_lines.card_le [has_lines P L] [fintype P] [fintype L] :
fintype.card P ≤ fintype.card L :=
begin
classical,
by_contradiction hc₂,
obtain ⟨f, hf₁, hf₂⟩ := nondegenerate.exists_injective_of_card_le (le_of_not_le hc₂),
have := calc ∑ p, line_count L p = ∑ l, point_count P l : sum_line_count_eq_sum_point_count P L
... ≤ ∑ l, line_count L (f l) :
finset.sum_le_sum (λ l hl, has_lines.point_count_le_line_count (hf₂ l))
... = ∑ p in finset.univ.image f, line_count L p :
finset.sum_bij (λ l hl, f l) (λ l hl, finset.mem_image_of_mem f hl) (λ l hl, rfl)
(λ l₁ l₂ hl₁ hl₂ hl₃, hf₁ hl₃) (λ p, by simp_rw [finset.mem_image, eq_comm, imp_self])
... < ∑ p, line_count L p : _,
{ exact lt_irrefl _ this },
{ obtain ⟨p, hp⟩ := not_forall.mp (mt (fintype.card_le_of_surjective f) hc₂),
refine finset.sum_lt_sum_of_subset ((finset.univ.image f).subset_univ) (finset.mem_univ p)
_ _ (λ p hp₁ hp₂, zero_le (line_count L p)),
{ simpa only [finset.mem_image, exists_prop, finset.mem_univ, true_and] },
{ rw [line_count, nat.card_eq_fintype_card, fintype.card_pos_iff],
obtain ⟨l, hl⟩ := @exists_line P L _ _ p,
exact let this := not_exists.mp hp l in ⟨⟨mk_line this, (mk_line_ax this).2⟩⟩ } },
end
/-- If a nondegenerate configuration has a unique point on any two lines, then `|L| ≤ |P|`. -/
lemma has_points.card_le [has_points P L] [fintype P] [fintype L] :
fintype.card L ≤ fintype.card P :=
@has_lines.card_le (dual L) (dual P) _ _ _ _
variables {P L}
lemma has_lines.exists_bijective_of_card_eq [has_lines P L]
[fintype P] [fintype L] (h : fintype.card P = fintype.card L) :
∃ f : L → P, function.bijective f ∧ ∀ l, point_count P l = line_count L (f l) :=
begin
classical,
obtain ⟨f, hf1, hf2⟩ := nondegenerate.exists_injective_of_card_le (ge_of_eq h),
have hf3 := (fintype.bijective_iff_injective_and_card f).mpr ⟨hf1, h.symm⟩,
refine ⟨f, hf3, λ l, (finset.sum_eq_sum_iff_of_le
(by exact λ l hl, has_lines.point_count_le_line_count (hf2 l))).mp
((sum_line_count_eq_sum_point_count P L).symm.trans ((finset.sum_bij (λ l hl, f l)
(λ l hl, finset.mem_univ (f l)) (λ l hl, refl (line_count L (f l)))
(λ l₁ l₂ hl₁ hl₂ hl, hf1 hl) (λ p hp, _)).symm)) l (finset.mem_univ l)⟩,
obtain ⟨l, rfl⟩ := hf3.2 p,
exact ⟨l, finset.mem_univ l, rfl⟩,
end
lemma has_lines.line_count_eq_point_count [has_lines P L] [fintype P] [fintype L]
(hPL : fintype.card P = fintype.card L) {p : P} {l : L} (hpl : p ∉ l) :
line_count L p = point_count P l :=
begin
classical,
obtain ⟨f, hf1, hf2⟩ := has_lines.exists_bijective_of_card_eq hPL,
let s : finset (P × L) := set.to_finset {i | i.1 ∈ i.2},
have step1 : ∑ i : P × L, line_count L i.1 = ∑ i : P × L, point_count P i.2,
{ rw [←finset.univ_product_univ, finset.sum_product_right, finset.sum_product],
simp_rw [finset.sum_const, finset.card_univ, hPL, sum_line_count_eq_sum_point_count] },
have step2 : ∑ i in s, line_count L i.1 = ∑ i in s, point_count P i.2,
{ rw [s.sum_finset_product finset.univ (λ p, set.to_finset {l | p ∈ l})],
rw [s.sum_finset_product_right finset.univ (λ l, set.to_finset {p | p ∈ l})],
refine (finset.sum_bij (λ l hl, f l) (λ l hl, finset.mem_univ (f l)) (λ l hl, _)
(λ _ _ _ _ h, hf1.1 h) (λ p hp, _)).symm,
{ simp_rw [finset.sum_const, set.to_finset_card, ←nat.card_eq_fintype_card],
change (point_count P l) • (point_count P l) = (line_count L (f l)) • (line_count L (f l)),
rw hf2 },
{ obtain ⟨l, hl⟩ := hf1.2 p,
exact ⟨l, finset.mem_univ l, hl.symm⟩ },
all_goals { simp_rw [finset.mem_univ, true_and, set.mem_to_finset], exact λ p, iff.rfl } },
have step3 : ∑ i in sᶜ, line_count L i.1 = ∑ i in sᶜ, point_count P i.2,
{ rwa [←s.sum_add_sum_compl, ←s.sum_add_sum_compl, step2, add_left_cancel_iff] at step1 },
rw ← set.to_finset_compl at step3,
exact ((finset.sum_eq_sum_iff_of_le (by exact λ i hi, has_lines.point_count_le_line_count
(set.mem_to_finset.mp hi))).mp step3.symm (p, l) (set.mem_to_finset.mpr hpl)).symm,
end
lemma has_points.line_count_eq_point_count [has_points P L] [fintype P] [fintype L]
(hPL : fintype.card P = fintype.card L) {p : P} {l : L} (hpl : p ∉ l) :
line_count L p = point_count P l :=
(@has_lines.line_count_eq_point_count (dual L) (dual P) _ _ _ _ hPL.symm l p hpl).symm
/-- If a nondegenerate configuration has a unique line through any two points, and if `|P| = |L|`,
then there is a unique point on any two lines. -/
noncomputable def has_lines.has_points [has_lines P L] [fintype P] [fintype L]
(h : fintype.card P = fintype.card L) : has_points P L :=
let this : ∀ l₁ l₂ : L, l₁ ≠ l₂ → ∃ p : P, p ∈ l₁ ∧ p ∈ l₂ := λ l₁ l₂ hl, begin
classical,
obtain ⟨f, hf1, hf2⟩ := has_lines.exists_bijective_of_card_eq h,
haveI : nontrivial L := ⟨⟨l₁, l₂, hl⟩⟩,
haveI := fintype.one_lt_card_iff_nontrivial.mp ((congr_arg _ h).mpr fintype.one_lt_card),
have h₁ : ∀ p : P, 0 < line_count L p := λ p, exists.elim (exists_ne p) (λ q hq, (congr_arg _
nat.card_eq_fintype_card).mpr (fintype.card_pos_iff.mpr ⟨⟨mk_line hq, (mk_line_ax hq).2⟩⟩)),
have h₂ : ∀ l : L, 0 < point_count P l := λ l, (congr_arg _ (hf2 l)).mpr (h₁ (f l)),
obtain ⟨p, hl₁⟩ := fintype.card_pos_iff.mp ((congr_arg _ nat.card_eq_fintype_card).mp (h₂ l₁)),
by_cases hl₂ : p ∈ l₂, exact ⟨p, hl₁, hl₂⟩,
have key' : fintype.card {q : P // q ∈ l₂} = fintype.card {l : L // p ∈ l},
{ exact ((has_lines.line_count_eq_point_count h hl₂).trans nat.card_eq_fintype_card).symm.trans
nat.card_eq_fintype_card, },
have : ∀ q : {q // q ∈ l₂}, p ≠ q := λ q hq, hl₂ ((congr_arg (∈ l₂) hq).mpr q.2),
let f : {q : P // q ∈ l₂} → {l : L // p ∈ l} := λ q, ⟨mk_line (this q), (mk_line_ax (this q)).1⟩,
have hf : function.injective f := λ q₁ q₂ hq, subtype.ext ((eq_or_eq q₁.2 q₂.2
(mk_line_ax (this q₁)).2 ((congr_arg _ (subtype.ext_iff.mp hq)).mpr (mk_line_ax
(this q₂)).2)).resolve_right (λ h, (congr_arg _ h).mp hl₂ (mk_line_ax (this q₁)).1)),
have key' := ((fintype.bijective_iff_injective_and_card f).mpr ⟨hf, key'⟩).2,
obtain ⟨q, hq⟩ := key' ⟨l₁, hl₁⟩,
exact ⟨q, (congr_arg _ (subtype.ext_iff.mp hq)).mp (mk_line_ax (this q)).2, q.2⟩,
end in
{ mk_point := λ l₁ l₂ hl, classical.some (this l₁ l₂ hl),
mk_point_ax := λ l₁ l₂ hl, classical.some_spec (this l₁ l₂ hl) }
/-- If a nondegenerate configuration has a unique point on any two lines, and if `|P| = |L|`,
then there is a unique line through any two points. -/
noncomputable def has_points.has_lines [has_points P L] [fintype P] [fintype L]
(h : fintype.card P = fintype.card L) : has_lines P L :=
let this := @has_lines.has_points (dual L) (dual P) _ _ _ _ h.symm in
{ mk_line := this.mk_point,
mk_line_ax := this.mk_point_ax }
variables (P L)
/-- A projective plane is a nondegenerate configuration in which every pair of lines has
an intersection point, every pair of points has a line through them,
and which has three points in general position. -/
class projective_plane extends nondegenerate P L : Type u :=
(mk_point : ∀ {l₁ l₂ : L} (h : l₁ ≠ l₂), P)
(mk_point_ax : ∀ {l₁ l₂ : L} (h : l₁ ≠ l₂), mk_point h ∈ l₁ ∧ mk_point h ∈ l₂)
(mk_line : ∀ {p₁ p₂ : P} (h : p₁ ≠ p₂), L)
(mk_line_ax : ∀ {p₁ p₂ : P} (h : p₁ ≠ p₂), p₁ ∈ mk_line h ∧ p₂ ∈ mk_line h)
(exists_config : ∃ (p₁ p₂ p₃ : P) (l₁ l₂ l₃ : L), p₁ ∉ l₂ ∧ p₁ ∉ l₃ ∧
p₂ ∉ l₁ ∧ p₂ ∈ l₂ ∧ p₂ ∈ l₃ ∧ p₃ ∉ l₁ ∧ p₃ ∈ l₂ ∧ p₃ ∉ l₃)
namespace projective_plane
@[priority 100] -- see Note [lower instance priority]
instance has_points [h : projective_plane P L] : has_points P L := { .. h }
@[priority 100] -- see Note [lower instance priority]
instance has_lines [h : projective_plane P L] : has_lines P L := { .. h }
instance [projective_plane P L] : projective_plane (dual L) (dual P) :=
{ mk_line := @mk_point P L _ _,
mk_line_ax := λ _ _, mk_point_ax,
mk_point := @mk_line P L _ _,
mk_point_ax := λ _ _, mk_line_ax,
exists_config := by
{ obtain ⟨p₁, p₂, p₃, l₁, l₂, l₃, h₁₂, h₁₃, h₂₁, h₂₂, h₂₃, h₃₁, h₃₂, h₃₃⟩ :=
@exists_config P L _ _,
exact ⟨l₁, l₂, l₃, p₁, p₂, p₃, h₂₁, h₃₁, h₁₂, h₂₂, h₃₂, h₁₃, h₂₃, h₃₃⟩ },
.. dual.nondegenerate P L }
/-- The order of a projective plane is one less than the number of lines through an arbitrary point.
Equivalently, it is one less than the number of points on an arbitrary line. -/
noncomputable def order [projective_plane P L] : ℕ :=
line_count L (classical.some (@exists_config P L _ _)) - 1
variables [fintype P] [fintype L]
lemma card_points_eq_card_lines [projective_plane P L] : fintype.card P = fintype.card L :=
le_antisymm (has_lines.card_le P L) (has_points.card_le P L)
variables {P} (L)
lemma line_count_eq_line_count [projective_plane P L] (p q : P) :
line_count L p = line_count L q :=
begin
obtain ⟨p₁, p₂, p₃, l₁, l₂, l₃, h₁₂, h₁₃, h₂₁, h₂₂, h₂₃, h₃₁, h₃₂, h₃₃⟩ := exists_config,
have h := card_points_eq_card_lines P L,
let n := line_count L p₂,
have hp₂ : line_count L p₂ = n := rfl,
have hl₁ : point_count P l₁ = n := (has_lines.line_count_eq_point_count h h₂₁).symm.trans hp₂,
have hp₃ : line_count L p₃ = n := (has_lines.line_count_eq_point_count h h₃₁).trans hl₁,
have hl₃ : point_count P l₃ = n := (has_lines.line_count_eq_point_count h h₃₃).symm.trans hp₃,
have hp₁ : line_count L p₁ = n := (has_lines.line_count_eq_point_count h h₁₃).trans hl₃,
have hl₂ : point_count P l₂ = n := (has_lines.line_count_eq_point_count h h₁₂).symm.trans hp₁,
suffices : ∀ p : P, line_count L p = n, { exact (this p).trans (this q).symm },
refine λ p, or_not.elim (λ h₂, _) (λ h₂, (has_lines.line_count_eq_point_count h h₂).trans hl₂),
refine or_not.elim (λ h₃, _) (λ h₃, (has_lines.line_count_eq_point_count h h₃).trans hl₃),
rwa (eq_or_eq h₂ h₂₂ h₃ h₂₃).resolve_right (λ h, h₃₃ ((congr_arg (has_mem.mem p₃) h).mp h₃₂)),
end
variables (P) {L}
lemma point_count_eq_point_count [projective_plane P L] (l m : L) :
point_count P l = point_count P m :=
line_count_eq_line_count (dual P) l m
variables {P L}
lemma line_count_eq_point_count [projective_plane P L] (p : P) (l : L) :
line_count L p = point_count P l :=
exists.elim (exists_point l) (λ q hq, (line_count_eq_line_count L p q).trans
(has_lines.line_count_eq_point_count (card_points_eq_card_lines P L) hq))
variables (P L)
lemma dual.order [projective_plane P L] : order (dual L) (dual P) = order P L :=
congr_arg (λ n, n - 1) (line_count_eq_point_count _ _)
variables {P} (L)
lemma line_count_eq [projective_plane P L] (p : P) : line_count L p = order P L + 1 :=
begin
classical,
obtain ⟨q, -, -, l, -, -, -, -, h, -⟩ := classical.some_spec (@exists_config P L _ _),
rw [order, line_count_eq_line_count L p q, line_count_eq_line_count L (classical.some _) q,
line_count, nat.card_eq_fintype_card, nat.sub_add_cancel],
exact fintype.card_pos_iff.mpr ⟨⟨l, h⟩⟩,
end
variables (P) {L}
lemma point_count_eq [projective_plane P L] (l : L) : point_count P l = order P L + 1 :=
(line_count_eq (dual P) l).trans (congr_arg (λ n, n + 1) (dual.order P L))
variables (P L)
lemma one_lt_order [projective_plane P L] : 1 < order P L :=
begin
obtain ⟨p₁, p₂, p₃, l₁, l₂, l₃, -, -, h₂₁, h₂₂, h₂₃, h₃₁, h₃₂, h₃₃⟩ := @exists_config P L _ _,
classical,
rw [←add_lt_add_iff_right, ←point_count_eq, point_count, nat.card_eq_fintype_card],
simp_rw [fintype.two_lt_card_iff, ne, subtype.ext_iff],
have h := mk_point_ax (λ h, h₂₁ ((congr_arg _ h).mpr h₂₂)),
exact ⟨⟨mk_point _, h.2⟩, ⟨p₂, h₂₂⟩, ⟨p₃, h₃₂⟩,
ne_of_mem_of_not_mem h.1 h₂₁, ne_of_mem_of_not_mem h.1 h₃₁, ne_of_mem_of_not_mem h₂₃ h₃₃⟩,
end
variables {P} (L)
lemma two_lt_line_count [projective_plane P L] (p : P) : 2 < line_count L p :=
by simpa only [line_count_eq L p, nat.succ_lt_succ_iff] using one_lt_order P L
variables (P) {L}
lemma two_lt_point_count [projective_plane P L] (l : L) : 2 < point_count P l :=
by simpa only [point_count_eq P l, nat.succ_lt_succ_iff] using one_lt_order P L
variables (P) (L)
lemma card_points [projective_plane P L] : fintype.card P = order P L ^ 2 + order P L + 1 :=
begin
obtain ⟨p, -⟩ := @exists_config P L _ _,
let ϕ : {q // q ≠ p} ≃ Σ (l : {l : L // p ∈ l}), {q // q ∈ l.1 ∧ q ≠ p} :=
{ to_fun := λ q, ⟨⟨mk_line q.2, (mk_line_ax q.2).2⟩, q, (mk_line_ax q.2).1, q.2⟩,
inv_fun := λ lq, ⟨lq.2, lq.2.2.2⟩,
left_inv := λ q, subtype.ext rfl,
right_inv := λ lq, sigma.subtype_ext (subtype.ext ((eq_or_eq (mk_line_ax lq.2.2.2).1
(mk_line_ax lq.2.2.2).2 lq.2.2.1 lq.1.2).resolve_left lq.2.2.2)) rfl },
classical,
have h1 : fintype.card {q // q ≠ p} + 1 = fintype.card P,
{ apply (eq_tsub_iff_add_eq_of_le (nat.succ_le_of_lt (fintype.card_pos_iff.mpr ⟨p⟩))).mp,
convert (fintype.card_subtype_compl _).trans (congr_arg _ (fintype.card_subtype_eq p)) },
have h2 : ∀ l : {l : L // p ∈ l}, fintype.card {q // q ∈ l.1 ∧ q ≠ p} = order P L,
{ intro l,
rw [←fintype.card_congr (equiv.subtype_subtype_equiv_subtype_inter _ _),
fintype.card_subtype_compl (λ (x : subtype (∈ l.val)), x.val = p), ←nat.card_eq_fintype_card],
refine tsub_eq_of_eq_add ((point_count_eq P l.1).trans _),
rw ← fintype.card_subtype_eq (⟨p, l.2⟩ : {q : P // q ∈ l.1}),
simp_rw subtype.ext_iff_val },
simp_rw [←h1, fintype.card_congr ϕ, fintype.card_sigma, h2, finset.sum_const, finset.card_univ],
rw [←nat.card_eq_fintype_card, ←line_count, line_count_eq, smul_eq_mul, nat.succ_mul, sq],
end
lemma card_lines [projective_plane P L] : fintype.card L = order P L ^ 2 + order P L + 1 :=
(card_points (dual L) (dual P)).trans (congr_arg (λ n, n ^ 2 + n + 1) (dual.order P L))
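/- Example: the Fano plane is a projective plane of order 2, so `card_points`
and `card_lines` give `2 ^ 2 + 2 + 1 = 7` points and `7` lines. -/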
end projective_plane
end configuration
|
[GOAL]
C : Type u
inst✝¹ : Category.{v, u} C
inst✝ : Preadditive C
X Y Z : C
r : ℕ
f : X ⟶ Y
g : Y ⟶ Z
⊢ (r • f) ≫ g = r • f ≫ g
[PROOFSTEP]
exact (Preadditive.rightComp X g).map_nsmul f r
[GOAL]
C : Type u
inst✝¹ : Category.{v, u} C
inst✝ : Preadditive C
X Y Z : C
f : X ⟶ Y
r : ℕ
g : Y ⟶ Z
⊢ f ≫ (r • g) = r • f ≫ g
[PROOFSTEP]
exact (Preadditive.leftComp Z f).map_nsmul g r
[GOAL]
C : Type u
inst✝¹ : Category.{v, u} C
inst✝ : Preadditive C
X Y Z : C
r : ℤ
f : X ⟶ Y
g : Y ⟶ Z
⊢ (r • f) ≫ g = r • f ≫ g
[PROOFSTEP]
exact (Preadditive.rightComp X g).map_zsmul f r
[GOAL]
C : Type u
inst✝¹ : Category.{v, u} C
inst✝ : Preadditive C
X Y Z : C
f : X ⟶ Y
r : ℤ
g : Y ⟶ Z
⊢ f ≫ (r • g) = r • f ≫ g
[PROOFSTEP]
exact (Preadditive.leftComp Z f).map_zsmul g r
[GOAL]
C : Type u
inst✝³ : Category.{v, u} C
inst✝² : Preadditive C
R : Type w
inst✝¹ : Semiring R
inst✝ : Linear R C
X : C
⊢ Module R (End X)
[PROOFSTEP]
dsimp [End]
[GOAL]
C : Type u
inst✝³ : Category.{v, u} C
inst✝² : Preadditive C
R : Type w
inst✝¹ : Semiring R
inst✝ : Linear R C
X : C
⊢ Module R (X ⟶ X)
[PROOFSTEP]
infer_instance
[GOAL]
C : Type u
inst✝³ : Category.{v, u} C
inst✝² : Preadditive C
R : Type w
inst✝¹ : Semiring R
inst✝ : Linear R C
X Y Z : C
f : X ⟶ Y
⊢ ∀ (x y : Y ⟶ Z), (fun g => f ≫ g) (x + y) = (fun g => f ≫ g) x + (fun g => f ≫ g) y
[PROOFSTEP]
simp
[GOAL]
C : Type u
inst✝³ : Category.{v, u} C
inst✝² : Preadditive C
R : Type w
inst✝¹ : Semiring R
inst✝ : Linear R C
X Y Z : C
f : X ⟶ Y
⊢ ∀ (r : R) (x : Y ⟶ Z),
AddHom.toFun { toFun := fun g => f ≫ g, map_add' := (_ : ∀ (a a_1 : Y ⟶ Z), f ≫ (a + a_1) = f ≫ a + f ≫ a_1) }
(r • x) =
↑(RingHom.id R) r •
AddHom.toFun { toFun := fun g => f ≫ g, map_add' := (_ : ∀ (a a_1 : Y ⟶ Z), f ≫ (a + a_1) = f ≫ a + f ≫ a_1) } x
[PROOFSTEP]
simp
[GOAL]
C : Type u
inst✝³ : Category.{v, u} C
inst✝² : Preadditive C
R : Type w
inst✝¹ : Semiring R
inst✝ : Linear R C
X Y Z : C
g : Y ⟶ Z
⊢ ∀ (x y : X ⟶ Y), (fun f => f ≫ g) (x + y) = (fun f => f ≫ g) x + (fun f => f ≫ g) y
[PROOFSTEP]
simp
[GOAL]
C : Type u
inst✝³ : Category.{v, u} C
inst✝² : Preadditive C
R : Type w
inst✝¹ : Semiring R
inst✝ : Linear R C
X Y Z : C
g : Y ⟶ Z
⊢ ∀ (r : R) (x : X ⟶ Y),
AddHom.toFun { toFun := fun f => f ≫ g, map_add' := (_ : ∀ (a a_1 : X ⟶ Y), (a + a_1) ≫ g = a ≫ g + a_1 ≫ g) }
(r • x) =
↑(RingHom.id R) r •
AddHom.toFun { toFun := fun f => f ≫ g, map_add' := (_ : ∀ (a a_1 : X ⟶ Y), (a + a_1) ≫ g = a ≫ g + a_1 ≫ g) } x
[PROOFSTEP]
simp
[GOAL]
C : Type u
inst✝⁵ : Category.{v, u} C
inst✝⁴ : Preadditive C
R : Type w
inst✝³ : Semiring R
inst✝² : Linear R C
X Y : C
f : X ⟶ Y
inst✝¹ : Epi f
r : R
inst✝ : Invertible r
Z✝ : C
g g' : Y ⟶ Z✝
H : (r • f) ≫ g = (r • f) ≫ g'
⊢ g = g'
[PROOFSTEP]
rw [smul_comp, smul_comp, ← comp_smul, ← comp_smul, cancel_epi] at H
[GOAL]
C : Type u
inst✝⁵ : Category.{v, u} C
inst✝⁴ : Preadditive C
R : Type w
inst✝³ : Semiring R
inst✝² : Linear R C
X Y : C
f : X ⟶ Y
inst✝¹ : Epi f
r : R
inst✝ : Invertible r
Z✝ : C
g g' : Y ⟶ Z✝
H : r • g = r • g'
⊢ g = g'
[PROOFSTEP]
simpa [smul_smul] using congr_arg (fun f => ⅟r • f) H
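
-- A sketch of the fact the two goals above establish (not the Mathlib source;
-- `import Mathlib` and the `open` line are assumptions): an invertible scalar
-- multiple of an epimorphism still cancels on the right. The proof steps are
-- exactly the two [PROOFSTEP]s above.
open CategoryTheory CategoryTheory.Linear in
example {C : Type*} [Category C] [Preadditive C] {R : Type*} [Semiring R]
    [Linear R C] {X Y Z : C} (f : X ⟶ Y) [Epi f] (r : R) [Invertible r]
    (g g' : Y ⟶ Z) (H : (r • f) ≫ g = (r • f) ≫ g') : g = g' := by
  rw [smul_comp, smul_comp, ← comp_smul, ← comp_smul, cancel_epi] at H
  simpa [smul_smul] using congr_arg (fun f => ⅟r • f) H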
[GOAL]
C : Type u
inst✝⁵ : Category.{v, u} C
inst✝⁴ : Preadditive C
R : Type w
inst✝³ : Semiring R
inst✝² : Linear R C
X Y : C
f : X ⟶ Y
inst✝¹ : Mono f
r : R
inst✝ : Invertible r
Z✝ : C
g g' : Z✝ ⟶ X
H : g ≫ (r • f) = g' ≫ (r • f)
⊢ g = g'
[PROOFSTEP]
rw [comp_smul, comp_smul, ← smul_comp, ← smul_comp, cancel_mono] at H
[GOAL]
C : Type u
inst✝⁵ : Category.{v, u} C
inst✝⁴ : Preadditive C
R : Type w
inst✝³ : Semiring R
inst✝² : Linear R C
X Y : C
f : X ⟶ Y
inst✝¹ : Mono f
r : R
inst✝ : Invertible r
Z✝ : C
g g' : Z✝ ⟶ X
H : r • g = r • g'
⊢ g = g'
[PROOFSTEP]
simpa [smul_smul] using congr_arg (fun f => ⅟r • f) H
[GOAL]
C✝ : Type u
inst✝⁷ : Category.{v, u} C✝
inst✝⁶ : Preadditive C✝
R : Type w
inst✝⁵ : Semiring R
inst✝⁴ : Linear R C✝
k : Type u_1
C : Type u_2
inst✝³ : Category.{?u.35990, u_2} C
inst✝² : Semiring k
inst✝¹ : Preadditive C
inst✝ : Linear k C
X Y W Z : C
f₁ : X ≅ Y
f₂ : W ≅ Z
src✝ : (X ⟶ W) →ₗ[k] Y ⟶ Z := comp (rightComp k Y f₂.hom) (leftComp k W f₁.symm.hom)
x : X ⟶ W
⊢ ↑(comp (leftComp k W f₁.hom) (rightComp k Y f₂.symm.hom))
(AddHom.toFun
{ toAddHom := src✝.toAddHom,
map_smul' :=
(_ :
∀ (r : k) (x : X ⟶ W),
AddHom.toFun src✝.toAddHom (r • x) = ↑(RingHom.id k) r • AddHom.toFun src✝.toAddHom x) }.toAddHom
x) =
x
[PROOFSTEP]
simp only [Iso.symm_hom, LinearMap.toFun_eq_coe, LinearMap.coe_comp, Function.comp_apply, leftComp_apply,
rightComp_apply, Category.assoc, Iso.hom_inv_id, Category.comp_id, Iso.hom_inv_id_assoc]
[GOAL]
C✝ : Type u
inst✝⁷ : Category.{v, u} C✝
inst✝⁶ : Preadditive C✝
R : Type w
inst✝⁵ : Semiring R
inst✝⁴ : Linear R C✝
k : Type u_1
C : Type u_2
inst✝³ : Category.{?u.35990, u_2} C
inst✝² : Semiring k
inst✝¹ : Preadditive C
inst✝ : Linear k C
X Y W Z : C
f₁ : X ≅ Y
f₂ : W ≅ Z
src✝ : (X ⟶ W) →ₗ[k] Y ⟶ Z := comp (rightComp k Y f₂.hom) (leftComp k W f₁.symm.hom)
x : Y ⟶ Z
⊢ AddHom.toFun
{ toAddHom := src✝.toAddHom,
map_smul' :=
(_ :
∀ (r : k) (x : X ⟶ W),
AddHom.toFun src✝.toAddHom (r • x) = ↑(RingHom.id k) r • AddHom.toFun src✝.toAddHom x) }.toAddHom
(↑(comp (leftComp k W f₁.hom) (rightComp k Y f₂.symm.hom)) x) =
x
[PROOFSTEP]
simp only [Iso.symm_hom, LinearMap.coe_comp, Function.comp_apply, rightComp_apply, leftComp_apply,
LinearMap.toFun_eq_coe, Iso.inv_hom_id_assoc, Category.assoc, Iso.inv_hom_id, Category.comp_id]
[GOAL]
C : Type u
inst✝³ : Category.{v, u} C
inst✝² : Preadditive C
S : Type w
inst✝¹ : CommSemiring S
inst✝ : Linear S C
X Y Z : C
⊢ ∀ (x y : X ⟶ Y), (fun f => leftComp S Z f) (x + y) = (fun f => leftComp S Z f) x + (fun f => leftComp S Z f) y
[PROOFSTEP]
intros
[GOAL]
C : Type u
inst✝³ : Category.{v, u} C
inst✝² : Preadditive C
S : Type w
inst✝¹ : CommSemiring S
inst✝ : Linear S C
X Y Z : C
x✝ y✝ : X ⟶ Y
⊢ (fun f => leftComp S Z f) (x✝ + y✝) = (fun f => leftComp S Z f) x✝ + (fun f => leftComp S Z f) y✝
[PROOFSTEP]
ext
[GOAL]
case h
C : Type u
inst✝³ : Category.{v, u} C
inst✝² : Preadditive C
S : Type w
inst✝¹ : CommSemiring S
inst✝ : Linear S C
X Y Z : C
x✝¹ y✝ : X ⟶ Y
x✝ : Y ⟶ Z
⊢ ↑((fun f => leftComp S Z f) (x✝¹ + y✝)) x✝ = ↑((fun f => leftComp S Z f) x✝¹ + (fun f => leftComp S Z f) y✝) x✝
[PROOFSTEP]
simp
[GOAL]
C : Type u
inst✝³ : Category.{v, u} C
inst✝² : Preadditive C
S : Type w
inst✝¹ : CommSemiring S
inst✝ : Linear S C
X Y Z : C
⊢ ∀ (r : S) (x : X ⟶ Y),
AddHom.toFun
{ toFun := fun f => leftComp S Z f,
map_add' :=
(_ :
∀ (x y : X ⟶ Y),
(fun f => leftComp S Z f) (x + y) = (fun f => leftComp S Z f) x + (fun f => leftComp S Z f) y) }
(r • x) =
↑(RingHom.id S) r •
AddHom.toFun
{ toFun := fun f => leftComp S Z f,
map_add' :=
(_ :
∀ (x y : X ⟶ Y),
(fun f => leftComp S Z f) (x + y) = (fun f => leftComp S Z f) x + (fun f => leftComp S Z f) y) }
x
[PROOFSTEP]
intros
[GOAL]
C : Type u
inst✝³ : Category.{v, u} C
inst✝² : Preadditive C
S : Type w
inst✝¹ : CommSemiring S
inst✝ : Linear S C
X Y Z : C
r✝ : S
x✝ : X ⟶ Y
⊢ AddHom.toFun
{ toFun := fun f => leftComp S Z f,
map_add' :=
(_ :
∀ (x y : X ⟶ Y),
(fun f => leftComp S Z f) (x + y) = (fun f => leftComp S Z f) x + (fun f => leftComp S Z f) y) }
(r✝ • x✝) =
↑(RingHom.id S) r✝ •
AddHom.toFun
{ toFun := fun f => leftComp S Z f,
map_add' :=
(_ :
∀ (x y : X ⟶ Y),
(fun f => leftComp S Z f) (x + y) = (fun f => leftComp S Z f) x + (fun f => leftComp S Z f) y) }
x✝
[PROOFSTEP]
ext
[GOAL]
case h
C : Type u
inst✝³ : Category.{v, u} C
inst✝² : Preadditive C
S : Type w
inst✝¹ : CommSemiring S
inst✝ : Linear S C
X Y Z : C
r✝ : S
x✝¹ : X ⟶ Y
x✝ : Y ⟶ Z
⊢ ↑(AddHom.toFun
{ toFun := fun f => leftComp S Z f,
map_add' :=
(_ :
∀ (x y : X ⟶ Y),
(fun f => leftComp S Z f) (x + y) = (fun f => leftComp S Z f) x + (fun f => leftComp S Z f) y) }
(r✝ • x✝¹))
x✝ =
↑(↑(RingHom.id S) r✝ •
AddHom.toFun
{ toFun := fun f => leftComp S Z f,
map_add' :=
(_ :
∀ (x y : X ⟶ Y),
(fun f => leftComp S Z f) (x + y) = (fun f => leftComp S Z f) x + (fun f => leftComp S Z f) y) }
x✝¹)
x✝
[PROOFSTEP]
simp
|
State Before: M : Type u_2
A : Type ?u.45573
B : Type ?u.45576
inst✝ : MulOneClass M
ι : Sort u_1
S : ι → Submonoid M
i : ι
⊢ ∀ {x : M}, x ∈ S i → x ∈ iSup S
State After: M : Type u_2
A : Type ?u.45573
B : Type ?u.45576
inst✝ : MulOneClass M
ι : Sort u_1
S : ι → Submonoid M
i : ι
⊢ S i ≤ iSup S
Tactic: rw [←SetLike.le_def]
State Before: M : Type u_2
A : Type ?u.45573
B : Type ?u.45576
inst✝ : MulOneClass M
ι : Sort u_1
S : ι → Submonoid M
i : ι
⊢ S i ≤ iSup S
State After: no goals
Tactic: exact le_iSup _ _
|
namespace induction
open nat
-- Exercise: 2 stars, recommended (basic_induction)
theorem mult_0_r : ∀ n, n * 0 = 0
| 0 := rfl
| (nat.succ n) := rfl
theorem plus_n_Sm : ∀ n m, succ (n + m) = n + (succ m) :=
λ n m, nat.rec_on m
(show succ (n + 0) = n + 1, from rfl)
(λ m' ih,
show succ (n + succ m') = n + succ (succ m'), from rfl)
theorem plus_comm : ∀ n m : ℕ, n + m = m + n
| 0 m := by rw [add_zero, zero_add]
| (succ n) m := by rw [succ_add, add_succ, plus_comm]
theorem plus_assoc : ∀ n m p : ℕ, n + (m + p) = (n + m) + p
| 0 m p := by rw [zero_add, zero_add]
| (succ n) m p := by rw [succ_add, succ_add, succ_add, add_assoc]
-- Exercise: 2 stars (double_plus)
def double : ℕ → ℕ
| 0 := 0
| (succ n) := succ (succ (double n))
lemma double_plus : ∀ n, double n = n + n
| 0 := rfl
| (succ n) := by rw [add_succ, succ_add, ←double_plus]; refl
-- Exercise: 2 stars, optional (evenb_S)
def evenb : ℕ → bool
| 0 := tt
| (succ 0) := ff
| (succ (succ n')) := evenb n'
theorem evenb_S : ∀ n : ℕ, evenb (succ n) = bnot (evenb n)
| 0 := rfl
| (succ n') := by simp [evenb]; rw [evenb_S, bnot_bnot]
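-- Quick sanity checks for the definitions above (these hold by computation):
example : double 3 = 6 := rfl
example : evenb 4 = tt := rfl
example : evenb 5 = ff := rfl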
end induction |
% **********************************************************************
% Author: Ajahn Chah
% Translator:
% Title: Monastery of Confusion
% First published: Everything is Teaching Us
% Comment:
% Copyright: Permission granted by Wat Pah Nanachat to reprint for free distribution
% **********************************************************************
% Notes on the text:
% A large section of this Dhamma talk has previously been published under the title `Free From Doubt'
% **********************************************************************
\chapterFootnote{\textit{Note}: This talk has been published elsewhere under the title: `\textit{Free From Doubt}'}
\chapter{Monastery of Confusion}
\index[general]{monks!attitude towards laypeople}
\index[general]{laypeople}
\vspace*{\baselineskip}
\dropcaps{S}{taying or going is not important,} but our thinking is. So all of you, please work together, cooperate and live in harmony. This should be the legacy you create here at Wat Pah Nanachat Bung Wai, the International Forest Monastery of Bung Wai District. Don't let it become Wat Pah Nanachat \textit{Woon Wai}, the International Forest Monastery of Confusion and Trouble.\footnote{One of Ajahn Chah's favourite plays on words.} Whoever comes to stay here should be helping create this legacy.
The way I see it, the laypeople are providing robes material, almsfood, the dwelling place, and medicines in appropriate measure. It's true that they are simple country folk, but they support you out of their faith as best they can. Don't get carried away with your ideas of how you think they should be, such as, `Oh, I try to teach these laypeople, but they do make me upset. Today is the observance day, and they came to take precepts. Then tomorrow they'll go casting their fishing nets. They'll drink their whisky. They do these things right out there where anyone can see. Then the next observance day, they'll come again. They'll take the precepts and listen to the Dhamma talk again, and then they'll go to put out their nets again, kill animals again, and drink again.'
You can get pretty upset thinking like this. You'll think that your activities with the laypeople don't bring any benefit at all. Today they take the precepts, and tomorrow they go cast fishing nets. A monk without much wisdom might get discouraged and feel he's failed, thinking his work bears no fruit. But it's not that his efforts have no result; it's those laypeople who get no result. Of course there is some good result from making efforts at virtue. So when there is such a situation and we start to suffer over it, what should we do?
\index[general]{perseverance}
We contemplate within ourselves to recognize that our good intentions have brought some benefit and do have meaning. It's just that the spiritual faculties of those people aren't developed. They aren't strong yet. That's how it is for now, so we patiently continue to advise them. If we just give up on such people, they are likely to become worse than they are now. If we keep at it, they may come to maturity one day and recognize their unskilful actions. Then they will feel some remorse and start to be ashamed of doing such things.
\index[general]{generosity!virtues of}
\looseness=1
Right now, they have the faith to support us with material offerings, giving us our requisites for living. I've considered this; it's quite a big deal. It's no small thing. Donating our food, our dwellings, the medicines to treat our illnesses, is not a small thing. We are practising for the attainment of \glsdisp{nibbana}{Nibb\=ana.} If we don't have any food to eat, that will be pretty difficult. How would we sit in meditation? How would we be able to build this monastery?
\index[similes]{buying/selling medicine!attitude towards laity}
We should recognize when people's spiritual faculties are not yet mature. So what should we do? We are like someone selling medicine. You've probably seen or heard them driving around with their loudspeakers touting the different medicines they have for different maladies. People who have bad headaches or poor digestion might come to buy.
We can accept money from those who buy our medicine; we don't take money from someone who doesn't buy anything. We can feel glad about the people who do buy something. If others stay in their houses and don't come out to buy, we shouldn't get angry with them for that. We shouldn't criticize them.
\index[general]{teaching!laypeople}
If we teach people but they can't practise properly, we shouldn't be getting angry with them. Don't do that! Don't criticize them, but rather keep on instructing them and leading them along. Whenever their faculties have ripened sufficiently, then they will want to do it. Just like when we are selling medicine, we just keep on doing our business. When people have ailments that trouble them, they will buy. Those who don't see a need to buy medicine probably aren't suffering from any such conditions. So never mind.
Keeping at it with this attitude, these problems will be done with. There were such situations in the Buddha's time too.
\index[general]{p\=aram\={\i}}
\index[similes]{ripening fruit!p\=aram\={\i}}
We want to do it right, but somehow we can't get there yet; our own faculties are not sufficiently mature. Our \pali{\glsdisp{parami}{p\=aram\={\i}}} are not complete. It's like fruit that's still growing on the tree. You can't force it to be sweet -- it's still unripe, it's small and sour, simply because it hasn't finished growing. You can't force it to be bigger, to be sweet, to be ripe -- you have to let it ripen according to its nature. As time passes and things change, people may come to spiritual maturity. As time passes the fruit will grow, ripen and sweeten of its own accord. With such an attitude you can be at ease. But if you are impatient and dissatisfied, you keep asking, `Why isn't this mango sweet yet? Why is it sour?' It's still sour because it's not ripe. That's the nature of fruit.
\index[similes]{four kinds of lotus!people in the world}
The people in the world are like that. It makes me think of the Buddha's teaching about four kinds of lotus. Some are still in the mud, some have grown out of the mud but are under the water, some are at the surface of the water, and some have risen above the water and blossomed. The Buddha was able to give his teachings to so many various beings because he understood their different levels of spiritual development. We should think about this and not feel oppressed by what happens here. Just consider yourselves to be like someone selling medicine. Your responsibility is to advertise it and make it available. If someone gets sick, they are likely to come and buy it. Likewise, if people's spiritual faculties mature sufficiently, one day they are likely to develop faith. It's not something we can force them to do. Seeing it in this way, we will be okay.
\index[general]{Buddha, the!recollection of}
Living here in this monastery is certainly meaningful. It's not without benefit. All of you, please practise together harmoniously and amicably. When you experience obstacles and suffering, recollect the virtues of the Buddha. What was the knowledge the Buddha realized? What did the Buddha teach? What does the Dhamma point out? How does the Sa\.ngha practise? Constantly recollecting the qualities of the Three Jewels brings a lot of benefit.
\index[general]{example!setting an}
Whether you are Thais or people from other countries is not important. It's important to maintain harmony and work together. People come from all over to visit this monastery. When folks come to Wat Pah Pong, I urge them to come here, to see the monastery, to practise here. It's a legacy you are creating. It seems that the populace have faith and are gladdened by it. So don't forget yourselves. You should be leading people rather than being led by them. Make your best efforts to practise well and establish yourselves firmly, and good results will come.
Are there any doubts about practice you need to resolve now?
\section{Questions and Answers}
\index[general]{hindrances!sloth and torpor}
\index[general]{meditation!advice}
\index[general]{meditation!all postures}
\noindent\qaitem{Question}: When the mind isn't thinking much, but is in a sort of dark and dull state, is there something we should do to brighten it? Or should we just sit with it?
\noindent\qaitem{Answer}: Is this all the time or when you are sitting in meditation? What exactly is this darkness like? Is it a lack of wisdom?
\qaitem{Q}: When I sit to meditate, I don't get drowsy, but my mind feels dark, sort of dense or opaque.
\qaitem{A}: So you would like to make your mind wise, right? Change your posture, and do a lot of walking meditation. That's one thing to do. You can walk for three hours at a time, until you're really tired.
\qaitem{Q}: I do walking meditation a couple of hours a day, and I usually have a lot of thinking when I do it. But what really concerns me is this dark state when I sit. Should I just try to be aware of it and let go, or is there some means I should use to counter it?
\index[general]{mind!wandering}
\qaitem{A}: I think maybe your postures aren't balanced. When you walk, you have a lot of thinking. So you should do a lot of discursive contemplation; then the mind can retreat from thinking. It won't stick there. But never mind. For now, increase the time you spend on walking meditation. Focus on that. Then if the mind is wandering, pull it out and do some contemplation, such as, for example, investigation of the body. Have you ever done that continuously rather than as an occasional reflection? When you experience this dark state, do you suffer over it?
\qaitem{Q}: I feel frustrated because of my state of mind. I'm not developing \glsdisp{samadhi}{sam\=adhi} or wisdom.
\index[general]{doubt}
\qaitem{A}: When you have this condition of mind the suffering comes about because of not knowing. There is doubt as to why the mind is like this. The important principle in meditation is that whatever occurs, don't be in doubt over it. Doubt only adds to the suffering. If the mind is bright and awake, don't doubt that. It's a condition of mind. If it's dark and dull, don't doubt about that. Just continue to practise diligently without getting caught up in reactions to that state. Take note and be aware of your state of mind, don't have doubts about it. It is just what it is. When you entertain doubts and start grasping at it and giving it meaning, then it is dark.
\index[general]{sleep!sleepiness}
As you practise, these states are things you encounter as you progress along. You needn't have doubts about them. Notice them with awareness and keep letting go. How about sleepiness? Is your sitting more sleepy or awake?
(No reply)
\index[general]{hindrances!sloth and torpor}
Maybe it's hard to recall if you've been sleepy! If this happens, meditate with your eyes open. Don't close them. Instead, you can focus your gaze on one point, such as the light of a candle. Don't close your eyes! This is one way to remove the hindrance of drowsiness.
\index[general]{kasi\d{n}a}
When you're sitting you can close your eyes from time to time and if the mind is clear, without drowsiness, you can then continue to sit with your eyes closed. If it's dull and sleepy, open your eyes and focus on the one point. It's similar to \pali{\glsdisp{kasina}{kasi\d{n}a}} meditation. Doing this, you can make the mind awake and tranquil. The sleepy mind isn't tranquil; it's obscured by hindrance and it's in darkness.
\index[general]{sleep!right amount}
We should talk about sleep also. You can't simply go without sleep. That's the nature of the body. If you're meditating and you get unbearably, utterly sleepy, then let yourself sleep. This is one way to quell the hindrance when it's overwhelming you. Otherwise you practise along, keeping the eyes open if you have this tendency to get drowsy. Close your eyes after a while and check your state of mind. If it's clear, you can practise with eyes closed. Then after some time, take a rest. Some people are always fighting against sleep. They force themselves not to sleep, and the result is that when they sit they are always drifting off to sleep and falling over themselves, sitting in an unaware state.
\qaitem{Q}: Can we focus on the tip of the nose?
\qaitem{A}: That's fine. Whatever suits you, whatever you feel comfortable with and helps you fix your mind, focus on that.
\index[general]{attachment!to meditation techniques}
\index[general]{mindfulness of breathing}
It's like this: if we get attached to the ideals and take the guidelines that we are given in the instructions too literally, it can be difficult to understand. When doing a standard meditation such as mindfulness of breathing, first we should make the determination that right now we are going to do this practice, and we are going to make mindfulness of breathing our foundation. We only focus on the breath at three points, as it passes through the nostrils, the chest and the abdomen. When the air enters, it first passes the nose, then through the chest, then to the end point of the abdomen. As it leaves the body, the beginning is the abdomen, the middle is the chest, and the end is the nose. We merely note it. This is a way to start controlling the mind, tying awareness to these points at the beginning, middle and end of the inhalations and exhalations.
\index[similes]{sewing machine!meditation}
Before we begin we should first sit and let the mind relax. It's similar to sewing robes on a treadle sewing machine. When we are learning to use the sewing machine, first we just sit in front of the machine to get familiar with it and feel comfortable. Here, we just sit and breathe. Not fixing awareness on anything, we merely take note that we are breathing. We take note of whether the breath is relaxed or not and how long or short it is. Having noticed this, then we begin focusing on the inhalation and exhalation at the three points.
We practise like this until we become skilled in it and it goes smoothly. The next stage is to focus awareness only on the sensation of the breath at the tip of the nose or the upper lip. At this point we aren't concerned with whether the breath is long or short, but only focus on the sensation of entering and exiting.
\index[general]{contact}
\index[general]{vitakka-vic\=ara}
\index[general]{formations!as mental phenomena}
\index[general]{vitakka-vic\=ara}
\index[general]{phenomena!mental}
Different phenomena may contact the senses, or thoughts may arise. This is called initial thought (\pali{\glsdisp{vitakka}{vitakka}}). The mind brings up some idea, be it about the nature of compounded phenomena (\pali{\glsdisp{sankhara}{sa\.nkh\=ar\=a}}), about the world, or whatever. Once the mind has brought it up, the mind will want to get involved and merge with it. If it's an object that is wholesome, let the mind take it up. If it is something unwholesome, stop it immediately. If it is something wholesome, let the mind contemplate it, and gladness, satisfaction and happiness will come about. The mind will be bright and clear as the breath goes in and out, and as the mind takes up these initial thoughts. Then initial thought becomes discursive thought (\pali{\glsdisp{vicara}{vic\=ara}}). The mind develops familiarity with the object, exerting itself and merging with it. At this point, there is no sleepiness.
After an appropriate period of this, take your attention back to the breath. As you continue on, there will be initial thought and discursive thought, initial thought and discursive thought. If you are contemplating skilfully on an object such as the nature of \pali{sa\.nkh\=ara}, the mind will experience deeper tranquillity and rapture is born. There is the \pali{vitakka} and \pali{vic\=ara}, and that leads to happiness of mind. At this time there won't be any dullness or drowsiness. The mind won't be dark if we practise like this. It will be gladdened and enraptured.
\index[general]{rapture}
\index[general]{concentration}
This rapture will start to diminish and disappear after a while, so you can take up initial thought again. The mind will become firm and certain with it -- undistracted. Then you go on to discursive thought again, the mind becoming one with it. When you are practising a meditation that suits your temperament and doing it well, then whenever you take up the object, rapture will come about: the hairs of the body stand on end and the mind is enraptured and satiated.
\index[general]{sukha}
When it's like this there can't be any dullness or drowsiness. You won't have any doubts. Back and forth between initial and discursive thought, initial and discursive thought, over and over again and rapture comes. Then there is \pali{\glsdisp{sukha}{sukha.}}
\index[general]{hindrances}
\index[general]{mind!conditions of}
This takes place in sitting practice. After sitting for a while, you can get up and do walking meditation. The mind can be the same in the walking. Not sleepy, it has \pali{vitakka} and \pali{vic\=ara}, \pali{vitakka} and \pali{vic\=ara}, then rapture. There won't be any of the \pali{\glsdisp{nivarana}{n\={\i}vara\d{n}a,}} and the mind will be unstained. Whatever takes place, never mind; you don't need to doubt about any experiences you may have, be they of light, of bliss, or whatever. Don't entertain doubts about these conditions of mind. If the mind is dark, if the mind is illumined, don't fixate on these conditions, don't be attached to them. Let go, discard them. Keep walking, keep noting what is taking place without getting bound or infatuated. Don't suffer over these conditions of mind. Don't have doubts about them. They are just what they are, following the way of mental phenomena. Sometimes the mind will be joyful. Sometimes it will be sorrowful. There can be happiness or suffering; there can be obstruction. Rather than doubting, understand that conditions of mind are like this; whatever manifests is coming about due to causes ripening. At this moment this condition is manifesting; that's what you should recognize. Even if the mind is dark you don't need to be upset over that. If it becomes bright, don't be excessively gladdened by that. Don't have doubts about these conditions of mind, or about your reactions to them.
\index[general]{sleep!sleepiness}
Do your walking meditation until you are really tired, then sit. When you sit determine your mind to sit; don't just play around. If you get sleepy, open your eyes and focus on some object. Walk until the mind separates itself from thoughts and is still, then sit. If you are clear and awake, you can close your eyes. If you get sleepy again, open your eyes and look at an object.
\index[general]{sleep}
\index[general]{walking meditation}
Don't try to do this all day and all night. When you're in need of sleep, let yourself sleep. Just as with our food: once a day we eat. The time comes and we give food to the body. The need for sleep is the same. When the time comes, give yourself some rest. When you've had an appropriate rest, get up. Don't let the mind languish in dullness, but get up and get to work -- start practising. Do a lot of walking meditation. If you walk slowly and the mind becomes dull, then walk fast. Learn to find the right pace for yourself.
\qaitem{Q}: Are \pali{vitakka} and \pali{vic\=ara} the same?
\index[general]{vitakka-vic\=ara}
\index[general]{death!contemplation of}
\qaitem{A}: You're sitting and suddenly the thought of someone pops into your head -- that's \pali{vitakka}, the initial thought. Then you take that idea of the person and start thinking about them in detail. \pali{Vitakka} picks up the idea, \pali{vic\=ara} investigates it. For example, we pick up the idea of death and then we start considering it: `I will die, others will die, every living being will die; when they die where will they go?' Then stop! Stop and bring it back again. When it gets running like that, stop it again; and then go back to mindfulness of the breath. Sometimes the discursive thought will wander off and not come back, so you have to stop it. Keep at it until the mind is bright and clear.
\index[general]{rapture}
If you practise \pali{vic\=ara} with an object that you are suited to, you may experience the hairs of your body standing on end, tears pouring from your eyes, a state of extreme delight, many different things occur as rapture comes.
\qaitem{Q}: Can this happen with any kind of thinking, or is it only in a state of tranquillity that it happens?
\index[general]{tranquillity}
\qaitem{A}: It's when the mind is tranquil. It's not ordinary mental proliferation. You sit with a calm mind and then the initial thought comes. For example, I think of my brother who just passed away. Or I might think of some other relatives. This is when the mind is tranquil -- the tranquillity isn't something certain, but for the moment the mind is tranquil. After this initial thought comes, I go into discursive thought. If it's a line of thinking that's skilful and wholesome, it leads to ease of mind and happiness, and there is rapture with its attendant experiences. This rapture came from the initial and discursive thinking that took place in a state of calmness. We don't have to give it names such as first \pali{\glsdisp{jhana}{jh\=ana,}} second \pali{jh\=ana} and so forth. We just call it tranquillity.
\index[general]{sukha}
The next factor is bliss (\pali{sukha}). Eventually we drop the initial and discursive thinking as tranquillity deepens. Why? The state of mind is becoming more refined and subtle. \pali{Vitakka} and \pali{vic\=ara} are relatively coarse, and they will vanish. There will remain just the rapture accompanied by bliss and one-pointedness of mind. When it reaches full measure there won't be anything, the mind is empty. That's absorption concentration.
\index[general]{jh\=ana!factors of}
We don't need to fixate or dwell on any of these experiences. They will naturally progress from one to the next. At first there is initial and discursive thought, rapture, bliss and one-pointedness. Then initial and discursive thinking are thrown off, leaving rapture, bliss, and one-pointedness. Rapture is thrown off,\footnote{The scriptures usually say, `with the fading of rapture.'} then bliss, and finally only one-pointedness and equanimity remain. It means the mind becomes more and more tranquil, and its objects are steadily decreasing until there is nothing but one-pointedness and equanimity.
When the mind is tranquil and focused this can happen. It is the power of mind, the state of the mind that has attained tranquillity. When it's like this there won't be any sleepiness. It can't enter the mind; it will disappear. The other hindrances of sensual desire, aversion, doubt and restlessness and agitation won't be present. Though they may still exist latent in the mind of the meditator, they won't occur at this time.
\qaitem{Q}: Should we be closing our eyes so as to shut out the external environment or should we just deal with things as we see them? Is it important whether we open or close the eyes?
\index[general]{meditation!closing the eyes}
\qaitem{A}: When we are new to training, it's important to avoid too much sensory input, so it's better to close the eyes. Not seeing objects that can distract and affect us, we build up the mind's strength. When the mind is strong then we can open the eyes and whatever we see won't sway us. Open or closed won't matter.
\index[general]{meditation!all postures}
When you rest you normally close your eyes. Sitting in meditation with eyes closed is the dwelling place for a practitioner. We find enjoyment and rest in it. This is an important basis for us. But when we are not sitting in meditation, will we be able to deal with things? We sit with eyes closed and we profit from that. When we open our eyes and leave the formal meditation, we can handle whatever we meet. Things won't get out of hand. We won't be at a loss. Basically we are just handling things. It's when we go back to our sitting that we really develop greater wisdom.
\index[general]{uncertainty}
\index[general]{disrobing}
This is how we develop the practice. When it reaches fulfilment, it doesn't matter whether we open or close our eyes, it will be the same. The mind won't change or deviate. At all times of the day -- morning, noon or night -- the state of mind will be the same. We dwell thus. There is nothing that can shake the mind. When happiness arises, we recognize, `It's not certain,' and it passes. Unhappiness arises and we recognize, `It's not certain,' and that's that. You get the idea that you want to disrobe. This is not certain. But you think it's certain. Before you wanted to be ordained, and you were so sure about that. Now you are sure you want to disrobe. It's all uncertain, but you don't see it because of your darkness of mind. Your mind is telling you lies, `Being here, I'm only wasting time.' If you disrobe and go back to the world, won't you waste time there? You don't think about that. Disrobing to work in the fields and gardens, to grow beans or raise pigs and goats, won't that be a waste of time?
\index[similes]{crab and bird!deceived by the mind}
There was once a large pond full of fish. As time passed, the rainfall decreased and the pond became shallow. One day a bird showed up at the edge of the pond. He told the fish, `I really feel sorry for you fish. Here you barely have enough water to keep your backs wet. Do you know that not very far from here there's a big lake, several meters deep where the fish swim happily?'
When the fish in that shallow pond heard this, they got excited. They said to the bird, `It sounds good. But how could we get there?'
The bird said, `No problem. I can carry you in my bill, one at a time.'
The fish discussed it among themselves. `It's not so great here anymore. The water doesn't even cover our heads. We ought to go.' So they lined up to be taken by the bird.
The bird took one fish at a time. As soon as he flew out of sight of the pond, he landed and ate the fish. Then he would return to the pond and tell them, `Your friend is right this moment swimming happily in the lake, and he asks when you will be joining him!'
It sounded fantastic to the fish. They couldn't wait to go, so they started pushing to get to the head of the line. The bird finished off the fish like that. Then he went back to the pond to see if he could find any more. There was only one crab there. So the bird started his sales pitch about the lake.
The crab was sceptical. He asked the bird how he could get there. The bird told him he would carry him in his bill. But this crab had some wisdom. He told the bird, `Let's do it like this -- I'll sit on your back with my arms around your neck. If you try any tricks, I'll choke you with my claws.' The bird felt frustrated by this, but he gave it a try thinking he might still somehow get to eat the crab. So the crab got on his back and they took off.
The bird flew around looking for a good place to land. But as soon as he tried to descend, the crab started squeezing his throat with his claws. The bird couldn't even cry out. He just made a dry, croaking sound. So in the end he had to give up and return the crab to the pond.
I hope you can have the wisdom of the crab! If you are like those fish, you will listen to the voices that tell you how wonderful everything will be if you go back to the world. That's an obstacle ordained people meet with. Please be careful about this.
\qaitem{Q}: Why is it that unpleasant states of mind are difficult to see clearly, while pleasant states are easy to see? When I experience happiness or pleasure I can see that it's something impermanent, but when I'm unhappy that's harder to see.
\index[general]{happiness!and unhappiness}
\index[general]{happiness!overpowered by}
\qaitem{A}: You are thinking in terms of your attraction and aversion and trying to figure it out, but actually delusion is the predominant root. You feel that unhappiness is hard to see while happiness is easy to see. That's just the way your afflictions work. Aversion is hard to let go of, right? It's a strong feeling. You say happiness is easy to let go of. It's not really easy; it's just that it's not so overpowering. Pleasure and happiness are things people like and feel comfortable with. They're not so easy to let go of. Aversion is painful, but people don't know how to let go of it. The truth is that they are equal. When you contemplate thoroughly and get to a certain point you will quickly recognize that they're equal. If you had a scale to weigh them their weight would be the same. But we incline towards the pleasurable.
\index[general]{happiness!inclining towards}
\index[similes]{burnt and frozen!happiness and suffering}
Are you saying that you can let go of happiness easily, while unhappiness is difficult to let go of? You think that the things we like are easy to give up, but you're wondering why the things we dislike are hard to give up. But if they're not good, why are they hard to give up? It's not like that. Think anew. They are completely equal. It's just that we don't incline to them equally. When there is unhappiness we feel bothered, we want it to go away quickly and so we feel it's hard to get rid of. Happiness doesn't usually bother us, so we are friends with it and feel we can let go of it easily. It's not like that; it's not oppressing and squeezing our hearts, that's all. Unhappiness oppresses us. We think one has more value or weight than the other, but in truth they are equal. It's like heat and cold. We can be burned to death by fire. We can also be frozen stiff by cold and we die just the same. Neither is greater than the other. Happiness and suffering are like this, but in our thinking we give them different values.
\index[general]{praise and blame}
Or consider praise and criticism. Do you feel that praise is easy to let go of and criticism is hard to let go of? They are really equal. But when we are praised we don't feel disturbed; we are pleased, but it's not a sharp feeling. Criticism is painful, so we feel it's hard to let go of. Being pleased is also hard to let go of, but we are partial to it so we don't have the same desire to get rid of it quickly. The delight we take in being praised and the sting we feel when criticized are equal. They are the same. But when our minds meet these things we have unequal reactions to them. We don't mind being close to some of them.
Please understand this. In our meditation we will meet with the arising of all sorts of mental afflictions. The correct outlook is to be ready to let go of all of it, whether pleasant or painful. Even though happiness is something we desire and suffering is something we don't desire, we recognize they are of equal value. These are things that we will experience.
\index[general]{nibb\=ana!description}
Happiness is wished for by people in the world. Suffering is not wished for. Nibb\=ana is something beyond wishing or not wishing. Do you understand? There is no wishing involved in Nibb\=ana. Wanting to get happiness, wanting to be free of suffering, wanting to transcend happiness and suffering -- there are none of these things. It is peace.
\index[general]{doubt!ending}
As I see it, realizing the truth doesn't happen by relying on others. You should understand that all doubts will be resolved by our own efforts, by continuous, energetic practice. We won't get free of doubt by asking others. We will only end doubt through our own unrelenting efforts.
\index[general]{patient endurance}
\index[general]{difficulties!bearing with}
Remember this! It's an important principle in practice. The actual doing is what will instruct you. You will come to know all right and wrong. `The Brahmin shall reach the exhaustion of doubt through unceasing practice.' It doesn't matter wherever we go -- everything can be resolved through our own ceaseless efforts. But we can't stick with it. We can't bear the difficulties we meet; we find it hard to face up to our suffering and not to run away from it. If we do face it and bear with it, then we gain knowledge, and the practice starts instructing us automatically, teaching us about right and wrong and the way things really are. Our practice will show us the faults and ill results of wrong thinking. It really happens like this. But it's hard to find people who can see it through. Everyone wants instant awakening. Rushing here and there following your impulses, you only end up worse off for it. Be careful about this.
\index[similes]{still, flowing water!tranquillity and wisdom}
I've often taught that tranquillity is stillness; flowing is wisdom. We practise meditation to calm the mind and make it still; then it can flow. In the beginning we learn what still water is like and what flowing water is like. After practising for a while we will see how these two support each other. We have to make the mind calm, like still water. Then it flows. Both being still and flowing: this is not easy to contemplate.
\index[general]{tranquillity!and insight}
We can understand that still water doesn't flow. We can understand that flowing water isn't still. But when we practise we take hold of both of these. The mind of a true practitioner is like still water that flows, or flowing water that's still. Whatever takes place in the mind of a Dhamma practitioner is like flowing water that is still. To say that it is only flowing is not correct. To say only still is not correct. Ordinarily, still water is still and flowing water flows. But when we have experience of practice, our minds will be in this condition of flowing water that is still.
\index[general]{mind!still and flowing}
This is something we've never seen. When we see flowing water it is just flowing along. When we see still water, it doesn't flow. But within our minds, it will really be like this; like flowing water that is still. In our Dhamma practice we have sam\=adhi, or tranquillity, and wisdom mixed together. We have morality, meditation and wisdom. Then wherever we sit the mind is still and it flows. Still, flowing water. With meditative stability and wisdom, tranquillity and insight, it's like this. The Dhamma is like this. If you have reached the Dhamma, then at all times you will have this experience. Being tranquil and having wisdom: flowing, yet still. Still, yet flowing.
Whenever this occurs in the mind of one who practises, it is something different and strange; it is different from the ordinary mind that one has known all along. Before, when it was flowing, it flowed. When it was still, it didn't flow, but was only still -- the mind can be compared to water in this way. Now it has entered a condition that is like flowing water being still. Whether standing, walking, sitting, or lying down, it is like water that flows yet is still. If we make our minds like this, there is both tranquillity and wisdom.
\index[general]{wisdom!purpose of}
\index[general]{tranquillity!purpose of}
\index[general]{suffering}
\index[general]{Four Noble Truths}
What is the purpose of tranquillity? Why should we have wisdom? They are only for the purpose of freeing ourselves from suffering, nothing else. At present we are suffering, living with \pali{\glsdisp{dukkha}{dukkha,}} not understanding \pali{dukkha}, and therefore holding onto it. But if the mind is as I've been speaking about, there will be many kinds of knowledge. One will know suffering, know the cause of suffering, know the cessation of suffering and know the way of practice to reach the end of suffering. These are the Noble Truths. They will appear of themselves when there is still, flowing water.
\index[general]{heedlessness}
When it is like this, then no matter what we are doing we will have no heedlessness; the habit of heedlessness will weaken and disappear. Whatever we experience we won't fall into heedlessness because the mind will naturally hold fast to the practice. It will be afraid of losing the practice. As we keep on practising and learning from experience we will be drinking of the Dhamma more and more, and our faith will keep increasing.
\index[general]{other people!being led by}
For one who practises it has to be like this. We shouldn't be the kind of people who merely follow others: If our friends aren't doing the practice we won't do it either because we would feel embarrassed. If they stop, we stop. If they do it, we do it. If the teacher tells us to do something, we do it. If he stops, we stop. This is not a very quick way to realization.
\index[general]{communal life!purpose of}
\index[general]{habits!building up good}
What's the point of our training here? It's so that when we are alone, we will be able to continue with the practice. So now, while living together here, when there are morning and evening gatherings to practise, we join in and practise with the others. We build up the habit so that the way of practice is internalized in our hearts, and then we will be able to live anywhere and still practise in the same way.
It's like having a certificate of guarantee. If the King is coming here, we prepare everything as perfectly as we can. He stays a short while and then goes on his way, but he gives his royal seal to acknowledge that things are in order here. Now many of us are practising together, and it's time to learn the practice well, to understand it and internalize it so that each of you can be a witness to yourself. It's like children coming of age.
|
open import Data.Product using ( _×_ ; _,_ )
open import Relation.Unary using ( _∈_ )
open import Web.Semantic.DL.ABox using ( ABox )
open import Web.Semantic.DL.Signature using ( Signature )
open import Web.Semantic.DL.TBox using ( TBox )
open import Web.Semantic.Util using ( Finite )
module Web.Semantic.DL.Category.Object {Σ : Signature} where
infixr 4 _,_
data Object (S T : TBox Σ) : Set₁ where
  _,_ : ∀ X → (X ∈ Finite × ABox Σ X) → Object S T
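
-- An Object packs a carrier X, a proof that X is finite, and an interface
-- ABox over X; the nested pattern (X , X∈Fin , A) used below combines the
-- constructor _,_ (infixr 4 above) with Data.Product's _,_.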
IN : ∀ {S T} → Object S T → Set
IN (X , X∈Fin , A) = X
fin : ∀ {S T} → (A : Object S T) → (IN A ∈ Finite)
fin (X , X∈Fin , A) = X∈Fin
iface : ∀ {S T} → (A : Object S T) → (ABox Σ (IN A))
iface (X , X∈Fin , A) = A
|
#ifndef STRATUM_IO_UNITS_PRINTER
#define STRATUM_IO_UNITS_PRINTER
#include "stratum/macro/namespaces.hpp"
#include <iostream>
#include <iomanip>
#include <boost/io/ios_state.hpp>
#include "stratum/math/functions.hpp"
NAMESPACE_BEGIN(stratum, io)
struct units_printer
{
    double value_;
    const char* unit_;
    const char* units_;
    unsigned int prec_;

    units_printer(double value, const char* unit, const char* units = NULL, unsigned int precision = 2)
        : value_(value), unit_(unit), units_(units), prec_(precision)
    { assert(value >= 0.0); }
}; // struct units_printer
inline std::ostream& operator<<(std::ostream& out, const units_printer& value)
{
    boost::io::ios_all_saver ias(out);
    out << std::setfill('0') << std::fixed << std::setprecision(value.prec_) << std::left;

    // Round to the displayed precision so the singular/plural choice matches
    // what the reader actually sees (e.g. 1.004 at precision 2 prints "1.00").
    double rnd_value = stratum::math::round_digits(value.value_, value.prec_);
    out << value.value_;

    if ( rnd_value == 1.0 )
    {
        out << value.unit_;
    }
    else
    {
        if ( value.units_ )
            out << value.units_;
        else
            out << value.unit_ << "s";
    }
    return out;
} // operator<<()
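
// Usage sketch (illustrative only, not part of the header):
//
//   std::cout << stratum::io::units_printer(1.0, "byte");         // "1.00byte"
//   std::cout << stratum::io::units_printer(2.5, "byte");         // "2.50bytes"
//   std::cout << stratum::io::units_printer(3.0, "foot", "feet"); // "3.00feet"
//
// Note that no separator is printed between the number and the unit; callers
// wanting one can bake a leading space into `unit`/`units`.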
NAMESPACE_END(stratum, io)
#endif // STRATUM_IO_UNITS_PRINTER |
WinCraft Los Angeles Angels 12 x 18 Double Sided Garden Flag ($13.99). Get your home decked out in Los Angeles Angels style with this double sided garden flag from WinCraft! It features bold team graphics and colors that'll give your lawn the perfect amount of Los Angeles Angels zeal. This colorful Los Angeles Angels garden flag will take your yard to the next level!
\subsubsection{Risk Management}
The student team has generated a list of potential risks that threaten the viability of the project and has assigned a likelihood and an impact value to each of them (Table \ref{riskprofile}). The \textit{risk index} for each risk is the product of these two values: \textit{likelihood} $\times$ \textit{impact}.
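For example, the highest-rated risk in Table \ref{riskprofile}, failure of the drone flight hardware due to crashes and/or damage, has a likelihood of $0.9$ and an impact of $1.0$, yielding a risk index of $0.9 \times 1.0 = 0.90$.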
\textit{To mitigate the outlined risks, the following risk management strategy is followed:}
If the risk index is \textbf{greater than or equal to 0.4}, then the risk mitigation protocol \textit{must} be followed. This entails creating a card for the specific risk on Trello (the team's project management tool), discussing the status of the risk during the weekly group meeting, and providing immediate updates on the status of the risk on Slack as they occur.
If the risk index is \textbf{less than 0.4}, then the above risk mitigation protocol \textit{may} be followed, if deemed necessary by any team member. Otherwise, the risk is simply monitored for updates.
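Expressed formally, writing $L$ for the likelihood and $I$ for the impact of a risk, the two rules above reduce to:
% The cases environment below assumes the amsmath package is loaded.
\begin{equation*}
\text{mitigation}(L, I) =
\begin{cases}
\text{protocol mandatory} & \text{if } L \cdot I \geq 0.4, \\
\text{protocol optional, risk monitored} & \text{if } L \cdot I < 0.4.
\end{cases}
\end{equation*}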
The risk profile is reviewed and updated weekly during the team meeting, and otherwise as necessary.
\subsubsection{Potential Risks}
\begin{center}
% Make the table more stretched out
\renewcommand{\arraystretch}{1.3} % Default value: 1
\begin{longtable}{p{0.6\linewidth} *{3}{r}}
\caption{Risk Profile}\label{riskprofile}\\
\hline
\textbf{Risk description} & \textbf{Likelihood} & \textbf{Impact} & \textbf{Risk ($\downarrow$)} \\
\hline
\endfirsthead
\multicolumn{4}{c}%
{\tablename\ \thetable\ -- \textit{Continued from previous page}} \\
\hline
\textbf{Risk description} & \textbf{Likelihood} & \textbf{Impact} & \textbf{Risk ($\downarrow$)} \\
\hline
\endhead
\hline \multicolumn{4}{r}{\textit{Continued on next page}} \\
\endfoot
\hline
\endlastfoot
Drone flight hardware (flight controllers, radio, motors) cannot function due to crashes and/or damage. & 0.9 & 1.0 & 0.90 \\ \hline
Payload is too heavy, which significantly increases drone motor requirements and significantly reduces flight duration. & 0.8 & 0.8 & 0.64 \\ \hline
Accidents damage the drone and computation equipment, requiring extra budget that the team may not have. & 0.6 & 0.9 & 0.54\\ \hline
Total loss of drone hardware and payload during flight. & 0.5 & 1.0 & 0.50 \\\hline
Not enough time commitment from team members. & 0.7 & 0.7 & 0.49\\ \hline
Access to tools and shops for modifying and repairing drone hardware is inadequate or non-existent. & 0.6 & 0.8 & 0.48\\ \hline
Underestimation of the project scope or the work required, leading to poor time management and burn-out. & 0.5 & 0.9 & 0.45\\ \hline
Payload is too heavy, causing the total take-off weight to be exceeded. & 0.4 & 1.0 & 0.40 \\ \hline
Legacy documents for the project are insufficient, resulting in poor maintainability/extensibility for the client. & 0.7 & 0.5 & 0.35\\ \hline
Financial inefficiencies leading to budget overruns or lack of capital. & 0.4 & 0.8 & 0.32\\ \hline
Constrained to purchase lower-quality components due to budget, resulting in lower performance. & 0.6 & 0.5 & 0.30 \\ \hline
Development and management technique/methodology is not effective, leading to productivity losses. & 0.4 & 0.7 & 0.28\\ \hline
Not enough time to work on documentation. & 0.7 & 0.4 & 0.28\\ \hline
Not enough machine learning training data. & 0.5 & 0.5 & 0.25\\ \hline
Not enough FPGA logic elements to implement a desired ML model. & 0.5 & 0.5 & 0.25\\ \hline
Team is indecisive or cannot make a timely decision, resulting in delays. & 0.4 & 0.6 & 0.24\\ \hline
Failure to acquire regulatory compliance, resulting in inability to fly the drone legally. & 0.3 & 0.7 & 0.21\\ \hline
The software, tools or development environment for the project is inadequate. & 0.4 & 0.5 & 0.20\\ \hline
Knowledge and skill regarding ML is insufficient. & 0.5 & 0.4 & 0.20 \\ \hline
Technical debt paydown impacts project timeline. & 0.4 & 0.5 & 0.20\\ \hline
Deliverables fail to meet client’s expectations. & 0.2 & 0.9 & 0.18 \\ \hline
Client demands modification to the scope and requirements of the project that leads to delays or feature cuts. & 0.3 & 0.6 & 0.18\\ \hline
Team communication is ineffective, leading to overlapping work, missed work, and/or incompatible work. & 0.4 & 0.4 & 0.16\\ \hline
Internal documentation or documentation for libraries and parts are not sufficient for development. & 0.4 & 0.4 & 0.16\\ \hline
Camera module lacks documentation. & 0.2 & 0.8 & 0.16\\ \hline
New technology or research emerges, changing the scope significantly. & 0.2 & 0.5 & 0.10\\ \hline
Sabotage of the project. & 0.1 & 1.0 & 0.10\\ \hline
Sudden loss of client. & $\leq$0.1 & 1.0 & 0.10\\ \hline
Sudden loss of team member. & $\leq$0.1 & 0.9 & 0.09\\ \hline
FPGA board lacks documentation. & $\leq$0.1 & 0.8 & 0.08\\ \hline
Client is not cooperative or does not provide necessary information. & $\leq$0.1 & 0.8 & 0.08\\ \hline
Key components are not available. & 0.1 & 0.7 & 0.07\\ \hline
Purchase orders for equipment or tools are delayed or lost. & 0.1 & 0.5 & 0.05\\ \hline
Client is not available enough to provide significant help. & $\leq$0.1 & 0.5 & 0.05\\ \hline
Lack of resources to acquire machine learning knowledge. & $\leq$0.1 & 0.5 & 0.05\\ \hline
Camera module fails to interface with FPGA. & $\leq$0.1 & 0.5 & 0.05\\ \hline
Data transmitter fails to interface with FPGA. & $\leq$0.1 & 0.5 & 0.05\\ \hline
Market competition significantly affects project requirements and scope. & $\leq$0.1 & 0.4 & 0.04\\ \hline
Software license does not allow our application to be delivered. & 0.1 & 0.3 & 0.03\\ \hline
Laws regarding drone operation and piloting change significantly. & 0.1 & 0.2 & 0.02 \\
\end{longtable}
\end{center}
lemma diameter_ball [simp]: fixes a :: "'a::euclidean_space" shows "diameter(ball a r) = (if r < 0 then 0 else 2*r)"