content
stringlengths
10
4.9M
<reponame>DannyParker0001/Kisak-Strike //===- SectionMemoryManager.h - Memory manager for MCJIT/RtDyld -*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file contains the declaration of a section-based memory manager used by // the MCJIT execution engine and RuntimeDyld. // //===----------------------------------------------------------------------===// #ifndef LLVM_EXECUTIONENGINE_SECTIONMEMORYMANAGER_H #define LLVM_EXECUTIONENGINE_SECTIONMEMORYMANAGER_H #include "llvm/ADT/SmallVector.h" #include "llvm/ExecutionEngine/JITMemoryManager.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Memory.h" namespace llvm { /// This is a simple memory manager which implements the methods called by /// the RuntimeDyld class to allocate memory for section-based loading of /// objects, usually those generated by the MCJIT execution engine. /// /// This memory manager allocates all section memory as read-write. The /// RuntimeDyld will copy JITed section memory into these allocated blocks /// and perform any necessary linking and relocations. /// /// Any client using this memory manager MUST ensure that section-specific /// page permissions have been applied before attempting to execute functions /// in the JITed object. Permissions can be applied either by calling /// MCJIT::finalizeObject or by calling SectionMemoryManager::applyPermissions /// directly. Clients of MCJIT should call MCJIT::finalizeObject. 
class SectionMemoryManager : public JITMemoryManager { SectionMemoryManager(const SectionMemoryManager&) LLVM_DELETED_FUNCTION; void operator=(const SectionMemoryManager&) LLVM_DELETED_FUNCTION; public: SectionMemoryManager() { } virtual ~SectionMemoryManager(); /// \brief Allocates a memory block of (at least) the given size suitable for /// executable code. /// /// The value of \p Alignment must be a power of two. If \p Alignment is zero /// a default alignment of 16 will be used. virtual uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment, unsigned SectionID); /// \brief Allocates a memory block of (at least) the given size suitable for /// executable code. /// /// The value of \p Alignment must be a power of two. If \p Alignment is zero /// a default alignment of 16 will be used. virtual uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment, unsigned SectionID, bool isReadOnly); /// \brief Applies section-specific memory permissions. /// /// This method is called when object loading is complete and section page /// permissions can be applied. It is up to the memory manager implementation /// to decide whether or not to act on this method. The memory manager will /// typically allocate all sections as read-write and then apply specific /// permissions when this method is called. Code sections cannot be executed /// until this function has been called. /// /// \returns true if an error occurred, false otherwise. virtual bool applyPermissions(std::string *ErrMsg = 0); /// This method returns the address of the specified function. As such it is /// only useful for resolving library symbols, not code generated symbols. /// /// If \p AbortOnFailure is false and no function with the given name is /// found, this function returns a null pointer. Otherwise, it prints a /// message to stderr and aborts. 
virtual void *getPointerToNamedFunction(const std::string &Name, bool AbortOnFailure = true); /// \brief Invalidate instruction cache for code sections. /// /// Some platforms with separate data cache and instruction cache require /// explicit cache flush, otherwise JIT code manipulations (like resolved /// relocations) will get to the data cache but not to the instruction cache. /// /// This method is not called by RuntimeDyld or MCJIT during the load /// process. Clients may call this function when needed. See the lli /// tool for example use. virtual void invalidateInstructionCache(); private: struct MemoryGroup { SmallVector<sys::MemoryBlock, 16> AllocatedMem; SmallVector<sys::MemoryBlock, 16> FreeMem; sys::MemoryBlock Near; }; uint8_t *allocateSection(MemoryGroup &MemGroup, uintptr_t Size, unsigned Alignment); error_code applyMemoryGroupPermissions(MemoryGroup &MemGroup, unsigned Permissions); MemoryGroup CodeMem; MemoryGroup RWDataMem; MemoryGroup RODataMem; public: /// /// Functions below are not used by MCJIT or RuntimeDyld, but must be /// implemented because they are declared as pure virtuals in the base class. 
/// virtual void setMemoryWritable() { llvm_unreachable("Unexpected call!"); } virtual void setMemoryExecutable() { llvm_unreachable("Unexpected call!"); } virtual void setPoisonMemory(bool poison) { llvm_unreachable("Unexpected call!"); } virtual void AllocateGOT() { llvm_unreachable("Unexpected call!"); } virtual uint8_t *getGOTBase() const { llvm_unreachable("Unexpected call!"); return 0; } virtual uint8_t *startFunctionBody(const Function *F, uintptr_t &ActualSize){ llvm_unreachable("Unexpected call!"); return 0; } virtual uint8_t *allocateStub(const GlobalValue *F, unsigned StubSize, unsigned Alignment) { llvm_unreachable("Unexpected call!"); return 0; } virtual void endFunctionBody(const Function *F, uint8_t *FunctionStart, uint8_t *FunctionEnd) { llvm_unreachable("Unexpected call!"); } virtual uint8_t *allocateSpace(intptr_t Size, unsigned Alignment) { llvm_unreachable("Unexpected call!"); return 0; } virtual uint8_t *allocateGlobal(uintptr_t Size, unsigned Alignment) { llvm_unreachable("Unexpected call!"); return 0; } virtual void deallocateFunctionBody(void *Body) { llvm_unreachable("Unexpected call!"); } virtual uint8_t *startExceptionTable(const Function *F, uintptr_t &ActualSize) { llvm_unreachable("Unexpected call!"); return 0; } virtual void endExceptionTable(const Function *F, uint8_t *TableStart, uint8_t *TableEnd, uint8_t *FrameRegister) { llvm_unreachable("Unexpected call!"); } virtual void deallocateExceptionTable(void *ET) { llvm_unreachable("Unexpected call!"); } }; } #endif // LLVM_EXECUTION_ENGINE_SECTION_MEMORY_MANAGER_H
Literature The Cuttlefish Behind the Wall What was I doing in Space? Not just regular outer-space, but this place, Space, where they had sent men into the atmosphere in years past. Where those very men send their kids to play astronaut for the Summer. Where the buildings are pink and blue, and your nostrils sting from salt-and-fallout-laden winds. I walk to the shore, where I was called, and sat down on a bed on the beach. Soon enough, a pretty cephalopod crawled up and sat next to me. Not too close, mind you. There were at least two salmon-lengths between us. She was one I'd seen before. The history suggested this would be a disaster, but I've been more headstrong about more certain doom than this and have come out on top, so I proceeded. She lolled about out in front where anyone could see her. She said, "Come closer, you look like fun." So I did. I danced (quite horribly, I'll admit) with her, but neither of us cared too much. She grabbed my arm, pulled me into a toy store, then she pulled me onto her roof, and I could have
Statistical Thermal Profile Considering Process Variations: Analysis and Applications The nonuniform substrate thermal profile and process variations are two major concerns in the present-day ultra-deep submicrometer designs. To correctly predict performance/ leakage/reliability measures and address any yield losses during the early stages of design phases, it is desirable to have a reliable thermal estimation of the chip. However, the leakage power sources vary greatly due to process variations and temperature, which result in significant variations in the hotspot and thermal profile formation in very large scale integration chips. Traditionally, no leakage variations have been considered during full-chip thermal analysis. In this paper, the dependence behavior among the process variability, leakage power consumption, and thermal profile construction are established to effectively extract a reliable statistical thermal profile over a die at the microarchitectural level. Knowledge of this is the key to the design and analysis of circuits. The probability density functions of temperatures are extracted while considering the leakage variations due to the gate-length and oxide-thickness variations and while accounting for the coupling between the temperature and the total leakage. Two applications of the developed analyzer are investigated, namely, the evaluation of the hotspots' relocations and the total full-chip power estimation. Finally, the accuracy and efficiency of the developed analyzer are validated by comparisons with Monte Carlo simulations.
// zzve, zzxi, zzxh, zzte public final class zzxg extends AbstractList implements zzve, RandomAccess { public zzxg(zzve zzve1) { // 0 0:aload_0 // 1 1:invokespecial #16 <Method void AbstractList()> zzcch = zzve1; // 2 4:aload_0 // 3 5:aload_1 // 4 6:putfield #18 <Field zzve zzcch> // 5 9:return } static zzve zza(zzxg zzxg1) { return zzxg1.zzcch; // 0 0:aload_0 // 1 1:getfield #18 <Field zzve zzcch> // 2 4:areturn } public final Object get(int i) { return ((Object) ((String)zzcch.get(i))); // 0 0:aload_0 // 1 1:getfield #18 <Field zzve zzcch> // 2 4:iload_1 // 3 5:invokeinterface #25 <Method Object zzve.get(int)> // 4 10:checkcast #27 <Class String> // 5 13:areturn } public final Iterator iterator() { return ((Iterator) (new zzxi(this))); // 0 0:new #31 <Class zzxi> // 1 3:dup // 2 4:aload_0 // 3 5:invokespecial #34 <Method void zzxi(zzxg)> // 4 8:areturn } public final ListIterator listIterator(int i) { return ((ListIterator) (new zzxh(this, i))); // 0 0:new #40 <Class zzxh> // 1 3:dup // 2 4:aload_0 // 3 5:iload_1 // 4 6:invokespecial #43 <Method void zzxh(zzxg, int)> // 5 9:areturn } public final int size() { return zzcch.size(); // 0 0:aload_0 // 1 1:getfield #18 <Field zzve zzcch> // 2 4:invokeinterface #48 <Method int zzve.size()> // 3 9:ireturn } public final Object zzbp(int i) { return zzcch.zzbp(i); // 0 0:aload_0 // 1 1:getfield #18 <Field zzve zzcch> // 2 4:iload_1 // 3 5:invokeinterface #51 <Method Object zzve.zzbp(int)> // 4 10:areturn } public final void zzc(zzte zzte) { throw new UnsupportedOperationException(); // 0 0:new #55 <Class UnsupportedOperationException> // 1 3:dup // 2 4:invokespecial #56 <Method void UnsupportedOperationException()> // 3 7:athrow } public final List zzxb() { return zzcch.zzxb(); // 0 0:aload_0 // 1 1:getfield #18 <Field zzve zzcch> // 2 4:invokeinterface #60 <Method List zzve.zzxb()> // 3 9:areturn } public final zzve zzxc() { return ((zzve) (this)); // 0 0:aload_0 // 1 1:areturn } private final zzve zzcch; }
Business sustainability frameworks a survey This paper aims to formulate a number of research questions based on a literature survey. A critical review of literature has been undertaken to evaluate the existing business sustainability frameworks. Business Sustainability frameworks from a number of domains such as manufacturing, e-learning, agriculture, bioenergy and Information technology has been reviewed. The knowledge gap in this space has been clearly outlined and a number of research questions were derived from this discussion. Based upon this review a framework will be proposed using the inputs from the existing literature. The proposed framework will be validated by interviewing domain experts. Once validated the framework would be applied to two different ICT businesses either from Australia or overseas. This will be a case study based research where the two ICT companies will be assessed using the proposed framework and the results from this case study will be used to draw generalized conclusions related to the sustainability of ICT businesses.
// Package paychmgr: channelAccessor provides serialized (lock-protected)
// access to a single payment channel's state — voucher creation, validation,
// submission, lane accounting, and settle/collect message construction.
package paychmgr

import (
	"context"
	"fmt"

	"github.com/filecoin-project/venus/pkg/crypto"
	"github.com/filecoin-project/venus/venus-shared/types"
	"github.com/ipfs/go-cid"
	"golang.org/x/xerrors"

	"github.com/filecoin-project/go-address"
	cborutil "github.com/filecoin-project/go-cbor-util"
	"github.com/filecoin-project/go-state-types/big"

	"github.com/filecoin-project/venus/venus-shared/actors"
	"github.com/filecoin-project/venus/venus-shared/actors/builtin/paych"
)

// insufficientFundsErr indicates that there are not enough funds in the
// channel to create a voucher
type insufficientFundsErr interface {
	Shortfall() big.Int
}

// ErrInsufficientFunds is the concrete insufficientFundsErr implementation:
// it carries the amount by which the channel balance falls short.
type ErrInsufficientFunds struct {
	shortfall big.Int
}

func newErrInsufficientFunds(shortfall big.Int) *ErrInsufficientFunds {
	return &ErrInsufficientFunds{shortfall: shortfall}
}

func (e *ErrInsufficientFunds) Error() string {
	return fmt.Sprintf("not enough funds in channel to cover voucher - shortfall: %d", e.shortfall)
}

func (e *ErrInsufficientFunds) Shortfall() big.Int {
	return e.shortfall
}

// laneState is an in-memory paych.LaneState used to overlay locally stored
// vouchers on top of the on-chain lane states (see channelAccessor.laneState).
type laneState struct {
	redeemed big.Int
	nonce    uint64
}

func (ls laneState) Redeemed() (big.Int, error) {
	return ls.redeemed, nil
}

func (ls laneState) Nonce() (uint64, error) {
	return ls.nonce, nil
}

// channelAccessor is used to simplify locking when accessing a channel
type channelAccessor struct {
	from address.Address
	to   address.Address

	// chctx is used by background processes (eg when waiting for things to be
	// confirmed on chain)
	chctx         context.Context
	sa            *stateAccessor
	api           managerAPI
	store         *Store
	lk            *channelLock
	fundsReqQueue []*fundsReq
	msgListeners  msgListeners
}

// newChannelAccessor builds an accessor for the from->to channel, sharing the
// Manager's context, state accessor, API, store and global lock.
func newChannelAccessor(pm *Manager, from address.Address, to address.Address) *channelAccessor {
	return &channelAccessor{
		from:         from,
		to:           to,
		chctx:        pm.ctx,
		sa:           pm.sa,
		api:          pm.pchapi,
		store:        pm.store,
		lk:           &channelLock{globalLock: &pm.lk},
		msgListeners: newMsgListeners(),
	}
}

// messageBuilder returns a paych message builder for the actors version that
// matches the current network version.
func (ca *channelAccessor) messageBuilder(ctx context.Context, from address.Address) (paych.MessageBuilder, error) {
	nwVersion, err := ca.api.StateNetworkVersion(ctx, types.EmptyTSK)
	if err != nil {
		return nil, err
	}

	ver, err := actors.VersionForNetwork(nwVersion)
	if err != nil {
		return nil, err
	}
	return paych.Message(ver, from), nil
}

// getChannelInfo returns the stored ChannelInfo for the given channel address.
func (ca *channelAccessor) getChannelInfo(ctx context.Context, addr address.Address) (*ChannelInfo, error) {
	ca.lk.Lock()
	defer ca.lk.Unlock()

	return ca.store.ByAddress(ctx, addr)
}

// outboundActiveByFromTo returns the active outbound channel between the two
// given addresses, if one is tracked in the store.
func (ca *channelAccessor) outboundActiveByFromTo(ctx context.Context, from, to address.Address) (*ChannelInfo, error) {
	ca.lk.Lock()
	defer ca.lk.Unlock()

	return ca.store.OutboundActiveByFromTo(ctx, from, to)
}

// createVoucher creates a voucher with the given specification, setting its
// nonce, signing the voucher and storing it in the local datastore.
// If there are not enough funds in the channel to create the voucher, returns
// the shortfall in funds.
func (ca *channelAccessor) createVoucher(ctx context.Context, ch address.Address, voucher paych.SignedVoucher) (*types.VoucherCreateResult, error) {
	ca.lk.Lock()
	defer ca.lk.Unlock()

	// Find the channel for the voucher
	ci, err := ca.store.ByAddress(ctx, ch)
	if err != nil {
		return nil, xerrors.Errorf("failed to get channel info by address: %w", err)
	}

	// Set the voucher channel
	sv := &voucher
	sv.ChannelAddr = ch

	// Get the next nonce on the given lane
	sv.Nonce = ca.nextNonceForLane(ci, voucher.Lane)

	// Sign the voucher
	vb, err := sv.SigningBytes()
	if err != nil {
		return nil, xerrors.Errorf("failed to get voucher signing bytes: %w", err)
	}

	sig, err := ca.api.WalletSign(ctx, ci.Control, vb)
	if err != nil {
		return nil, xerrors.Errorf("failed to sign voucher: %w", err)
	}
	sv.Signature = sig

	// Store the voucher
	if _, err := ca.addVoucherUnlocked(ctx, ch, sv, big.NewInt(0)); err != nil {
		// If there are not enough funds in the channel to cover the voucher,
		// return a voucher create result with the shortfall
		var ife insufficientFundsErr
		if xerrors.As(err, &ife) {
			return &types.VoucherCreateResult{
				Shortfall: ife.Shortfall(),
			}, nil
		}

		return nil, xerrors.Errorf("failed to persist voucher: %w", err)
	}

	return &types.VoucherCreateResult{Voucher: sv, Shortfall: big.NewInt(0)}, nil
}

// nextNonceForLane returns one more than the highest voucher nonce stored for
// the given lane (so 1 for a lane with no vouchers).
func (ca *channelAccessor) nextNonceForLane(ci *ChannelInfo, lane uint64) uint64 {
	var maxnonce uint64
	for _, v := range ci.Vouchers {
		if v.Voucher.Lane == lane {
			if v.Voucher.Nonce > maxnonce {
				maxnonce = v.Voucher.Nonce
			}
		}
	}

	return maxnonce + 1
}

// checkVoucherValid is the lock-taking wrapper around
// checkVoucherValidUnlocked.
func (ca *channelAccessor) checkVoucherValid(ctx context.Context, ch address.Address, sv *paych.SignedVoucher) (map[uint64]paych.LaneState, error) {
	ca.lk.Lock()
	defer ca.lk.Unlock()

	return ca.checkVoucherValidUnlocked(ctx, ch, sv)
}

// checkVoucherValidUnlocked verifies the voucher against the channel's actor
// state and the locally known lane states: address match, no time/hash locks,
// valid signature, strictly increasing nonce and amount on its lane, and a
// total redeemed amount within the actor's balance. On success it returns the
// effective lane states. Caller must hold ca.lk.
func (ca *channelAccessor) checkVoucherValidUnlocked(ctx context.Context, ch address.Address, sv *paych.SignedVoucher) (map[uint64]paych.LaneState, error) {
	if sv.ChannelAddr != ch {
		return nil, xerrors.Errorf("voucher ChannelAddr doesn't match channel address, got %s, expected %s", sv.ChannelAddr, ch)
	}

	// check voucher is unlocked
	if sv.Extra != nil {
		return nil, xerrors.Errorf("voucher is Message Locked")
	}
	if sv.TimeLockMax != 0 {
		return nil, xerrors.Errorf("voucher is Max Time Locked")
	}
	if sv.TimeLockMin != 0 {
		return nil, xerrors.Errorf("voucher is Min Time Locked")
	}
	if len(sv.SecretPreimage) != 0 {
		return nil, xerrors.Errorf("voucher is Hash Locked")
	}

	// Load payment channel actor state
	act, pchState, err := ca.sa.loadPaychActorState(ctx, ch)
	if err != nil {
		return nil, err
	}

	// Load channel "From" account actor state
	f, err := pchState.From()
	if err != nil {
		return nil, err
	}

	from, err := ca.api.ResolveToKeyAddress(ctx, f, nil)
	if err != nil {
		return nil, err
	}

	// verify voucher signature
	vb, err := sv.SigningBytes()
	if err != nil {
		return nil, err
	}

	// TODO: technically, either party may create and sign a voucher.
	// However, for now, we only accept them from the channel creator.
	// More complex handling logic can be added later
	if err := crypto.Verify(sv.Signature, from, vb); err != nil {
		return nil, err
	}

	// Check the voucher against the highest known voucher nonce / value
	laneStates, err := ca.laneState(ctx, pchState, ch)
	if err != nil {
		return nil, err
	}

	// If the new voucher nonce value is less than the highest known
	// nonce for the lane
	ls, lsExists := laneStates[sv.Lane]
	if lsExists {
		n, err := ls.Nonce()
		if err != nil {
			return nil, err
		}

		if sv.Nonce <= n {
			return nil, fmt.Errorf("nonce too low")
		}

		// If the voucher amount is less than the highest known voucher amount
		r, err := ls.Redeemed()
		if err != nil {
			return nil, err
		}
		if sv.Amount.LessThanEqual(r) {
			return nil, fmt.Errorf("voucher amount is lower than amount for voucher with lower nonce")
		}
	}

	// Total redeemed is the total redeemed amount for all lanes, including
	// the new voucher
	// eg
	//
	// lane 1 redeemed:            3
	// lane 2 redeemed:            2
	// voucher for lane 1:         5
	//
	// Voucher supersedes lane 1 redeemed, therefore
	// effective lane 1 redeemed:  5
	//
	// lane 1:  5
	// lane 2:  2
	//          -
	// total:   7
	totalRedeemed, err := ca.totalRedeemedWithVoucher(laneStates, sv)
	if err != nil {
		return nil, err
	}

	// Total required balance must not exceed actor balance
	if act.Balance.LessThan(totalRedeemed) {
		return nil, newErrInsufficientFunds(big.Sub(totalRedeemed, act.Balance))
	}

	if len(sv.Merges) != 0 {
		return nil, fmt.Errorf("dont currently support paych lane merges")
	}

	return laneStates, nil
}

// checkVoucherSpendable reports whether the voucher could currently be spent:
// it must not have been submitted already, and a simulated Update call against
// chain state (via ca.api.Call) must succeed with exit code 0.
func (ca *channelAccessor) checkVoucherSpendable(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, secret []byte) (bool, error) {
	ca.lk.Lock()
	defer ca.lk.Unlock()

	recipient, err := ca.getPaychRecipient(ctx, ch)
	if err != nil {
		return false, err
	}

	ci, err := ca.store.ByAddress(ctx, ch)
	if err != nil {
		return false, err
	}

	// Check if voucher has already been submitted
	submitted, err := ci.wasVoucherSubmitted(sv)
	if err != nil {
		return false, err
	}
	if submitted {
		return false, nil
	}

	mb, err := ca.messageBuilder(ctx, recipient)
	if err != nil {
		return false, err
	}

	mes, err := mb.Update(ch, sv, secret)
	if err != nil {
		return false, err
	}

	ret, err := ca.api.Call(ctx, mes, nil)
	if err != nil {
		return false, err
	}

	if ret.Receipt.ExitCode != 0 {
		return false, nil
	}

	return true, nil
}

// getPaychRecipient returns the "To" address of the channel's on-chain state.
func (ca *channelAccessor) getPaychRecipient(ctx context.Context, ch address.Address) (address.Address, error) {
	_, state, err := ca.api.GetPaychState(ctx, ch, nil)
	if err != nil {
		return address.Address{}, err
	}

	return state.To()
}

// addVoucher is the lock-taking wrapper around addVoucherUnlocked.
func (ca *channelAccessor) addVoucher(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, minDelta big.Int) (big.Int, error) {
	ca.lk.Lock()
	defer ca.lk.Unlock()

	return ca.addVoucherUnlocked(ctx, ch, sv, minDelta)
}

// addVoucherUnlocked validates the voucher and stores it on the channel,
// returning the value delta it adds over the lane's previous best voucher.
// Duplicates are ignored (delta 0); a delta below minDelta is rejected.
// Caller must hold ca.lk.
func (ca *channelAccessor) addVoucherUnlocked(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, minDelta big.Int) (big.Int, error) {
	ci, err := ca.store.ByAddress(ctx, ch)
	if err != nil {
		return big.Int{}, err
	}

	// Check if the voucher has already been added
	for _, v := range ci.Vouchers {
		eq, err := cborutil.Equals(sv, v.Voucher)
		if err != nil {
			return big.Int{}, err
		}
		if eq {
			// Ignore the duplicate voucher.
			log.Warnf("AddVoucher: voucher re-added")
			return big.NewInt(0), nil
		}
	}

	// Check voucher validity
	laneStates, err := ca.checkVoucherValidUnlocked(ctx, ch, sv)
	if err != nil {
		return big.NewInt(0), err
	}

	// The change in value is the delta between the voucher amount and
	// the highest previous voucher amount for the lane
	laneState, exists := laneStates[sv.Lane]
	redeemed := big.NewInt(0)
	if exists {
		redeemed, err = laneState.Redeemed()
		if err != nil {
			return big.NewInt(0), err
		}
	}

	delta := big.Sub(sv.Amount, redeemed)
	if minDelta.GreaterThan(delta) {
		return delta, xerrors.Errorf("addVoucher: supplied token amount too low; minD=%s, D=%s; laneAmt=%s; v.Amt=%s", minDelta, delta, redeemed, sv.Amount)
	}

	ci.Vouchers = append(ci.Vouchers, &VoucherInfo{
		Voucher: sv,
	})

	if ci.NextLane <= sv.Lane {
		ci.NextLane = sv.Lane + 1
	}

	return delta, ca.store.putChannelInfo(ctx, ci)
}

// submitVoucher pushes an Update message for the voucher to the mpool,
// records the voucher on the channel if it wasn't already there, and marks it
// (and lower-nonce vouchers) as submitted. Returns the message CID.
func (ca *channelAccessor) submitVoucher(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, secret []byte) (cid.Cid, error) {
	ca.lk.Lock()
	defer ca.lk.Unlock()

	ci, err := ca.store.ByAddress(ctx, ch)
	if err != nil {
		return cid.Undef, err
	}

	has, err := ci.hasVoucher(sv)
	if err != nil {
		return cid.Undef, err
	}

	// If the channel has the voucher
	if has {
		// Check that the voucher hasn't already been submitted
		submitted, err := ci.wasVoucherSubmitted(sv)
		if err != nil {
			return cid.Undef, err
		}
		if submitted {
			return cid.Undef, xerrors.Errorf("cannot submit voucher that has already been submitted")
		}
	}

	mb, err := ca.messageBuilder(ctx, ci.Control)
	if err != nil {
		return cid.Undef, err
	}

	msg, err := mb.Update(ch, sv, secret)
	if err != nil {
		return cid.Undef, err
	}

	smsg, err := ca.api.MpoolPushMessage(ctx, msg, nil)
	if err != nil {
		return cid.Undef, err
	}

	// If the channel didn't already have the voucher
	if !has {
		// Add the voucher to the channel
		ci.Vouchers = append(ci.Vouchers, &VoucherInfo{
			Voucher: sv,
		})
	}

	// Mark the voucher and any lower-nonce vouchers as having been submitted
	err = ca.store.MarkVoucherSubmitted(ctx, ci, sv)
	if err != nil {
		return cid.Undef, err
	}

	return smsg.Cid(), nil
}

// allocateLane reserves the next lane number for the channel.
func (ca *channelAccessor) allocateLane(ctx context.Context, ch address.Address) (uint64, error) {
	ca.lk.Lock()
	defer ca.lk.Unlock()

	return ca.store.AllocateLane(ctx, ch)
}

// listVouchers returns all locally stored vouchers for the channel.
func (ca *channelAccessor) listVouchers(ctx context.Context, ch address.Address) ([]*VoucherInfo, error) {
	ca.lk.Lock()
	defer ca.lk.Unlock()

	// TODO: just having a passthrough method like this feels odd. Seems like
	// there should be some filtering we're doing here
	return ca.store.VouchersForPaych(ctx, ch)
}

// laneState gets the LaneStates from chain, then applies all vouchers in
// the data store over the chain state
func (ca *channelAccessor) laneState(ctx context.Context, state paych.State, ch address.Address) (map[uint64]paych.LaneState, error) {
	// TODO: we probably want to call UpdateChannelState with all vouchers to be fully correct
	// (but technically dont't need to)

	laneCount, err := state.LaneCount()
	if err != nil {
		return nil, err
	}

	// Note: we use a map instead of an array to store laneStates because the
	// api sets the lane ID (the index) and potentially they could use a
	// very large index.
	laneStates := make(map[uint64]paych.LaneState, laneCount)
	err = state.ForEachLaneState(func(idx uint64, ls paych.LaneState) error {
		laneStates[idx] = ls
		return nil
	})
	if err != nil {
		return nil, err
	}

	// Apply locally stored vouchers
	vouchers, err := ca.store.VouchersForPaych(ctx, ch)
	if err != nil && err != ErrChannelNotTracked {
		return nil, err
	}

	for _, v := range vouchers {
		for range v.Voucher.Merges {
			return nil, xerrors.Errorf("paych merges not handled yet")
		}

		// Check if there is an existing laneState in the payment channel
		// for this voucher's lane
		ls, ok := laneStates[v.Voucher.Lane]

		// If the voucher does not have a higher nonce than the existing
		// laneState for this lane, ignore it
		if ok {
			n, err := ls.Nonce()
			if err != nil {
				return nil, err
			}
			if v.Voucher.Nonce < n {
				continue
			}
		}

		// Voucher has a higher nonce, so replace laneState with this voucher
		laneStates[v.Voucher.Lane] = laneState{v.Voucher.Amount, v.Voucher.Nonce}
	}

	return laneStates, nil
}

// Get the total redeemed amount across all lanes, after applying the voucher
func (ca *channelAccessor) totalRedeemedWithVoucher(laneStates map[uint64]paych.LaneState, sv *paych.SignedVoucher) (big.Int, error) {
	// TODO: merges
	if len(sv.Merges) != 0 {
		return big.Int{}, xerrors.Errorf("dont currently support paych lane merges")
	}

	total := big.NewInt(0)
	for _, ls := range laneStates {
		r, err := ls.Redeemed()
		if err != nil {
			return big.Int{}, err
		}
		total = big.Add(total, r)
	}

	lane, ok := laneStates[sv.Lane]
	if ok {
		// If the voucher is for an existing lane, and the voucher nonce
		// is higher than the lane nonce
		n, err := lane.Nonce()
		if err != nil {
			return big.Int{}, err
		}

		if sv.Nonce > n {
			// Add the delta between the redeemed amount and the voucher
			// amount to the total
			r, err := lane.Redeemed()
			if err != nil {
				return big.Int{}, err
			}

			delta := big.Sub(sv.Amount, r)
			total = big.Add(total, delta)
		}
	} else {
		// If the voucher is *not* for an existing lane, just add its
		// value (implicitly a new lane will be created for the voucher)
		total = big.Add(total, sv.Amount)
	}

	return total, nil
}

// settle pushes a Settle message for the channel to the mpool and marks the
// channel as settling in the local store. Returns the message CID.
func (ca *channelAccessor) settle(ctx context.Context, ch address.Address) (cid.Cid, error) {
	ca.lk.Lock()
	defer ca.lk.Unlock()

	ci, err := ca.store.ByAddress(ctx, ch)
	if err != nil {
		return cid.Undef, err
	}

	mb, err := ca.messageBuilder(ctx, ci.Control)
	if err != nil {
		return cid.Undef, err
	}
	msg, err := mb.Settle(ch)
	if err != nil {
		return cid.Undef, err
	}
	smgs, err := ca.api.MpoolPushMessage(ctx, msg, nil)
	if err != nil {
		return cid.Undef, err
	}

	ci.Settling = true
	err = ca.store.putChannelInfo(ctx, ci)
	if err != nil {
		// Best effort: the Settle message was already pushed, so only log.
		log.Errorf("Error marking channel as settled: %s", err)
	}

	return smgs.Cid(), nil
}

// collect pushes a Collect message for the channel to the mpool and returns
// the message CID.
func (ca *channelAccessor) collect(ctx context.Context, ch address.Address) (cid.Cid, error) {
	ca.lk.Lock()
	defer ca.lk.Unlock()

	ci, err := ca.store.ByAddress(ctx, ch)
	if err != nil {
		return cid.Undef, err
	}

	mb, err := ca.messageBuilder(ctx, ci.Control)
	if err != nil {
		return cid.Undef, err
	}

	msg, err := mb.Collect(ch)
	if err != nil {
		return cid.Undef, err
	}

	smsg, err := ca.api.MpoolPushMessage(ctx, msg, nil)
	if err != nil {
		return cid.Undef, err
	}

	return smsg.Cid(), nil
}
THE state of Veracruz, on the Gulf coast, is Mexico at its most fertile. Along the tropical coastline, vast sugar-cane plantations shimmer in the heat. Climb the mountains towards the balmier state capital of Jalapa and the landscape changes into a canopy of coffee plants and orange trees, with cattle and horses grazing. Mexicans will tell you that this natural bounty is the essence of their country. What many fail to realise, though, is that until 500 years ago none of these crops or animals existed in Mexico. Veracruz was the gateway through which they entered, and it was Spaniards who brought them. Get our daily newsletter Upgrade your inbox and get our Daily Dispatch and Editor's Picks. This is where one of the great military expeditions of history began: Hernán Cortés´s march in 1519-20 from the Gulf of Mexico to Tenochtitlán, seat of the Aztec empire. Historians liken it to Julius Caesar’s conquest of Gaul. Its protagonist, a cunning 34-year-old with almost no experience of war, led about 500 men and just over a dozen horses into territories whose bloodthirsty warriors hugely outnumbered his own. He exploited seething tribal rivalries to conquer a civilisation—albeit with the help of gunpowder, smallpox and his wily Indian lover. At times he used mischief; at times cruelty. He had an eye for his place in history—as well as for the ladies. His soldiers did not just subjugate the people they conquered. From the very start they bred with the Indians too, creating a mixed race through mestizaje, with a common language and religion that defines Mexico today. Today the journey—bits of which your correspondent did by car, bits on horse-back—is rather easier than it was five centuries ago. Even so, it can be tricky. To follow Cortés’s first march from beginning to end requires trampling through jungle, skirting snow-capped volcanoes, traipsing through fields of fighting bulls and battling Mexico City’s traffic. 
Only a few historians, such as Juan Miralles, a Mexican diplomat, have deciphered the conquistadors’ lousy spelling of Indian names to identify the route. Sometimes history seems to have swallowed Cortés’s footsteps up. On the march When Cortés disembarked on the sand-banks of Veracruz on April 22nd (Good Friday) in 1519, after a long journey from Cuba via the Yucatán and Tabasco, he set in train three manoeuvres that would help determine the outcome of the conquest. He met ambassadors of Moctezuma, lord of the Aztecs, and the more gold they gave him as a bribe to stop him travelling to Tenochtitlán, the more they whetted his appetite to go. He double-crossed men loyal to the Cuban governor, Diego Velázquez, to give himself free rein to pursue his path to glory in service of the King of Spain. And he realised the usefulness of Indian allies, above all the alluring Malintzin, or La Malinche, who had been given to him as a slave a few weeks before and whose linguistic skills and womanly wiles helped him penetrate the great Aztec empire by brokering pacts with its enemies. Today there is no trace of the mosquito-infested dunes where he established the first Villa Rica de la Veracruz, a make-believe town created solely to hold a rigged election that would give him powers to conquer. San Juan de Ulúa is a historic fort near an esplanade where marimba music flutters and elderly men play chess and drink coffee. In a possible echo of the control the Aztecs once exerted, parts of Veracruz look like a police state: heavily armed marines in balaclavas patrol the area to stop drug violence. Back in 1519 the Aztecs’ rivals, the local Totonac Indians, may have looked as threatening as today’s drug lords. They had, according to one account, drooping holes in their lower lips and ears, studded with stones and gold. But they were friendlier than they looked. 
Once their overlords, the Aztec ambassadors, had given up their vain attempt to get rid of the Spaniards, the Totonacs invited Cortés and his crew farther up the coast to meet their chieftain, a splendid character who has gone down in history as the “fat cacique” (ruler).
The presence of a nuclear-power station in the adjacent bay does not diminish its popularity. But few of its visitors know that it is where one of the most remarkable events in Mexican history took place: Cortés’s most famous act of bold, decisive cool. Some of his soldiers, loyal to Velázquez, wanted to return to Cuba. Cortés needed total commitment to his expedition, so he scuttled his ships by smashing holes in them and abandoning them on the beach. From then on, victory was the only option for his troops. There is nothing in Villa Rica to recall this extraordinary act of leadership, nor to mark the remains of the fortress that Cortés personally helped to build that lies just above the beach. It is now surrounded by barbed wire and hidden under a thicket of brambles. The contrast with meticulously excavated Jamestown, where English settlers first disembarked on the coast of Virginia in 1607, speaks volumes about how differently two neighbour-nations can treat their early history. In America, where the conquerors wiped out most of the past, the place of arrival is celebrated with the pride of victory; in Mexico, where the settlers mixed with the indigenous people, it is regarded as the starting-place of a painful conquest, best ignored. This attitude to Cortés prevails in poor, lethargic areas, strongholds of the Institutional Revolutionary Party (PRI) that has ruled Mexico for most of the past century, often by peddling to Mexicans a narrative of oppression at the hands of outsiders. The same attitude can be found in what was to become the third Villa Rica de la Veracruz. It is now La Antigua, a village 20km north of Veracruz. Bernal Díaz del Castillo, who along with Cortés provides the best eyewitness account of the conquest, tells of the deep river they struggled to cross there, using broken canoes or by swimming. 
On the banks, giant ceiba trees take some of the sting from the tropical heat; the Spaniards perhaps remembered these, and within decades of the conquest they made the place a hub of trade between the new world and Spain. But there is little trace of that today. Almost hidden among vast amate trees in La Antigua is the so-called “Casa de Cortés” (pictured), with roots threaded through its window frames and doors as if it has been asleep for centuries. Though it is unlikely that the conquistador ever lived there, it was built with princely craftsmanship. Faint traces remain of the white stucco that used to adorn it—an old indigenous recipe, a guide says, of cactus, oysters, gulls’ eggs, crushed tortoise shells, sand and water. Restoration work started only four years ago. The guide, a spiky-haired 19-year-old called Sergio, takes visitors to the main square where he shows off a recently erected plaque that contains the conciliatory message: “We stretch out our hands to our Spanish brethren because we are heirs to the wisdom of the Indians and the gallantry of the Spaniards.” But he is personally scornful of the conquistadors. They were the dregs of society, he says, infested with diseases like smallpox and measles that ravaged the indigenous population. “We’d rather forget the conquest altogether,” he says. Horses, gods and machismo Yet in the pine-covered mountains above, a few days’ march away, where Cortés travelled on his way to the central plains of Mexico, people take a different view of these things. This is farming country, populated by conservative and independent-minded smallholders who have little time for the PRI’s spurious “revolutionary” populism. Horse culture, which played an outsized role in the conquest, runs deep. Chroniclers delight in describing how the conquistadors on their horses galloped in front of wide-eyed Indians to make them believe that this strange beast and man were one creature, either a monster or a god. 
The Spaniards rode close to what is now Xico, a mountain town that seems to float on the air amid the brightly coloured paper decorations strung along the streets. The Spaniards introduced its most famous crop, coffee. Its centre of gravity is a Catholic church—another Spanish import—whose tower is silhouetted by the snow-covered Pico de Orizaba behind. On one Sunday this summer a small troop of horses trotted up the main street, their riders dressed in embroidered jackets and trousers. The heart and soul of chivalrous Mexico, they displayed another Spanish trait, machismo. One, a 72-year-old farmer in a sombrero with a moustache and hair as silver-grey as his horse, nodded to his young daughter riding at his side. “The chewing gum still sticks,” he guffawed, pointing to his penis. They headed to church for a mounted Sunday mass a week before the town’s annual bull run. “The conquest brought us everything good in life—God, horses and bulls,” says another horseman. From Xico, the conquistadors flogged through freezing mountain passes and waterless terrain to arrive in Tlaxcala, Mexico’s smallest state. Although it is a quiet haven of colonial architecture, haciendas and bull studs only two hours from the capital, few people visit it. That is partly because its unique role in the conquest tarnished its reputation, partly because it is known now for trafficking girls. Half a millennium on, the curse persists. Led by a local prince, Xicoténcatl the Younger, the Tlaxcalans almost beat Cortés and his men in battle. They quickly killed two of his horses, destroying the myth of the conquistadors’ invincibility. The wounded Spaniards were forced to treat their injuries with the body fat of a dead Indian, the only ointment they could find. But eventually the Tlaxcalans capitulated—hampered perhaps by their tradition of trying to capture their enemies for sacrifice, rather than slaughtering them. Cortés was astonished by their city. 
He thought it “greater and stronger than Granada”; it teemed with fresh game to eat, in the market there were barbers’ shops, and it even had a functioning justice system (modern Mexico, take note). It also provided allies; its people were sworn enemies of the Aztecs and were delighted to join up with the Spaniards. The idea that the conquest is all Cortés’s dirty work is incorrect. “The conquest was a war of Indians against Indians. The Spaniards were far too small a force to do it by themselves,” says Andrea Martínez, a historian and expert on Tlaxcala. For a few centuries Tlaxcala did well out of its co-conquest. It earned a royal seal from Spain as a “very noble and very loyal” city. According to Ms Martínez, its Indian caciques were allowed to retain control of their people and fought tenacious legal battles to stop the imperial authorities from stripping them of their rights. On behalf of the crown, loyal Tlaxcalans helped conquer territory and build settlements from Central America as far north as Albuquerque, New Mexico. That all changed with independence. Enrique Krauze, a historian, notes that the country’s break from Spain in 1821 was portrayed by liberals as the reversal of the conquest. And the anti-conquest fervour drew further strength from the revolution of 1910-17 that eventually gave rise to PRI rule. As a result, Tlaxcala became a dirty word and schoolchildren were taught that it was a “traitor state”. Now there are few traces of Cortés, and the only Tlaxcalan celebrated nationwide is Xicoténcatl the Younger, who was hanged for failing to support the final siege against the Aztecs. Armando Díaz de la Mora, a state historian, finds it exasperating. “Cortés was a great friend to Tlaxcala,” he says, “[but] there are no squares, streets or statues named after him.” Your correspondent, conquistador-style, took a day trip on horseback to find traces of the legendary wall on the border of Tlaxcala that kept the Aztec marauders at bay. 
There is no sign of it. The battlegrounds on which Cortés and his men fought tens of thousands of club-wielding Tlaxcalans are now overrun by fighting bulls. But the landscape still has the beauty that must have captivated Cortés 500 years ago. Above it looms a striking volcano, called La Malinche. Legend has it that when the locals saw Malintzin bathing in a pool, she looked so voluptuous that they named it after her. Blood, sweat and tears History takes a different turn with the final leg of Cortés’s first journey: the approach to Tenochtitlán. In the city of Cholula, which was loyal to Moctezuma, Cortés’s Spanish and Tlaxcalan forces massacred thousands in the main square, though accounts differ as to whether it was a pre-emptive strike to fend off an attack or a simple case of bloodlust. The slaughter is one of the biggest blights on Cortés´s memory. Visit Cholula today and it is the ancient pre-hispanic pyramid, buried beneath a church, that draws history buffs, not the conquistador. In Tenochtitlán, the capital, he crossed the causeways to greet, kidnap and ultimately destroy Moctezuma The path between the two volcanoes that tower above Cholula is called the Paso de Cortés. It is spectacular. From there he had his first awe-inspiring sight of what is now Mexico City. The lakes that shimmered below him are mostly now drained and cluttered with some of Latin America’s biggest slums, with barely a lick of paint on the houses to brighten them. The causeways—still known as calzadas—that took him across the lakes to greet, kidnap and ultimately destroy Moctezuma are the distant forebears of multi-lane highways now snarled with some of the world’s worst traffic. Along one of them, the Calzada Mexico-Tacuba, Cortés fled on a rainy night in 1520, pursued by enraged Aztecs avenging the death of their emperor. Many of his panicked followers fell into the surrounding lake, drowning under the weight of their armour and turning the water red with blood. 
Today, next to the burnt husk of a giant ahuehuete tree, with buses spewing their fumes alongside, is a sign saying that Cortés wept there for the fate of his men. The event has passed down in history as la noche triste (the sad night), because it is the moment when the Spaniards came closest to defeat. The sign, however, erected in 2013, takes the Aztec view and calls it la noche victoriosa. That is a reflection of Mexico’s struggle with its past. Should it accept the historical record, with all its brutality, come to terms with the inevitability of Tenochtitlán’s fall and celebrate the boldness and enterprise of Cortés and his men? Or should it continue to glorify the Aztecs and anguish over the genocide that Cortés perpetrated in God’s name? Should it continue to demonise Cortés, in the words of Octavio Paz, a Nobel prize-winning poet, as a symbol of violent penetration, or learn to appreciate him as the unifier of two cultures? The dilemma is more than historical. “Hatred of Cortés is not even hatred of Spain. It’s hatred of ourselves,” wrote Paz on the 500th anniversary of the conquistador’s birth in 1985. Some Mexicans are making efforts to reconcile the country to its history. A television magnate, Ricardo Salinas Pliego, is using the rights to Miralles’s history books, which he owns, to produce a TV drama that he hopes will re-shape the story of the country’s birth on its half-millennium. “We’re definitely going to rewrite history,” he says. Cortés, he believes, “was really in love with Mexico”. La Malinche, too, has “been very poorly served by history. She shows what a strong woman can do with history. And she gave birth to the first mestizo.” For now, though, the national dilemma lingers. At the journey’s end, the centre of Mexico City, a mural in a stairwell in the baroque sanctuary of San Ildefonso, beside the remains of Tenochtitlán’s dazzling Great Temple, depicts a uniquely Mexican version of Adam and Eve. 
Painted by José Clemente Orozco, a stern Cortés is clasping the hand of the dark-skinned La Malinche. Both are naked. Beneath their feet is a dead, faceless Indian. A few streets away, at the site where the conquistador first met Moctezuma, another mural offers a gentler version of the story. Behind the plain façade of the Hospital de Jesús, which Cortés built in 1524 to train Indian doctors, are two porticoed courtyards, with perhaps the only bust of Cortés to be found in a public building in Mexico. Upstairs, a mural depicts Spanish, Indian and mestizo doctors and nurses working side by side. In the next-door church, which is closed for refurbishment, lie Cortés’s bones.
/**
 * Builds the GraphQL query string that fetches the persisted column-state
 * configuration (sorting, visible columns, accessors, …) for the given index.
 *
 * @param indexName - name of the GraphQL root field (index) to query under.
 * @returns the ready-to-send GraphQL query document as a string.
 */
export const getColumnStateQuery = function (indexName: string): string {
  return ` query columnsStateQuery { ${indexName} { columnsState { state { type keyField defaultSorted { id desc } columns { field accessor show type sortable canChangeShow query jsonPath } } } } } `;
};
The Revolutionary Aesthetics of the Second Russian Avant-Garde The text focuses on the stiob (the Russian word for a particular form of parody) and subversive aesthetic praxis of the Second Russian Avant-Garde. In particular, Ioffe analyzes Michail Grobman’s oeuvre from the perspective of various irreverent techniques associated with the political left and the cynic tradition, drawing a conceptual parallel between the avant-garde’s life-creational outrage and Surrealist patterns of discursive terror. Ioffe reflects on the synthetic nature of the avant-garde, which puts equal emphasis on visual and verbal arts. His analysis explores the radical artistic gesture that represents one of the unique contributions of this cultural paradigm.
A Mutant Exhibiting Abnormal Habituation Behavior in Caenorhabditis elegans The acquisition and retention of information by the nervous system are major processes of learning. Habituation is a simple learning process that occurs during repeated exposure to harmless stimuli. C. elegans is habituated when repeatedly given mechanical stimuli and recover from the habituation when the stimuli are stopped. A habituation abnormal mutant was isolated and assigned to a new gene hab-1 whose mutation causes slow habituation. The hab-1 mutant phenotype is remarkable at short time interval stimuli. However, hab-1 mutant worms show normal dishabituation. Ablations of neurons constituting the neural circuit for mechanical reflexes did not abolish abnormalities caused by the hab-1 mutation.
This story originally appeared on xoJane.com. What I’m about to say might make me the most unpopular teacher in the lounge. Standardized testing actually isn’t all that bad. In fact, if used correctly, it can do some great stuff. Sorry, I just had to duck as a head of lettuce was thrown my way. The Brief Newsletter Sign up to receive the top stories you need to know right now. View Sample Sign Up Now I teach adults English as a Second Language (ESL) in the Los Angeles Unified School District. Our students take two or three tests during a semester, and if their scores improve, we get money from the state. At first, like many teachers, I hated these tests. Then, I got a job as my school’s testing advisor. For nine years, I assisted teachers with the testing, trained them to do it more effectively, scanned all the test results into a giant database, collected a bunch of demographic information, and then sent off the final report to the school district’s main office. From there, it went on to some shadowy place in the government, and somehow this all resulted in a check for some books we might buy. The thing that bothered me was that all that work never truly resulted in helping struggling teachers or improving our programs. We made some cursory attempts at improvement, but the focus was on the money we earned from performance, not the evaluative process itself. We improved in order to get more funding. We didn’t truly examine core issues. There was a widespread distrust of the tests among teachers and administrators. In my experience, teachers tend to be, oddly enough, people who distrust authority. We look at ourselves as overworked heroes, and we see standardized tests as the instruments that don’t truly measure the value of our work. But the thing is, the tests did measure a few things that we should have examined more closely. I had never been much of a computer geek, but I had a knack for this bean counting database job. 
So I got pretty familiar with the reporting we did, and year after year, the numbers painted a clear picture. Some teachers were getting more improvements than others. Yes, there were outliers and exceptions and individual situations that affected testing. But if you took a step back and looked at trends, you could see which teachers were more effective — at least at the subjects our tests covered. Moreover, unlike K-12 programs, adult education is not compulsory. Students who don’t enjoy your class can just walk out. They “vote with their feet.” The data showed us which teachers had high retention and which teachers had high drop rates. In other words, we could see who was engaging students and who might be a little, um…boring. This isn’t to say that a boring teacher is a bad teacher, or that a fun teacher is necessarily a great teacher. But if I could see great retention and consistent test score improvements, term after term, I knew a teacher was pretty right on. At least, this was a teacher we could all learn a bit from. One criticism that gets lobbed against standardized testing is that it can be used against instructors. But what about the opposite situation, when it can be used in their favor? There was a younger teacher in our school who just amazed me with his stellar test results. He dressed flamboyantly and danced and sang in his classroom. His learner persistence was great, too. Students loved him, because he was fun and he enjoyed the hell out of his job. He was denied tenure. An administrator simply didn’t like his style. She was a more conservative, buttoned-down type, and she thought he was too silly, too wild. She overlooked the empirical evidence of his great results. Why aren’t we using these kinds of measurements to reward teachers like him or learn to emulate them? (He finally did get tenure.) There was a lot of useful information that could have helped us improve our teaching. But we didn’t make enough use of that information. 
No teachers were dismissed for being uninteresting, but sadly, no teachers were trained to be more interesting, either. In many ways, the cynicism of the staff toward the tests was understandable. The whole testing system was pretty flawed (and still is). And over the years, the pressure to do well on these tests mounted as our other funding was cut. So of course, when people don’t believe something has value, but they are forced to do it to earn money, a predictable result occurs. Teachers started to manipulate the test results. One day, I was looking at a set of tests, and I noticed some strange patterns in the data. Students were scoring very high at the end of the semester, but then their scores would plummet at the beginning of the next semester. After narrowing this activity down to one class, I realized this teacher was cheating. He was depressing scores at the beginning of the term in order to see bigger results at the end of the term. Oddly enough, he actually walked into my office right as I was making this discovery. And he proudly told me how he’d been getting such great test results: He’d been giving his students 10 minutes on the first test and an hour on the second. And bam! Improvement points. I explained to this teacher, and a few others, that we couldn’t play the game like that. And my pitch basically went something like this: “Look, in the most basic sense, teachers should not be the ones trying to cheat on tests. But if that call to honesty doesn’t convince you, just know that your cheating is transparent, and if I can see it, you’ll get caught by someone less nice, who might fire you.” I cleaned up that sort of behavior at our school, but I was aware that it was happening at other schools. I told some people who maybe could have done something about it. Not much happened. At first, my school always did well, and I was considered competent. But year by year, we fell behind. 
Every year, when we didn’t meet our targets, I had to go do a sort of confessional with my supervisor in the district office. I had to examine the error of my ways and make a plan of improvement. There was no question that my job was riding on that improvement. Just when I thought I’d have to capitulate or lose my job, the cheating at the other schools was discovered, the people involved were disciplined, and I was vindicated. Our school came out smelling clean and fresh and non-cheaty. I wish I could say this was a victory, but funding for our entire program was cut so drastically (because of a little economic crash that happened back in 2008) that hundreds of teachers lost their jobs anyway. Since layoffs were based on seniority, I’m sure a lot of teachers who did well on the tests were let go. It’s hard to be a teacher. I’m back in the classroom now, and I’m giving these tests once again to my students. At the beginning of the semester, I didn’t even have my textbooks yet. I teach a group of adult students who are among the most economically challenged in the nation, and they show up to class exhausted from working full days, cleaning houses, painting, working at fast food restaurants, or taking care of kids or elderly people for low wages. These tests take up my time, cause me to exercise organizational muscles that I don’t have, and put stress on me. And there’s the rest of my life, too, where I’m not planning lessons but trying to write articles or parent a kid. Yeah, the tests are a pain in the butt. And of course, the test results aren’t the only measure of a teacher’s worth. But I know from sifting through data that the tests show us useful information, if only we would use it — and of course, if we are honestly taking the tests and not gaming the system. I don’t have a problem with the tests being financially compensated, but when you make educators desperate to earn money because regular funds are so scarce, that’s bound to incentivize dishonesty. 
If we had better working conditions to start with, we might have time to read the story of what we do in test measurements, without the pressure of counting on the results for our survival. Julie Cross is a screenwriter and graduate of UCLA Film School. Contact us at [email protected].
def markov_blanket(self, node_id):
    """Return the Markov blanket of ``node_id`` as a sorted list.

    The blanket is the union of the node's parents, children and
    co-parents, with the node itself excluded.
    """
    members = set(self.parents(node_id))
    members.update(self.children(node_id))
    members.update(self.coparents(node_id))
    # The node never belongs to its own blanket.
    members.discard(node_id)
    return sorted(members)
/**
 * Returns true if this expr is fully bound by the given slot ids,
 * i.e. every child is itself bound by {@code slotIds}; otherwise false.
 * An expr with no children is trivially bound.
 */
public boolean isBoundBySlotIds(List<SlotId> slotIds) {
  return children_.stream().allMatch(child -> child.isBoundBySlotIds(slotIds));
}
import { mergeStyleSets } from "@fluentui/react";
import axios from "axios";
import React from "react"
import { IConfig, IPyBCKGExperiment } from "../Interfaces";
import { api_url } from "../utils/api";
import { ExperimentField } from "../utils/ExperimentFields";
import { Form, IFormContext, ISubmitResult } from "../utils/Form";
import { IFormValues } from "../utils/FormShared";

// Props for the string-option selector components
// (ExperimentSelectorComponent, ConfigSelectorComponent, Iteration/Fold selectors).
export interface IProps {
    onSubmit: (values: IFormValues) => Promise<ISubmitResult>;
    options: string[]
}

// One selectable experiment type (id + display name).
export interface IExperimentType {
    id: number,
    name: string
}

// Props for ExperimentTypeSelectorComponent.
export interface IExperimentProps {
    onSubmit: (values: IFormValues) => unknown;
    options: IExperimentType[]
}

// Shared Fluent UI styling for form fields in this module.
const css = mergeStyleSets({
    formField: {
        width: "60%",
        margin: "5px",
        padding: "5px",
    },
})

// Renders a plain <select> of experiment types.
// NOTE(review): a <div> is not permitted content inside <select>, and the
// onSubmit handler on a <select> never fires (submit events come from the
// <form>) — so handleSubmit appears unreachable here; confirm intent.
export const ExperimentTypeSelectorComponent: React.FC<IExperimentProps> = (props: IExperimentProps) => {
    const handleSubmit = async (values: IFormValues) => {
        // This gets called second
        // console.log('point 2');
        const result = await props.onSubmit(values);
        return result;
    };
    console.log(`Experiment type options: ${props.options.map( ex => ex.name) }`)
    return (
        <form>
            <select name={'experimentType'} onSubmit={handleSubmit} >
                <div className={css.formField}>
                    {props.options.map(experimentType =>
                        <option key={experimentType.id} value={experimentType.name}>{experimentType.name}</option>
                    )}
                </div>
            </select>
        </form>
    );
};

// Form-based experiment selector: delegates rendering/validation to the
// shared Form/ExperimentField components and forwards submit to the caller.
export const ExperimentSelectorComponent: React.FC<IProps> = (props: IProps) => {
    const handleSubmit = async (values: IFormValues): Promise<ISubmitResult> => {
        const result = await props.onSubmit(values);
        return result;
    };
    return (
        <Form onSubmit={handleSubmit} defaultValues={{ experiment: "" }} validationRules={{}} showButton={false} >
            <div className={css.formField}>
                <ExperimentField name="experiment" label="Select an experiment" type="Select" options={props.options} />
            </div>
        </Form>
    );
};

// Props for NewExperimentSelectorComponent.
// NOTE(review): the class below declares React.Component<unknown, ...>, so
// these props are never actually read via this.props — confirm whether the
// component should be React.Component<INewExperimentProps, ...>.
export interface INewExperimentProps {
    //onSubmit: (values: IFormValues) => unknown;
    experimentOptions: IPyBCKGExperiment[];
    selectedExperiment: IPyBCKGExperiment;
    //getOptions: () => unknown;
}

interface INewExperimentState {
    experimentOptions: IPyBCKGExperiment[]
    selectedExperiment: IPyBCKGExperiment
}

// Self-loading experiment selector: fetches the experiment list from the
// backend on mount and tracks the current selection in component state.
export class NewExperimentSelectorComponent extends React.Component<unknown, INewExperimentState> {
    public constructor(props: INewExperimentProps) {
        super(props)
        // Placeholder state until the fetch in componentDidMount completes;
        // the initial [{}] renders one blank <option>.
        this.state = {
            experimentOptions: [{}] as IPyBCKGExperiment[],
            selectedExperiment: {} as IPyBCKGExperiment
        }
    }

    // Looks up the experiment matching the <select>'s new value and stores it.
    private changeExperimentFields = (e: React.ChangeEvent<HTMLSelectElement>) => {
        const selectedExpName = e.target.value;
        console.log(`Selected experiment name: ${selectedExpName}`)
        const selectedExperiment = this.state.experimentOptions.find(exp => exp.Name === selectedExpName)
        if (selectedExperiment) {
            this.setState({ selectedExperiment: selectedExperiment })
        }
    }

    // Fetches all experiments from the backend.
    async getExperimentOptions() {
        const res = await axios.get<IPyBCKGExperiment[]>(api_url + '/get-experiments')
        const experiments = res.data
        this.setState({
            experimentOptions: experiments,
        })
    }

    componentDidMount() {
        // Fire-and-forget: the returned promise is intentionally not awaited.
        this.getExperimentOptions()
    }

    public render() {
        return (
            <label>
                <h3>Select an Experiment:</h3>
                <div className="formContainer">
                    <select value={this.state.selectedExperiment.Name} onChange={ (e) => this.changeExperimentFields(e) }>
                        {this.state.experimentOptions.map((exp,id) =>
                            <option key={'exp'+id} value={exp.Name}> {exp.Name} </option>)
                        }
                    </select>
                </div>
            </label>
        )
    }
}

// Props for NewConfigSelectorComponent (mirrors INewExperimentProps).
export interface INewConfigProps {
    //onSubmit: (values: IFormValues) => unknown;
    configOptions: IConfig[];
    selectedConfig: IConfig;
    //getOptions: () => unknown;
}

interface INewConfigState {
    configOptions: IConfig[]
    selectedConfig: IConfig
}

// Self-loading config selector: same pattern as NewExperimentSelectorComponent.
// NOTE(review): the constructor is typed against INewExperimentProps, not
// INewConfigProps — looks like a copy/paste slip; confirm.
export class NewConfigSelectorComponent extends React.Component<unknown, INewConfigState> {
    public constructor(props: INewExperimentProps) {
        super(props)
        this.state = {
            configOptions: [{}] as IConfig[],
            selectedConfig: {} as IConfig
        }}

    // Looks up the config matching the <select>'s new value and stores it.
    private changeConfigFields = (e: React.ChangeEvent<HTMLSelectElement>) => {
        const selectedConfigName = e.target.value;
        const selectedConfig = this.state.configOptions.find(cfg => cfg.name === selectedConfigName)
        if (selectedConfig) {
            this.setState({ selectedConfig: selectedConfig })
        }}

    // Fetches all configs from the backend.
    async getOptions() {
        const res = await axios.get<IConfig[]>(api_url + '/get-configs')
        const configs = res.data
        this.setState({
            configOptions: configs,
        })
    }

    componentDidMount() {
        this.getOptions()
    }

    public render() {
        return (
            <label>
                <h3>Select a config:</h3>
                <div className="formContainer">
                    <select value={this.state.selectedConfig.name} onChange={ (event) => this.changeConfigFields(event) }>
                        {this.state.configOptions.map(cfg =>
                            <option key={cfg.id} value={cfg.name}> {cfg.name} </option>)
                        }
                    </select>
                </div>
            </label>
        )
    }
}

// Form-based config selector.
// NOTE(review): defaultValues uses the key 'experiment' although the field
// below is named 'config' — verify against the Form implementation; the
// default likely never reaches the field.
export const ConfigSelectorComponent: React.FC<IProps> = (props: IProps) => {
    const handleSubmit = async (values: IFormValues): Promise<ISubmitResult> => {
        const result = await props.onSubmit(values);
        return result;
    };
    return (
        <Form onSubmit={handleSubmit} defaultValues={{ experiment: "" }} validationRules={{}} showButton={false} >
            <div className={css.formField}>
                <ExperimentField name="config" label="Select a previous config" type="Select" options={props.options} />
            </div>
        </Form>
    );
};

// Form-based iteration selector (with submit button).
// NOTE(review): uses the string class "formField" rather than css.formField
// like the selectors above — confirm which styling is intended.
export const IterationSelectorComponent: React.FC<IProps> = (props: IProps) => {
    const handleSubmit = async (values: IFormValues): Promise<ISubmitResult> => {
        // This gets called second
        // console.log('point 2');
        const result = await props.onSubmit(values);
        return result;
    };
    return (
        <Form onSubmit={handleSubmit} defaultValues={{ iteration: "1" }} validationRules={{}} showButton={true} >
            <div className="formField">
                <ExperimentField name="iteration" label="Select an iteration" type="Select" options={props.options} />
            </div>
        </Form>
    );
};

// Form-based fold selector (with submit button).
export const FoldSelectorComponent: React.FC<IProps> = (props: IProps) => {
    const handleSubmit = async (values: IFormValues): Promise<ISubmitResult> => {
        // This gets called second
        // console.log('point 2');
        const result = await props.onSubmit(values);
        return result;
    };
    return (
        <Form onSubmit={handleSubmit} defaultValues={{ fold: "1" }} validationRules={{}} showButton={true} >
            <div className="formField">
                <ExperimentField name="fold" label="Select a fold" type="Select" options={props.options} />
            </div>
        </Form>
    );
};
/* Routine to get the thread-specific error variable */
SDL_error *
SDL_GetErrBuf(void)
{
#if SDL_THREADS_DISABLED
    /* Single-threaded build: one process-wide error buffer suffices. */
    static SDL_error SDL_global_error;
    return &SDL_global_error;
#else
    static SDL_SpinLock tls_lock;
    static SDL_bool tls_being_created;
    static SDL_TLSID tls_errbuf;
    static SDL_error SDL_global_errbuf;
    /* Sentinel stored in the TLS slot while a per-thread buffer is being
       allocated; any call that sees it falls back to the global buffer
       (presumably to guard against re-entrancy if the allocation path
       itself reports an error — confirm against SDL_malloc behavior). */
    const SDL_error *ALLOCATION_IN_PROGRESS = (SDL_error *)-1;
    SDL_error *errbuf;

    /* Lazily create the TLS slot, double-checked under a spinlock.
       tls_being_created keeps a recursive call (e.g. from SDL_TLSCreate
       setting an error) from re-entering the creation path. */
    if (!tls_errbuf && !tls_being_created) {
        SDL_AtomicLock(&tls_lock);
        if (!tls_errbuf) {
            SDL_TLSID slot;
            tls_being_created = SDL_TRUE;
            slot = SDL_TLSCreate();
            tls_being_created = SDL_FALSE;
            /* Publish the slot only after it is fully created; pairs with
               the acquire barrier before SDL_TLSGet below. */
            SDL_MemoryBarrierRelease();
            tls_errbuf = slot;
        }
        SDL_AtomicUnlock(&tls_lock);
    }
    if (!tls_errbuf) {
        /* TLS unavailable (creation failed or still in progress on another
           thread): degrade to the shared global buffer. */
        return &SDL_global_errbuf;
    }

    SDL_MemoryBarrierAcquire();
    errbuf = (SDL_error *)SDL_TLSGet(tls_errbuf);
    if (errbuf == ALLOCATION_IN_PROGRESS) {
        /* Re-entered while this thread's buffer is being allocated. */
        return &SDL_global_errbuf;
    }
    if (!errbuf) {
        /* First use on this thread: mark the slot, allocate, then install
           the zeroed buffer with SDL_free as its TLS destructor. */
        SDL_TLSSet(tls_errbuf, ALLOCATION_IN_PROGRESS, NULL);
        errbuf = (SDL_error *)SDL_malloc(sizeof(*errbuf));
        if (!errbuf) {
            /* Allocation failed: clear the sentinel and fall back. */
            SDL_TLSSet(tls_errbuf, NULL, NULL);
            return &SDL_global_errbuf;
        }
        SDL_zerop(errbuf);
        SDL_TLSSet(tls_errbuf, errbuf, SDL_free);
    }
    return errbuf;
#endif
}
Design, development, and testing of an environmental P-T cell for infrared spectroscopy measurements. Water absorption bands due to superficially adsorbed molecules often dominate the near-infrared spectra of particulate minerals and rocks, when measured in the laboratory in the reflectance mode. In order to remove this, the spectral effect is thus necessary to acquire spectra of samples in vacuum and at higher temperatures. With the aim to accomplish this task, we developed an environmental cell to perform infrared spectroscopic measurements at controlled pressure-temperature conditions. Currently the cell allows one to measure reflectance spectra in the temperature range from room values up to 300 °C (573 K), in the pressure range of 103-10-6 mbar. The acquisition of spectra continuously in two distinct phases, namely, during a preliminary pumping stage (at room T) and subsequently during a heating stage (in vacuum), permits to highlight and characterize separately the effect of pressure and temperature on infrared spectra.
/** * * @brief Configures a new std logger. * * @return A new logger instance. * ******************************************************************************* */ GlibLogger * GlibUtils_CreateStdLogger(void) { StdLogger *data = g_new0(StdLogger, 1); data->handler.logfn = StdLoggerLog; data->handler.addsTimestamp = FALSE; data->handler.shared = FALSE; data->handler.dtor = StdLoggerDestroy; return &data->handler; }
// WithCacheHeaderCheck add cache agent for response cache // in default mode, only kubelet, kube-proxy, flanneld, coredns User-Agent // can be supported to cache response. and with Edge-Cache header is also supported. func WithCacheHeaderCheck(handler http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { ctx := req.Context() if info, ok := apirequest.RequestInfoFrom(ctx); ok { if info.IsResourceRequest { needToCache := strings.ToLower(req.Header.Get(canCacheHeader)) if needToCache == "true" { ctx = util.WithReqCanCache(ctx, true) req = req.WithContext(ctx) } req.Header.Del(canCacheHeader) } } handler.ServeHTTP(w, req) }) }
// podmanBridge returns the command to use in a var for accessing the podman varlink bridge over ssh func podmanBridge(client *ssh.ExternalClient) string { command := []string{client.BinaryPath} command = append(command, client.BaseArgs...) command = append(command, "--", "sudo", "varlink", "-A", `\'podman varlink \\\$VARLINK_ADDRESS\'`, "bridge") return strings.Join(command, " ") }
""" 废弃 新浪网设置了访问频次限制。 新浪有许多以列表形式提供的汇总列,每天访问也仅仅一次。 """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import re from datetime import date from urllib.error import HTTPError import pandas as pd import requests from bs4 import BeautifulSoup import logbook from toolz.itertoolz import partition_all from ..setting.constants import QUOTE_COLS from cnswd.utils import ensure_list # from cnswd.data_proxy import DataProxy from cnswd.websource.base import friendly_download, get_page_response from .._exceptions import NoWebData, FrequentAccess QUOTE_PATTERN = re.compile('"(.*)"') NEWS_PATTERN = re.compile(r'\W+') STOCK_CODE_PATTERN = re.compile(r'\d{6}') SORT_PAT = re.compile(r'↑|↓') DATA_BASE_URL = 'http://stock.finance.sina.com.cn/stock/go.php/' MARGIN_COL_NAMES = [ '股票代码', '股票简称', '融资余额', '融资买入额', '融资偿还额', '融券余量金额', '融券余量', '融券卖出量', '融券偿还量', '融券余额' ] INDEX_QUOTE_COLS = [ '指数简称', '最新价', '涨跌', '涨跌幅%', '成交量(万手)', '成交额(万元)' ] logger = logbook.Logger('新浪网') @friendly_download(10, 10, 10) def fetch_company_info(stock_code): """获取公司基础信息""" url_fmt = 'http://vip.stock.finance.sina.com.cn/corp/go.php/vCI_CorpInfo/stockid/{}.phtml' url = url_fmt.format(stock_code) df = pd.read_html(url, attrs={'id': 'comInfo1'})[0] return df def fetch_issue_new_stock_info(stock_code): """获取发行新股信息""" url_fmt = 'http://vip.stock.finance.sina.com.cn/corp/go.php/vISSUE_NewStock/stockid/{}.phtml' url = url_fmt.format(stock_code) df = pd.read_html(url, attrs={'id': 'comInfo1'})[0] return df def _add_prefix(stock_code): pre = stock_code[0] if pre == '6': return 'sh{}'.format(stock_code) else: return 'sz{}'.format(stock_code) def _to_dataframe(content, p_codes): """解析网页数据,返回DataFrame对象""" res = [x.split(',') for x in re.findall(QUOTE_PATTERN, content)] df = pd.DataFrame(res).iloc[:, :32] df.columns = QUOTE_COLS[1:] df.insert(0, '股票代码', p_codes) # df['股票代码'] = p_codes df.dropna(inplace=True) return df def fetch_quotes(stock_codes): """ 获取股票列表的分时报价 
Parameters ---------- stock_codes : list 股票代码列表 Returns ------- res : DataFrame 行数 = len(stock_codes) 33列 Example ------- >>> df = fetch_quotes(['000001','000002']) >>> df.iloc[:,:8] 股票代码 股票简称 开盘 前收盘 现价 最高 最低 竞买价 0 000001 平安银行 11.040 11.050 10.900 11.050 10.880 10.900 1 000002 万 科A 33.700 34.160 33.290 33.990 33.170 33.290 """ stock_codes = ensure_list(stock_codes) num = len(stock_codes) length = 800 url_fmt = 'http://hq.sinajs.cn/list={}' dfs = [] for p_codes in partition_all(length, stock_codes): # p_codes = stock_codes[i * length:(i + 1) * length] url = url_fmt.format(','.join(map(_add_prefix, p_codes))) content = get_page_response(url).text dfs.append(_to_dataframe(content, p_codes)) return pd.concat(dfs).sort_values('股票代码') def _add_index_prefix(code): pre = code[0] if pre == '0': return 's_sh{}'.format(code) else: return 's_sz{}'.format(code) def _to_index_dataframe(content, p_codes): """解析网页数据,返回DataFrame对象""" res = [x.split(',') for x in re.findall(QUOTE_PATTERN, content)] df = pd.DataFrame(res) df.columns = INDEX_QUOTE_COLS df.insert(0, '指数代码', p_codes) df['成交时间'] = pd.Timestamp.now().round('T') df.dropna(inplace=True) return df def fetch_index_quotes(codes): """ 获取指数列表的分时报价 Parameters ---------- codes : list 代码列表 Returns ------- res : DataFrame 行数 = len(stock_codes) 33列 Example ------- >>> df = fetch_index_quotes(['000001','000002']) >>> df.iloc[:,:8] 股票代码 股票简称 开盘 前收盘 现价 最高 最低 竞买价 0 000001 平安银行 11.040 11.050 10.900 11.050 10.880 10.900 1 000002 万 科A 33.700 34.160 33.290 33.990 33.170 33.290 """ codes = ensure_list(codes) length = 800 url_fmt = 'http://hq.sinajs.cn/list={}' dfs = [] for p_codes in partition_all(length, codes): url = url_fmt.format(','.join(map(_add_index_prefix, p_codes))) content = get_page_response(url).text dfs.append(_to_index_dataframe(content, p_codes)) return pd.concat(dfs).sort_values('指数代码') # 不可用 def fetch_globalnews(): """获取24*7全球财经新闻""" url = 'http://live.sina.com.cn/zt/f/v/finance/globalnews1' response = requests.get(url) 
today = date.today() soup = BeautifulSoup(response.content, "lxml") # 时间戳 stamps = [p.string for p in soup.find_all("p", class_="bd_i_time_c")] # 标题 titles = [p.string for p in soup.find_all("p", class_="bd_i_txt_c")] # 类别 categories = [ re.sub(NEWS_PATTERN, '', p.string) for p in soup.find_all("p", class_="bd_i_tags") ] # 编码bd_i bd_i_og clearfix data_mid = ['{} {}'.format(str(today), t) for t in stamps] return stamps, titles, categories, data_mid @friendly_download(10, 10, 2) def fetch_cjmx(stock_code, date_): """ 下载指定股票代码所在日期成交明细 Parameters ---------- stock_code : str 股票代码(6位数字代码) date_ : 类似日期对象 代表有效日期字符串或者日期对象 Returns ------- res : DataFrame Exception --------- 当不存在数据时,触发NoWebData异常 当频繁访问时,系统会在一段时间内阻止访问,触发FrequentAccess异常 Example ------- >>> df = fetch_cjmx('300002','2016-6-1') >>> df.head() 成交时间 成交价 价格变动 成交量(手) 成交额(元) 性质 0 15:00:03 8.69 NaN 1901 1652438 卖盘 1 14:57:03 8.69 -0.01 10 8690 卖盘 2 14:56:57 8.70 NaN 102 88740 买盘 3 14:56:51 8.70 NaN 15 13049 买盘 4 14:56:48 8.70 0.01 2 1739 买盘 """ dfs = [] url = 'http://vip.stock.finance.sina.com.cn/quotes_service/view/vMS_tradehistory.php' code_str = _add_prefix(stock_code) date_str = pd.Timestamp(date_).strftime(r'%Y-%m-%d') params = {'symbol': code_str, 'date': date_str, 'page': 1} # 单日交易数据不可能超过1000页 for i in range(1, 1000): params['page'] = i r = requests.get(url, params=params) r.encoding = 'gb18030' df = pd.read_html(r.text, attrs={'id': 'datatbl'}, na_values=['--'])[0] if '没有交易数据' in df.iat[0, 0]: df = pd.DataFrame() break dfs.append(df) res = pd.concat(dfs) if len(res) == 0: raise NoWebData('无法在新浪网获取成交明细数据。股票:{},日期:{}'.format( code_str, date_str)) return res @friendly_download(10, 10, 1) def _common_fun(url, pages, header=0, verbose=False): """处理新浪数据中心网页数据通用函数""" dfs = [] def sina_read_fun(x): return pd.read_html( x, header=header, na_values=['--'], flavor='html5lib', attrs={'class': 'list_table'})[0] for i in range(1, pages + 1): page_url = url + 'p={}'.format(i) if verbose: logger.info('第{}页'.format(i)) df = 
sina_read_fun(page_url) dfs.append(df) return pd.concat(dfs, ignore_index=True) def fetch_rating(page=1): """ 投资评级数据 参数 ---- page : int 指定页,默认为1,取第一页 verbose : bool 是否显示日志信息,默认为假 返回 ---- res : pd.DataFrame """ url = DATA_BASE_URL + f'vIR_RatingNewest/index.phtml?p={page}' df = pd.read_html( url, header=0, na_values=['--'], flavor='html5lib', attrs={'class': 'list_table'})[0] df = df.iloc[:, :8] # 去掉排序符号 df.columns = [SORT_PAT.sub('', col) for col in df.columns] df['股票代码'] = df['股票代码'].map(lambda x: str(x).zfill(6)) df['评级日期'] = pd.to_datetime(df['评级日期'], errors='ignore') return df def fetch_organization_care(pages=1, last=30, verbose=False): """ 机构关注度 参数 ---- pages : int 要抓取的页面数量,默认为1,取第一页 last : int 统计近期多少天的数据。默认30 可选范围:[10,30,60] verbose : bool 是否显示日志信息,默认为假 返回 ---- res : pd.DataFrame """ accept = (10, 30, 60) assert last in accept, 'last参数可选范围{}'.format(accept) if verbose: logger.info('提取机构关注度网页数据') # 名称中可能含有非法字符,重新定义列名称 cols = [ '股票代码', '股票名称', '关注度', '最新评级', '平均评级', '买入数', '持有数', '中性数', '减持数', '卖出数', '行业' ] url = DATA_BASE_URL + 'vIR_OrgCare/index.phtml?last={}&'.format(last) df = _common_fun(url, pages, verbose=verbose) df = df.iloc[:, :11] df.columns = cols df['股票代码'] = df['股票代码'].map(lambda x: str(x).zfill(6)) return df def fetch_industry_care(pages=2, last=30, verbose=False): """ 行业关注度 参数 ---- pages : int 要抓取的页面数量,默认为1,取第一页 共2页 last : int 统计近期多少天的数据。默认30 可选范围:[10,30,60] verbose : bool 是否显示日志信息,默认为假 返回 ---- res : pd.DataFrame 备注 ---- 选取 """ accept = (10, 30, 60) assert last in accept, 'last参数可选范围{}'.format(accept) # 最多2页 if pages > 2: pages = 2 if verbose: logger.info('提取行业关注度网页数据') # 名称中可能含有非法字符,重新定义列名称 cols = [ '行业名称', '关注度', '关注股票数', '买入评级数', '持有评级数', '中性评级数', '减持评级数', '卖出评级数' ] url = DATA_BASE_URL + 'vIR_IndustryCare/index.phtml?last={}&'.format(last) df = _common_fun(url, pages, verbose=verbose) df.columns = cols return df def fetch_target_price(pages=1, verbose=False): """ 主流机构股价预测数据 参数 ---- pages : int 要抓取的页面数量,默认为1,取第一页 verbose : bool 是否显示日志信息,默认为假 
返回 ---- res : pd.DataFrame 说明 ---- 原始数据没有期间,用处不大 """ if verbose: logger.info('提取主流机构股价预测网页数据') cols = ['股票代码', '股票名称', '预期下限', '预期上限'] url = DATA_BASE_URL + 'vIR_TargetPrice/index.phtml?' df = _common_fun(url, pages, 1, verbose=verbose) df = df.iloc[:, [0, 1, 5, 6]] df.columns = cols df['股票代码'] = df['股票代码'].str.extract(r'(?P<digit>\d{6})', expand=False) return df def fetch_performance_prediction(pages=1, verbose=False): """ 业绩预告数据 参数 ---- pages : int 要抓取的页面数量,默认为1,取第一页 verbose : bool 是否显示日志信息,默认为假 返回 ---- res : pd.DataFrame """ if verbose: logger.info('提取业绩预告网页数据') cols = ['股票代码', '股票名称', '类型', '公告日期', '报告期', '摘要', '上年同期', '同比幅度'] url = DATA_BASE_URL + 'vFinanceAnalyze/kind/performance/index.phtml?' df = _common_fun(url, pages, 0, verbose=verbose) df = df.iloc[:, :8] df.columns = cols df['股票代码'] = df['股票代码'].map(lambda x: str(x).zfill(6)) # def func(x): return re.findall(STOCK_CODE_PATTERN, str(x))[0] # df['股票代码'] = df['股票代码'].map(func) return df def fetch_eps_prediction(pages=1, verbose=False): """ EPS预测数据 参数 ---- pages : int 要抓取的页面数量,默认为1,取第一页 verbose : bool 是否显示日志信息,默认为假 返回 ---- res : pd.DataFrame """ if verbose: logger.info('提取EPS预测网页数据') url = DATA_BASE_URL + 'vPerformancePrediction/kind/eps/index.phtml?' df = _common_fun(url, pages, 0, verbose=verbose) df['股票代码'] = df['股票代码'].str.extract(r'(?P<digit>\d{6})', expand=False) return df.iloc[:, :9] def fetch_sales_prediction(pages=1, verbose=False): """ 销售收入预测数据 参数 ---- pages : int 要抓取的页面数量,默认为1,取第一页 verbose : bool 是否显示日志信息,默认为假 返回 ---- res : pd.DataFrame """ if verbose: logger.info('提取销售收入预测网页数据') url = DATA_BASE_URL + 'vPerformancePrediction/kind/sales/index.phtml?' 
df = _common_fun(url, pages, 0, verbose=verbose) df['股票代码'] = df['股票代码'].str.extract(r'(?P<digit>\d{6})', expand=False) return df.iloc[:, :9] def fetch_net_profit_prediction(pages=1, verbose=False): """ 净利润预测数据 参数 ---- pages : int 要抓取的页面数量,默认为1,取第一页 verbose : bool 是否显示日志信息,默认为假 返回 ---- res : pd.DataFrame """ if verbose: logger.info('提取净利润预测网页数据') url = DATA_BASE_URL + 'vPerformancePrediction/kind/np/index.phtml?' df = _common_fun(url, pages, 0, verbose=verbose) df['股票代码'] = df['股票代码'].str.extract(r'(?P<digit>\d{6})', expand=False) return df.iloc[:, :9] def fetch_roc_prediction(pages=1, verbose=False): """ 净资产收益率预测数据 参数 ---- pages : int 要抓取的页面数量,默认为1,取第一页 verbose : bool 是否显示日志信息,默认为假 返回 ---- res : pd.DataFrame """ if verbose: logger.info('提取净资产收益率预测网页数据') url = DATA_BASE_URL + 'vPerformancePrediction/kind/roe/index.phtml?' df = _common_fun(url, pages, 0, verbose=verbose) df['股票代码'] = df['股票代码'].str.extract(r'(?P<digit>\d{6})', expand=False) return df.iloc[:, :9] def fetch_margin(tdate): """指定日期融资融券数据""" tdate = "2020-09-14" url = f"http://vip.stock.finance.sina.com.cn/q/go.php/vInvestConsult/kind/rzrq/index.phtml?tradedate={tdate}" r = requests.get(url) df = pd.read_html(r.text, skiprows=[0, 1, 2])[1] df.drop(columns=[0], inplace=True) df.columns = MARGIN_COL_NAMES df['股票代码'] = df['股票代码'].map(lambda x: str(x).zfill(6)) return df
“I dare them to find the iPod on me,” Richie Sais told the New York Times in 2007, when he was preparing to run the Marine Corps Marathon. USA Track & Field, the national governing body for distance racing, had just decided to ban athletes from using portable music players in order "to ensure safety and to prevent runners from having a competitive edge." Sais resolved to hide his iPod shuffle under his shirt. Many fellow runners protested the new rule, which remains in effect today in an amended form: It now applies only to people vying for awards and money. For some athletes and for many people who run, jog, cycle, lift weights and otherwise exercise, music is not superfluous—it is essential to peak performance and a satisfying workout. Although some people prefer audio books, podcasts or ambient sounds, many others depend on bumpin' beats and stirring lyrics to keep themselves motivated when exercising. A quick Twitter search uncovers plenty of evidence: "Trying to let my phone charge a little more before I go, because lord knows I can't even try and workout without music," tweeted @Gianna_H21. "I just made my mom turn around to get my headphones. I can't possibly work out without music," @Codavoci_Kyle admitted. In the last 10 years the body of research on workout music has swelled considerably, helping psychologists refine their ideas about why exercise and music are such an effective pairing for so many people as well as how music changes the body and mind during physical exertion. Music distracts people from pain and fatigue, elevates mood, increases endurance, reduces perceived effort and may even promote metabolic efficiency. When listening to music, people run farther, bike longer and swim faster than usual—often without realizing it.
In a 2012 review of the research, Costas Karageorghis of Brunel University in London, one of the world's leading experts on the psychology of exercise music, wrote that one could think of music as "a type of legal performance-enhancing drug." Selecting the most effective workout music is not as simple as queuing up a series of fast, high-energy songs. One should also consider the memories, emotions and associations that different songs evoke. For some people, the extent to which they identify with the singer's emotional state and viewpoint determines how motivated they feel. And, in some cases, the rhythms of the underlying melody may not be as important as the cadence of the lyrics. In recent years some researchers and companies have experimented with new ways to motivate exercisers through their ears, such as a smartphone app that guides the listener's escape from zombies in a postapocalyptic world and a device that selects songs based on a runner's heart rate. Let your body move to the music Research on the interplay of music and exercise dates to at least 1911, when American investigator Leonard Ayres found that cyclists pedaled faster while a band was playing than when it was silent. Since then psychologists have conducted around a hundred studies on the way music changes people's performance in a variety of physical activities, ranging in intensity from strolling to sprinting. Looking at the research as a whole, a few clear conclusions emerge. Two of the most important qualities of workout music are tempo—or speed—and what psychologists call rhythm response, which is more or less how much a song makes you want to boogie. Most people have an instinct to synchronize their movements and expressions with music—to nod their heads, tap their toes or break out in dance—even if they repress that instinct in many situations. What type of music excites this instinct varies from culture to culture and from person to person. 
To make some broad generalizations, fast songs with strong beats are particularly stimulating, so they fill most people's workout playlists. In a recent survey of 184 college students, for example, the most popular types of exercise music were hip-hop (27.7 percent), rock (24 percent) and pop (20.3 percent). Some psychologists have suggested that people have an innate preference for rhythms at a frequency of two hertz, which is equivalent to 120 beats per minute (bpm), or two beats per second. When asked to tap their fingers or walk, many people unconsciously settle into a rhythm of 120 bpm. And an analysis of more than 74,000 popular songs produced between 1960 and 1990 found that 120 bpm was the most prevalent pulse. When running on a treadmill, however, most people seem to favor music around 160 bpm. Web sites and smartphone apps such as Songza and jog.fm help people match the tempo of their workout music to their running pace, recommending songs as fast as 180 bpm for a seven-minute mile, for example. But the most recent research suggests that a ceiling effect occurs around 145 bpm: anything higher does not seem to contribute much additional motivation. On occasion, the speed and flow of the lyrics supersede the underlying beat: some people work out to rap songs, for example, with dense, swiftly spoken lyrics overlaid on a relatively mellow melody. Although many people do not feel the need to run or move in exact time with their workout music, synchrony may help the body use energy more efficiently. When moving rhythmically to a beat, the body may not have to make as many adjustments to coordinated movements as it would without regular external cues. In a 2012 study by C. J. Bacon of Sheffield Hallam University, Karageorghis and their colleagues, participants who cycled in time to music required 7 percent less oxygen to do the same work as cyclists who did not synchronize their movements with background music. 
Music, it seems, can function as a metronome, helping someone maintain a steady pace, reducing false steps and decreasing energy expenditure. Extending this logic, Shahriar Nirjon of the University of Virginia and his colleagues devised a personal music player that attempts to sync music with a runner's pace and heart rate. Accelerometers and a tiny microphone embedded in a pair of earbuds gauge the runner's pace and record the pulsing of blood vessels. The device wirelessly transmits the data it collects via a smartphone to a remote computer that chooses the next song. Brain beats Recent research clarifies not only what type of music is best suited to a workout, but also how music encourages people to keep exercising. Distraction is one explanation. The human body is constantly monitoring itself. After a certain period of exercise—the exact duration varies from person to person—physical fatigue begins to set in. The body recognizes signs of extreme exertion—rising levels of lactate in the muscles, a thrumming heart, increased sweat production—and decides it needs a break. Music competes with this physiological feedback for the brain's conscious attention. Similarly, music often changes people's perception of their own effort throughout a workout: it seems easier to run those 10 miles or complete a few extra biceps curls when Beyoncé or Eminem is right there with you. "Given that exercise is often tiresome, boring and arduous, anything that relieves those negative feelings would be welcome," Karageorghis explains. The benefits of distraction are most pronounced during low- to moderate-intensity exercise. When up against high-intensity exercise, music loses its power to override the physical feelings of tiredness, but it can still change the way people respond to that fatigue. The right music elevates mood and persuades people to ride out waves of exhaustion, rather than giving up. 
Karageorghis cautions, though, against listening to music while running in heavily trafficked areas—distraction from fatigue is great, as long as it does not put you in danger. Music also increases endurance by keeping people awash in strong emotions. Listening to music is often an incredibly pleasurable experience and certain songs open the mental floodgates with which people control their emotions in everyday situations. If one strongly identifies with the singer's emotions or perspective, the song becomes all the more motivational. Consider a song from someone's favorite musical film or Broadway show, such as "One Day More" from Les Misérables—an ensemble song with a complex melody and building energy—or "Defying Gravity" from Wicked, in which Elphaba, a central character, vows to overcome all limits others have imposed on her. In addition to exhilarating melodies and vocals, such songs immediately recall the entire milieu of the performance and awaken memories of particular characters who are part of a complex narrative. This mesh of associations and connotations woven into the music provides not just an inspiring perspective to adopt, but also an entire alternate reality to enter while running in place on a treadmill at the gym. Some game designers have experimented with new ways for people to escape into fictional worlds while running. In 2012 the online game company Six to Start released the immersive running game, Zombies, Run!, in the form of a smartphone app that narrates the listener's quest to survive the zombie apocalypse. Following spoken prompts, the listener imagines him or herself running around to collect ammunition and medicine to complete various missions. Whether music or zombie sound effects, what people listen to for motivation when they exercise acts on the same neural circuitry. "We are almost hardwired to appreciate music aesthetically," Karageorghis says.
People's emotional response to music is visceral: It is, in part, ingrained in some of the oldest regions of the brain in terms of evolutionary history, rather than in the large wrinkly human cortex that evolved more recently. One patient—a woman known in the research literature as I. R.—exemplifies this primal response. I. R. has lesions to her auditory cortices, the regions of the cortex that process sound. When I. R. hears the normal version of a song and a horribly detuned version, she cannot tell the difference, explains Jessica Grahn, a cognitive neuroscientist who studies music at Western University's Brain and Mind Institute in Ontario. But when I. R. hears a happy song and a sad song, she immediately distinguishes them from one another. Scientists now know that, although different regions of the human brain specialize in processing different senses—sound, sight, touch—the brain uses the information it receives from one sense to help it understand another. What people see and feel while listening to speech or music, for example, changes what they hear. Music and movement are particularly entangled in the brain. Recent studies suggest that—even if someone is sitting perfectly still—listening to enjoyable music increases electrical activity in various regions of the brain important for coordinating movements, including the supplementary motor area, cerebellum, basal ganglia and ventral premotor cortex. Some researchers think that this neural crosstalk underlies people's instinct to move in time to music. "We have also known for decades that there are direct connections from auditory neurons to motor neurons," explains Grahn, who enjoys working out to cheesy techno-music. "When you hear a loud noise, you jump before you have even processed what it is. That's a reflex circuit, and it turns out that it can also be active for non-startling sounds, such as music." 
In fact, the human brain may have evolved with the expectation that, wherever there is music, there is movement—although this idea emerges more from the imaginative minds of speculating evolutionary psychologists than from experimental evidence. Before the invention of reed flutes and other musical instruments, our ancestors likely produced the earliest forms of music by singing, screaming, chanting or otherwise using their vocal cords, as well as by physically interacting with their own bodies, other people and the environment. A fast tempo would have likely required fast movements: quick clapping or foot stamping, perhaps. Deep, loud sounds would have demanded great energy and force—belting a note or beating the ground or a rock. In its conception, music was likely an extension of the human body. Maybe the brain remembers it that way.
module Shared.Api.Resource.Template.TemplateDTO where

import qualified Data.UUID as U
import GHC.Generics

import Shared.Api.Resource.Template.TemplateFormatDTO

-- | Summary of a document template, including the output formats it supports.
data TemplateDTO =
  TemplateDTO
    { _templateDTOTId :: String          -- ^ Template identifier
    , _templateDTOName :: String         -- ^ Human-readable name
    , _templateDTOVersion :: String      -- ^ Template version
    , _templateDTODescription :: String  -- ^ Free-form description
    , _templateDTOFormats :: [TemplateFormatDTO]  -- ^ Supported output formats
    }
  deriving (Show, Eq, Generic)

-- | A textual file belonging to a template, with its content inlined.
data TemplateFileDTO =
  TemplateFileDTO
    { _templateFileDTOUuid :: U.UUID
    , _templateFileDTOFileName :: String
    , _templateFileDTOContent :: String
    }
  deriving (Show, Eq, Generic)

-- | A binary asset belonging to a template; content is referenced, not inlined.
data TemplateAssetDTO =
  TemplateAssetDTO
    { _templateAssetDTOUuid :: U.UUID
    , _templateAssetDTOFileName :: String
    , _templateAssetDTOContentType :: String  -- ^ MIME type of the asset
    }
  deriving (Show, Eq, Generic)
/***** DYNAMIC LOADER FUNCTIONS *****/
/* Resolves an instance-level Vulkan entry point by name via
 * vkGetInstanceProcAddr. On failure a fatal message is logged through the
 * Instance channel (fatalc presumably aborts — confirm against the logger
 * contract); on success the load is reported at debug verbosity. */
static PFN_vkVoidFunction load_instance_method(VkInstance vk_instance, const char* method_name) {
    // Ask the Vulkan loader for the function pointer.
    PFN_vkVoidFunction fn_ptr = vkGetInstanceProcAddr(vk_instance, method_name);
    if (fn_ptr == nullptr) {
        logger.fatalc(Instance::channel, "Could not load function '", method_name, "'.");
    }

    logger.logc(Tools::Verbosity::debug, Instance::channel, "Loaded function '", method_name, "'.");
    return fn_ptr;
}
/**
 * In this test we will check the save of an entity association
 * referenced as a primitive (String) field.
 *
 *           subTask (String)
 *   Task ------------------> Task
 *
 * The field subTask is a String field, and refers to the id
 * of the subTask.
 *
 * The following steps are involved in this test:
 * 1. Setup the model to have this association modelled as a Task
 *    using an open property
 * 2. Create the test data using JSON object.
 *    The Open property setter should ensure that we get the id
 *    from the subTask field and get the persistence managed object
 *    If this object is not present, then create it.
 *    Delegate most of this work to the framework.
 * 3. Check that the data is saved correctly
 * 4. Read the data and ensure the data is modelled
 *    correctly as a Task object referenced by the open property
 *    name
 */
protected void checkOpenFieldEntityToOne() {
    final String TASK_NAME = "SETUP_DSL";
    final String SUB_TASK_NAME = "SETUP_WIRING";

    // Step 2: build the parent task payload as JSON.
    JSONObject json = new JSONObject();
    json.put("name", TASK_NAME);
    json.put("displayName", "Setup DSL");
    json.put("description", "Setup high-speed broadband internet using DSL technology");

    // Nested JSON for the sub-task, attached under the open property name.
    JSONObject subTask = new JSONObject();
    subTask.put("name", SUB_TASK_NAME);
    subTask.put("displayName", "Setup Wiring");
    subTask.put("description", "Establish wiring from the external line to the exterior of the home");
    json.put("subTaskObj", subTask);

    // Configure the save: post-logic and post-flush enabled so the open
    // property setter runs, and "subTaskObj" expanded so the association
    // is traversed rather than left as a plain String id.
    Settings settings = getSettings();
    settings.setSupportsPostLogic(true);
    settings.setPostFlush(true);
    settings.expand(new AssociationSetting("subTaskObj"));
    settings.setEntityClass(Task.class);

    // Step 3: create and verify both the entity and its resolved sub-task.
    Task task = (Task) aggregateService.create(json, settings);
    assert(task.getId() != null);
    assert(task.getSubTask() != null);

    // Step 4: read back and verify the open property is modelled as a
    // nested Task object keyed by "subTaskObj".
    Object jsonObject = aggregateService.read(task, settings);
    JSONObject jsonTask = (JSONObject) jsonObject;
    JSONObject subTaskJson = jsonTask.getJSONObject("subTaskObj");
    assert(subTaskJson.get("name").equals(SUB_TASK_NAME));
}
/**
 * {@code CyclicalTracker} is an implementation of {@link Tracker} which is a policy of learning
 * rate adjustment that increases the learning rate off a base value in a cyclical nature, as
 * detailed in the paper <a href="https://arxiv.org/abs/1506.01186">Cyclical Learning Rates for
 * Training Neural Networks</a>.
 */
public class CyclicalTracker implements Tracker {

    private float baseValue;
    private float maxValue;
    private int stepSizeUp;
    private int stepSizeDown;
    private int totalSize;
    private float stepRatio;
    private ScaleFunction scaleFunction;
    private boolean scaleModeCycle;

    /**
     * Creates a new instance of {@code CyclicalTracker}.
     *
     * @param builder the builder to create a new instance of {@code CyclicalTracker}
     */
    public CyclicalTracker(Builder builder) {
        this.baseValue = builder.baseValue;
        this.maxValue = builder.maxValue;
        this.stepSizeUp = builder.stepSizeUp;
        // A zero stepSizeDown means "symmetric cycle": reuse stepSizeUp.
        this.stepSizeDown = builder.stepSizeDown > 0 ? builder.stepSizeDown : builder.stepSizeUp;
        this.totalSize = this.stepSizeUp + this.stepSizeDown;
        this.stepRatio = (float) this.stepSizeUp / this.totalSize;
        if (builder.scaleFunction != null) {
            // A custom scale function overrides the predefined modes.
            this.scaleFunction = builder.scaleFunction;
            this.scaleModeCycle = builder.scaleModeCycle;
        } else {
            switch (builder.mode) {
                case TRIANGULAR:
                    this.scaleFunction = new TriangularScaleFunction();
                    this.scaleModeCycle = true;
                    break;
                case TRIANGULAR2:
                    this.scaleFunction = new Triangular2ScaleFunction();
                    this.scaleModeCycle = true;
                    break;
                case EXP_RANGE:
                    this.scaleFunction = new ExpRangeScaleFunction(builder.gamma);
                    this.scaleModeCycle = false;
                    break;
                default:
                    throw new UnsupportedOperationException("Unsupported Cyclical mode.");
            }
        }
    }

    /**
     * Creates a new builder.
     *
     * @return a new builder
     */
    public static Builder builder() {
        return new Builder();
    }

    /** {@inheritDoc} */
    @Override
    public float getNewValue(int numUpdate) {
        // Which cycle we are in (1-based) and the fractional position within it.
        int cycle = (int) Math.floor(1 + (float) numUpdate / this.totalSize);
        float x = 1 + (float) numUpdate / this.totalSize - cycle;
        float scaleFactor;
        float res;
        // Rising half of the cycle scales 0->1; falling half scales 1->0.
        if (x < this.stepRatio) {
            scaleFactor = x / this.stepRatio;
        } else {
            scaleFactor = (x - 1) / (this.stepRatio - 1);
        }
        float baseHeight = (this.maxValue - this.baseValue) * scaleFactor;
        if (this.scaleModeCycle) {
            res = this.baseValue + baseHeight * this.scaleFunction.func(cycle);
        } else {
            res = this.baseValue + baseHeight * this.scaleFunction.func(numUpdate);
        }
        return res;
    }

    /** The Builder to construct an {@link CyclicalTracker} object. */
    public static final class Builder {

        private float baseValue = 0.001f;
        private float maxValue = 0.006f;
        private int stepSizeUp = 2000;
        private int stepSizeDown;
        private CyclicalMode mode = CyclicalMode.TRIANGULAR;
        private ScaleFunction scaleFunction;
        private boolean scaleModeCycle = true;
        private float gamma = 1f;

        private Builder() {}

        /**
         * Sets the initial value. Default: 0.001.
         *
         * @param baseValue the initial value
         * @return this {@code Builder}
         */
        public Builder optBaseValue(float baseValue) {
            this.baseValue = baseValue;
            return this;
        }

        /**
         * Sets the maximum value. Default: 0.006.
         *
         * @param maxValue the max value
         * @return this {@code Builder}
         */
        public Builder optMaxValue(float maxValue) {
            this.maxValue = maxValue;
            return this;
        }

        /**
         * Sets the number of iterations in up half of cycle. Default: 2000.
         *
         * @param stepSizeUp number of iterations in up half of cycle
         * @return this {@code Builder}
         */
        public Builder optStepSizeUp(int stepSizeUp) {
            this.stepSizeUp = stepSizeUp;
            return this;
        }

        /**
         * Sets the number of iterations in the down half of cycle. If {@code stepSizeDown}
         * equals 0, it is set to {@code stepSizeUp}. Default: 0.
         *
         * @param stepSizeDown number of iterations in the down half of cycle
         * @return this {@code Builder}
         */
        public Builder optStepSizeDown(int stepSizeDown) {
            this.stepSizeDown = stepSizeDown;
            return this;
        }

        /**
         * Sets the cyclical mode. Can be {@code CyclicalMode.TRIANGULAR}, {@code
         * CyclicalMode.TRIANGULAR2} or {@code CyclicalMode.EXP_RANGE}. Values correspond to
         * policies detailed above.
         *
         * @param mode cyclical mode
         * @return this {@code Builder}
         */
        public Builder optMode(CyclicalMode mode) {
            this.mode = mode;
            return this;
        }

        /**
         * Constant in {@code CyclicalMode.EXP_RANGE} scaling function. Default: 1.
         *
         * @param gamma constant in 'exp_range' scaling function: gamma^cycleIterations
         * @return this {@code Builder}
         */
        public Builder optGamma(float gamma) {
            this.gamma = gamma;
            return this;
        }

        /**
         * Custom scaling function. If set, {@code mode} will be ignored. Default: null.
         *
         * @param scaleFunction Custom scaling function
         * @return this {@code Builder}
         */
        public Builder optScaleFunction(ScaleFunction scaleFunction) {
            this.scaleFunction = scaleFunction;
            return this;
        }

        /**
         * Defines whether scaling function is evaluated on cycle number or update number. Default:
         * true.
         *
         * @param scaleModeCycle if true then scaling function is evaluated on cycle number, else on
         *     update number
         * @return this {@code Builder}
         */
        public Builder optScaleModeCycle(boolean scaleModeCycle) {
            this.scaleModeCycle = scaleModeCycle;
            return this;
        }

        /**
         * Builds a {@link CyclicalTracker} block.
         *
         * @return the {@link CyclicalTracker} block
         */
        public CyclicalTracker build() {
            Preconditions.checkArgument(baseValue > 0, "baseValue has to be positive!");
            Preconditions.checkArgument(maxValue > 0, "maxValue has to be positive!");
            Preconditions.checkArgument(
                    baseValue <= maxValue, "baseValue has to be lower than maxValue!");
            Preconditions.checkArgument(stepSizeUp >= 1, "stepSizeUp has to be positive!");
            // FIX: the message previously said "stepSizeUp" while guarding stepSizeDown.
            Preconditions.checkArgument(stepSizeDown >= 0, "stepSizeDown cannot be negative!");
            Preconditions.checkArgument(
                    gamma >= 0f && gamma <= 1f, "gamma has to be between 0 and 1!");
            return new CyclicalTracker(this);
        }
    }

    /**
     * {@code CyclicalTracker} provides three predefined cyclical modes and can be selected by this
     * enum.
     *
     * <ul>
     *   <li>TRIANGULAR: A basic triangular cycle without amplitude scaling.
     *   <li>TRIANGULAR2: A basic triangular cycle that scales initial amplitude by half each cycle.
     *   <li>EXP_RANGE: A cycle that scales initial amplitude by (gamma<sup>cycleIterations</sup>)
     *       at each cycle iteration.
     * </ul>
     */
    public enum CyclicalMode {
        TRIANGULAR,
        TRIANGULAR2,
        EXP_RANGE
    }

    /** {@code ScaleFunction} is an interface to implement a custom scale function. */
    public interface ScaleFunction {

        /**
         * Custom scaling policy. Return value has to satisfy 0&lt;=func(steps)&lt;=1 for all
         * steps&gt;1.
         *
         * @param steps current cycles if {@code scaleModeCycle} is true; input update number if it
         *     is false
         * @return scale ratio
         */
        float func(int steps);
    }

    /** No amplitude decay: every cycle reaches the full range. */
    private static class TriangularScaleFunction implements ScaleFunction {

        @Override
        public float func(int steps) {
            return 1f;
        }
    }

    /** Halves the amplitude every cycle: 1, 1/2, 1/4, ... */
    private static class Triangular2ScaleFunction implements ScaleFunction {

        @Override
        public float func(int steps) {
            return (float) (1 / (Math.pow(2f, steps - 1)));
        }
    }

    /** Exponential decay of the amplitude: gamma^steps. */
    private static class ExpRangeScaleFunction implements ScaleFunction {

        float gamma;

        ExpRangeScaleFunction(float gamma) {
            this.gamma = gamma;
        }

        @Override
        public float func(int steps) {
            return (float) Math.pow(this.gamma, steps);
        }
    }
}
/** Index is actually iSubItem or a column Key.
 *
 * Returns the sub-item for the given index, creating and caching a wrapper
 * object on first access. Numeric indices are used directly as the sub-item
 * index; string indices are resolved through the column-header collection.
 * The caller receives an AddRef'd interface pointer. */
STDMETHODIMP CNSListSubItems::get_Item(VARIANT Index, INSListSubItem **pVal)
{
    if (!pVal)
        return E_POINTER;
    if (!m_pListView)
        return E_UNEXPECTED;

    // Negative result means the sub-item has not been wrapped/cached yet.
    long iArrayIndex = FindItem(Index);
    HRESULT hr;
    if (iArrayIndex < 0)
    {
        long iSubItem = -1;
        ccVariant vIndex(Index);
        vIndex.ToValue();
        if (vIndex.IsNumeric())
        {
            // Numeric index: use it as the sub-item index directly.
            iSubItem = (long)vIndex;
        }
        else
        {
            // String index: look up the column header by key and take its
            // sub-item index.
            hr = vIndex.ChangeType(VT_BSTR);
            if (!FAILED(hr))
            {
                CComPtr<INSColumnHeader> spColumnHeader;
                hr = m_pListView->m_spColumnHeaders->get_Item(vIndex,&spColumnHeader);
                if (FAILED(hr))
                    return E_FAIL;
                hr = spColumnHeader->get_SubItemIndex(&iSubItem);
                if (FAILED(hr))
                    return E_FAIL;
            }
        }
        // Sub-item 0 is the item itself, so only indices > 0 are valid here.
        if (iSubItem > 0)
        {
            // Create a new wrapper, wire it to its owners, and cache it.
            CComObject<CNSListSubItem>* plsi = NULL;
            hr = CComObject<CNSListSubItem>::CreateInstance(&plsi);
            if (FAILED(hr))
                return hr;
            CComPtr<INSListSubItem> spSubItem;
            hr = plsi->QueryInterface(IID_INSListSubItem,(void**)&spSubItem);
            if (FAILED(hr))
            {
                // QI failed, so no smart pointer owns plsi yet — free it here.
                delete plsi;
                return hr;
            }
            plsi->m_lSubItemIndex = iSubItem;
            plsi->m_pListItem = m_pListItem;
            plsi->m_pListView = m_pListView;
            plsi->m_pListSubItems = this;
            NSINITIALIZE(spSubItem);
            if (!m_arrListSubItems.Add(spSubItem))
                return E_OUTOFMEMORY;
            // Hand out an extra reference for the caller.
            *pVal = spSubItem;
            if (*pVal)
                (*pVal)->AddRef();
            return S_OK;
        }
        else
        {
            return DISP_E_BADINDEX;
        }
    }
    else
    {
        // Cache hit: return the previously created wrapper, AddRef'd.
        *pVal = m_arrListSubItems[iArrayIndex];
        if (*pVal)
            (*pVal)->AddRef();
        return S_OK;
    }
}
def find_connection_response(connections=None):
    """Pick the single viable VPN connection out of a describe result.

    :param connections: dict as returned by describe_vpn_connections, with a
        'VpnConnections' list; may be None/empty.
    :returns: the one connection dict that is not deleted/deleting, or None if
        there is no such connection.
    :raises VPNConnectionException: when more than one viable connection matches.
    """
    if not connections or 'VpnConnections' not in connections:
        return None

    candidates = connections['VpnConnections']
    dead_states = ("deleted", "deleting")

    if len(candidates) == 1:
        only = candidates[0]
        return only if only['State'] not in dead_states else None

    if len(candidates) > 1:
        viable = [conn for conn in candidates if conn['State'] not in dead_states]
        if len(viable) > 1:
            raise VPNConnectionException(msg="More than one matching VPN connection was found. "
                                             "To modify or delete a VPN please specify vpn_connection_id or add filters.")
        return viable[0] if viable else None
<filename>src/creeps/roles/hauler.ts
import { setMovementData } from "creeps/creep";
import { offsetPositionByDirection } from "utils/RoomPositionHelpers";
import { unpackPosition } from "utils/RoomPositionPacker";

export interface HaulerMemory extends CreepMemory {
    // Index of the source this hauler is assigned to.
    source: number;
    // true = currently collecting energy at the source container,
    // false = currently delivering to base; undefined on first tick.
    gathering?: boolean;
}

/**
 * Hauler role: shuttles energy from a source's miner container back to the
 * base container/storage. Alternates between a "gathering" and a "delivering"
 * phase based on carry capacity.
 */
export function hauler(creep: Creep) {
    const memory = creep.memory as HaulerMemory;
    const home = Game.rooms[creep.memory.home];
    // Bail out until the home room's generated layout/building data exists.
    if (home.memory.genLayout === undefined || home.memory.genBuildings === undefined) {
        return;
    }
    const sourceData = home.memory.genLayout!.sources[memory.source];
    const basicSourceData = home.memory.basicRoomData.sources[memory.source];
    if (sourceData === undefined || basicSourceData === undefined) {
        return;
    }

    // Phase switching: gather until full, deliver until empty.
    memory.gathering = memory.gathering ?? true;
    if (memory.gathering && creep.store.getFreeCapacity(RESOURCE_ENERGY) === 0) {
        memory.gathering = false;
    }
    if (!memory.gathering && creep.store.getUsedCapacity(RESOURCE_ENERGY) === 0) {
        memory.gathering = true;
    }

    if (memory.gathering) {
        // The miner (and its container) sits one step from the source, in the
        // direction stored in sourceData.container.
        const minerPos: RoomPosition = offsetPositionByDirection(
            unpackPosition(basicSourceData.pos),
            sourceData.container
        );
        setMovementData(creep, { pos: minerPos, range: 1 });
        if (creep.pos.isNearTo(minerPos)) {
            // containers[source + 3] — assumes the first 3 container slots are
            // reserved for non-source containers; TODO confirm the layout.
            if (home.memory.genBuildings.containers[memory.source + 3].id !== undefined) {
                const container = Game.getObjectById(home.memory.genBuildings.containers[memory.source + 3].id!);
                if (container instanceof StructureContainer) {
                    // Only withdraw once a full load is available, so the
                    // hauler leaves with a complete carry.
                    if (
                        container.store.getUsedCapacity(RESOURCE_ENERGY) >=
                        creep.store.getFreeCapacity(RESOURCE_ENERGY)
                    ) {
                        creep.withdraw(container, RESOURCE_ENERGY);
                    }
                }
            }
        }
    } else {
        // TODO: Add target locking
        // Delivery: prefer the base container (slot 2), fall back to storage.
        let target: StructureContainer | StructureStorage | null = null;
        if (home.memory.genBuildings.containers[2].id !== undefined) {
            const container = Game.getObjectById(home.memory.genBuildings.containers[2].id!);
            // NOTE(review): the container check uses ">" while the storage
            // check below uses ">=" — possibly unintentional; confirm.
            if (
                container instanceof StructureContainer &&
                container.store.getFreeCapacity(RESOURCE_ENERGY) >
                creep.store.getUsedCapacity(RESOURCE_ENERGY)
            ) {
                target = container;
            }
        }
        if (target === null && home.memory.genBuildings.storage.id !== undefined) {
            const storage = Game.getObjectById(home.memory.genBuildings.storage.id);
            if (
                storage instanceof StructureStorage &&
                storage.store.getFreeCapacity(RESOURCE_ENERGY) >=
                creep.store.getUsedCapacity(RESOURCE_ENERGY)
            ) {
                target = storage;
            }
        }
        if (target != null) {
            setMovementData(creep, { pos: target.pos, range: 1 });
            if (creep.pos.isNearTo(target.pos)) {
                creep.transfer(target, RESOURCE_ENERGY);
            }
        }
    }
}
<reponame>chiubaka/core
// Shape of the product-related application state (presumably a Redux-style
// state slice — confirm against the store configuration).
export interface IProductState {
    product: IProductInnerState;
}

export interface IProductInnerState {
    // Path to the product's logo asset; optional.
    logoPath?: string;
    // Display name of the product.
    productName: string;
    // Flag toggling a test-marketing mode; exact effect defined by consumers.
    testMarketingMode: boolean;
}
def marshmallow_errors():
    """Reference marshmallow's default validation messages for translation.

    The function exists only so that string-extraction tools (e.g. pybabel)
    see these literals wrapped in ``gettext()`` and include them in the
    translation catalog; its return value is unused.  The strings appear to be
    marshmallow's default error messages and must stay byte-identical to them
    for the translated lookups to match — do not edit or translate them here.
    """
    gettext('Missing data for required field.')
    gettext('Not a valid integer.')
    gettext('Not a valid datetime.')
/**
 * Recalculate stats of existing {@link Henna}s.<br>
 * <br>
 * A stat can't be superior to 5. Previous stats are dumped.
 */
private void recalculateStats()
{
    // Accumulate the bonuses of every equipped henna into a scratch array.
    final int[] totals = new int[_stats.length];
    for (Henna henna : _hennas)
    {
        if (henna == null)
            continue;

        totals[0] += henna.getINT();
        totals[1] += henna.getSTR();
        totals[2] += henna.getCON();
        totals[3] += henna.getMEN();
        totals[4] += henna.getDEX();
        totals[5] += henna.getWIT();
    }

    // Replace the previous values, capping each stat bonus at the maximum.
    for (int i = 0; i < _stats.length; i++)
        _stats[i] = Math.min(totals[i], MAX_HENNA_STAT_VALUE);
}
import parse from "./parsing";

// Entry point: run the parsing action immediately when this module loads.
parse();
Coxeter group actions on the complement of hyperplanes and special involutions We consider both standard and twisted action of a (real) Coxeter group G on the complement M_G to the complexified reflection hyperplanes by combining the reflections with complex conjugation. We introduce a natural geometric class of special involutions in G and give explicit formulae which describe both actions on the total cohomology H(M_G,C) in terms of these involutions. As a corollary we prove that the corresponding twisted representation is regular only for the symmetric group S_n, the Weyl groups of type D_{2m+1}, E_6 and dihedral groups I_2 (2k+1) and that the standard action has no anti-invariants. We discuss also the relations with the cohomology of generalised braid groups. Introduction. In 1969 V.I. Arnol'd computed the cohomology algebra of the configuration space M n of n distinct points of the complex plane. This remarkable short paper was the starting point of the active research in this area of mathematics on the crossroad of algebra, geometry and combinatorics. In particular, Brieskorn generalised Arnol'd's results to arbitrary irreducible Coxeter group G and showed that the Poincaré polynomial of the complement M G to the complexified reflection hyperplanes has the form P (M G , t) = (1 + m 1 t) · · · (1 + m n t), where m i = d i − 1 are the exponents of the Coxeter group G, d i are the degrees of the generators in the algebra of G-invariants. Since the product (1 + m 1 ) · · · (1 + m n ) = d 1 · · · d n = |G| is known to be the order of the group G, it is tempting to suggest that the total cohomology space H * (M G ) = H * (M G , C) is the regular representation with respect to the natural action of G on M G . However this turns out not to be true already for the symmetric group G = S n , as it was shown by Lehrer , although not far from being true. The starting point of this work was the following observation. 
Let us consider another action of G on M G using the anti-holomorphic extensions of the reflections from the real to the complexified space. In other words we combine the usual action of the reflections with complex conjugation. The claim is that, for the symmetric group G = S n with this new action, H * (M G , C) is indeed a regular representation. We were not able to find this result in the literature, although the proof can be easily derived from known results. While looking for the simplest explanation of this fact, we have found a simple universal way to investigate this representation for any Coxeter group, both in the standard and twisted case. The main result of this paper is the following two explicit formulae which show how far are the corresponding representations from the regular and describe them as virtual representations. Let G be a finite group generated by reflections in a real Euclidean space V of dimension n, M G be the complement to the complexified reflection hyperplanes in V ⊗ C. Let us denote the twisted representation of G on H * (M G , C) by H * ǫ (M G ). Alternatively we can write where ǫ is the alternating representation of G. We claim that for all Coxeter groups G with the standard action on M G and for the twisted action Here ρ is the regular representation of G, X G is a special set of involutions in G (more precisely, conjugacy classes of involutions), X ǫ G is the subset of X G consisting of even involutions, Ind G <σ> (1) is the induced representation from the trivial representation of the subgroup generated by σ. We should mention that for the standard action in most of the cases our result can be derived from the results of the papers , so our main contribution here is the following universal geometric description of the set X G . 
Namely, consider any involution σ in the geometric realisation of G as a group of orthogonal transformations of a Euclidean space V and the corresponding splitting V = V 1 ⊕ V 2 so that V 1 = V − (σ) and V 2 = V + (σ) are the eigenspaces of σ with eigenvalues −1 and 1, respectively. Let R 1 and R 2 be the intersections of the root system R of group G with these subspaces, and G 1 , G 2 be the corresponding Coxeter subgroups of G. We call the involution σ special if for any root a ∈ R at least one of the projections of a onto V 1 and V 2 is proportional to a root from R 1 or R 2 . In particular the identity and any simple reflection are always special involutions. We denote by X G the set of all conjugacy classes of special involutions in G. By choosing a representative in each class we can realise X G as a special set of involutions in G. For the symmetric group S n , Weyl groups of type D 2m+1 , E 6 and dihedral groups I 2 (2k + 1) the set X G consists of two elements: the identity and the class of a simple reflection s, so we have in that case For the Coxeter groups of type B n (or C n ) the set X G consists of 2n involutions which in the geometric realisation have the form σ k = P 12 ⊕ (−I k ) ⊕ I n−k−2 , k = 0, 1, . . . , n − 2 and τ l = (−I l ) ⊕ I n−l , l = 0, 1, . . . , n. Here P 12 denotes the permutation of the first two coordinates and I k is the k × k identity matrix. In the case of Weyl groups of type D 2m , E 7 , E 8 and for the icosahedral groups H 3 and H 4 , X G consists of 4 involutions: ±Id and ±s, s is a simple reflection. Finally, for the dihedral groups I 2 (2k) X G consists of 4 elements: two conjugacy classes of simple reflections and the central elements ±Id. In the Appendix we list the graphs of all equivalence classes of nontrivial special involutions using Richardson's description . 
As a corollary we have that the twisted action of an irreducible Coxeter group G on H * (M G ) is a regular representation only for the symmetric group S n , the Weyl groups of type D 2m+1 , E 6 and dihedral groups I 2 (2k + 1). Note that besides the one-dimensional Coxeter group of type A 1 this is the list of Coxeter groups with trivial centre. Our result can be reformulated in terms of the decomposition of the cohomology into irreducible G-modules: the multiplicity m(W ) of the irreducible G-module W in the decomposition of H * (M G ) is where W ± (σ) = {v ∈ W : σv = ±v} are the eigenspaces of the involution σ. For the twisted action, X G is replaced by X ǫ G . In particular, the trivial representation has the multiplicity where |X| denotes the number of elements in the set X. This gives us a topological interpretation of |X G | as the total Betti number of the corresponding quotient space Σ G = M G /G. More precisely we show that the Poincaré polynomial P (Σ G , t) has the form where V is the geometric representation of G and V − (σ) is the (−1)-eigenspace of σ in this representation. Notice that, according to a classical result due to Brieskorn and Deligne, Σ G is the Eilenberg-Mac Lane space K(π, 1) for the corresponding generalised braid group π = B G , so the last formula also can be interpreted in terms of the (rational) cohomology of B G (see ). Another interesting corollary of our results is that the multiplicity m(ǫ) of the alternating representation in H * (M G ) is zero. Due to (4) this is equivalent to the fact that the numbers of even and odd classes in X G are equal. For the group S n this was conjectured (in different terms) by Stanley and proved by Hanlon in . For the general Coxeter groups it was first proved by Lehrer . 
Our approach is geometrical and based on the (generalised) Lefschetz fixed point formula which says that the Lefschetz numbers for the action of the finite groups are equal to the Euler characteristic of the corresponding fixed sets (see ). We should mention that the idea to use the Lefschetz fixed point formula is not new in this area (see e.g. ), however in this form it seems to be not explored before. Characters and Lefschetz fixed point formula. Consider first the standard action of G on M G . The character of an element g ∈ G in the corresponding representation H * (M G ) is where g * k is the action of g on H k (M G ). Now replace the action of g by its composition with the complex conjugation which we denote asḡ. Then because complex conjugation is acting as (−1) k on the k-th cohomology of M G we have which by definition is the Lefschetz number L(ḡ) of the mapḡ. Now we can apply the Lefschetz fixed point formula which says that this number is equal to the Euler characteristic χ(Fḡ) of the fixed set Fḡ = {z ∈ M G :ḡ(z) = z}. In the twisted case all the even elements of G act in the standard way but the action of all odd elements are twisted and the corresponding character of an odd is the Lefschetz number of the standard action of g on M G . Proposition 1. (i) The character of an element g from G for the standard action of G on H * (M G ) is equal to the Euler characteristic of the fixed set Fḡ. (ii) The character of an odd element g from G in H * ǫ (M G ) for the twisted action of G on M G is equal to the Euler characteristic of the fixed set F g . (iii) The fixed set F g is empty unless g is identity, so the character of any odd element in the twisted representation H * ǫ (M G ) is zero. The fixed set Fḡ is empty unless g is an involution, so if the order of g is bigger than 2 then χ(g) = 0. Only the last part needs a proof. 
A fixed point of g corresponds to an eigenvector of g in the geometric realisation with the eigenvalue 1, which can be chosen real (recall that G is a real Coxeter group). But on the reals G acts freely on the set of Coxeter chambers. Thus F g is empty unless g is identity. Now assume thatḡ has a fixed point z ∈ M G , then obviously h =ḡ 2 = g 2 must also have a fixed point, which means that g 2 must be identity. Remark. The fact that only involutions may have non-zero characters is known (see Corollary 1.10 in , where a combinatorial explanation of this fact is given). Our proof is close to a similar proof from where the case G = S n was considered. Now we are ready to compute the characters for the classical series. Let us start with A n−1 , corresponding to the symmetric group G = S n acting by permutations on the configuration space M n of n distinct points of the complex plane. Any involution σ in that case is conjugated to one of the involutions of the form π k = (12)(34) . . . (2k −1, 2k), k = 1, . . . , , where (ij) denote the transposition of i and j. The fixed set F k for the actionπ k consists of the points (z 1 , It is easy to see that topologically all connected components are the same and equivalent to the product M k × R n−2k , where M k is the standard configuration space of k different points in the complex plane. Because the Euler characteristic of M k is zero for k > 1 we conclude that only the simple reflection s = π 1 = (12) has non-zero character. In that case we have 2(n − 2)! contractible connected components, so χ(s) = 2(n − 2)!. We can formulate this fact in the following form. Indeed the characters for the induced representations Ind G H (1) from the trivial representation of the subgroup H ⊂ G are given by the formula where h ∼ g means that h is conjugate to g in G, C(h) is the centraliser of h: C(h) = {g ∈ G : gh = hg}. 
The centraliser of the simple reflection s is C(s) = Z 2 ×S n−2 , where Z 2 is generated by s = (12) and S n−2 is the subgroup of S n permuting the numbers 3, 4, . . . , n. According to the formula (6) only identity e and g ∼ s have non-zero characters in 2Ind Sn <s> (1). Moreover χ(e) = |S n | = n! and χ(s) = |C(s)| = 2(n − 2)!. Comparing this with our previous analysis we have the proposition. Now let us consider the twisted action of the symmetric group S n on M n . From the previous analysis of the fixed sets Fḡ and Proposition 1 we deduce the following Proposition 3. The twisted action of S n on M n induces the regular representation on the total cohomology H * (M n ). Consider now the case B n . In the standard geometric realisation any involution is conjugate either to the diagonal form τ k = diag(−1, . . . , −1, 1, . . . , 1) = (−I k ) ⊕ I n−k or to the form when we have m additional elementary transposition blocks P : σ m,k = P ⊕ P · · · ⊕ P ⊕ (−I k ) ⊕ I n−k−2m . A simple analysis similar to the previous case shows that in the last case the fixed set consists of several connected components each topologically equivalent to M m × R n−2m . So if the number of transpositions m is bigger than 1 the Euler characteristic of this set and thus the corresponding character χ(σ m,k ) is zero. One can check that in the remaining cases σ k = σ 1,k and τ k all the components are contractible and the action of the corresponding centralisers C(σ) on them is effective. Thus if we define the set X Bn as the union of the 2n involutions σ k , k = 0, . . . , n − 2 and τ l , l = 0, 1, . . . , n we have the following Proposition 4. where ρ is the regular representation of B n . In the twisted case one should take the sum over the subset of X Bn consisting of the involutions which are even elements of G. For the standard action this result was first obtained by Lehrer in . In the D n case the results are slightly different for odd and even n. 
The fixed set of σ 2k consists of the points (z,z, ix 1 , . . . , ix 2k , y 1 , . . . , y n−2k−2 ), where z = ±z (i.e. z is not real or pure imaginary), x i , y i ∈ R, x p = ±x q , y p = ±y q if p = q. The number of connected components of the fixed set in that case is equal to the order of the group D 2 × D 2k × D n−2k−2 which acts on this set. However if k > 0 we have to exclude from this set also the subspaces of codimension 2 given by the equation x j = y l = 0 for all j = 1, . . . , 2k and l = 1, . . . , n − 2k − 2. One can check that as a result the Euler characteristic of all connected components (and thus of the whole fixed set) in that case is 0. Similar arguments show that the same is true for the fixed set of the involutions τ 2l . In the only remaining case of simple reflection σ 0 all the connected components of the corresponding fixed set are contractible. The centraliser C(σ 0 ) coincides with the subgroup D 2 × D n−2 , which acts freely and transitively on these components. For the groups of type D n with even n = 2m the involutions σ 0 , σ 2m−2 and central element τ 2m = −I n have the centralisers whose order is equal to the number of connected components of the corresponding fixed sets, which are all contractible in this case. The Euler characteristic of the fixed sets of all other involutions can be shown to be zero in the same way as in the odd case. Thus we have the following Proposition 5. For the Coxeter group G of type D n with the standard action we have (1) in the odd case n = 2m + 1 and (1) − ρ) in the even case n = 2m. For the twisted action the formulas are respectively (10) H * ǫ (M G ) = ρ in the odd case and In order to investigate the general case and to understand the nature of the exceptional set X G we will need the general results about involutions in the Coxeter groups summarised in the next section. Involutions in the Coxeter groups and their centralisers. 
We start with the description of the conjugacy classes of involutions in general Coxeter group due to Richardson . We refer to Bourbaki or Humphreys for the standard definitions. Let G be a Coxeter group, Γ be its Coxeter graph. Recall that the vertices of Γ correspond to the generating reflections, which form the set S. We will identify G with its geometric realisation in the Euclidean vector space V, in which s ∈ S acts as the reflection with respect to a hyperplane orthogonal to the corresponding root e s . Let J be a subset of S, G J be the subgroup G generated by J (such a subgroup is called parabolic). Let also J * = {e s , s ∈ J} and V J be the subspace of V generated by J * . Following Richardson we say that J satisfies the (−1)-condition if G J contains an element σ J which acts on V J as −Id. This element, being in G J , acts as the identity on the orthogonal complement of V J . Thus σ J is uniquely determined and is an involution. Proposition 6. (Richardson ) Let σ be any involution in G. Then (i) there exists a subset J ⊂ S satisfying the (−1)-condition such that σ is conjugate to σ J ; (ii) the involutions σ J and σ K are conjugate in G if and only if g(J * ) = K * for some g ∈ G. Richardson gave also an algorithm for testing G-equivalence based on the results by Howlett and Deodhar . For an irreducible Coxeter group G the whole set J = S satisfies the (−1)-condition only in the following cases: A 1 , B n , D 2n , E 7 , E 8 , F 4 , H 3 , H 4 , I 2 (2n). This means that each connected component of the Coxeter graph corresponding to the group G J for any J satisfying the (−1)-condition must be of that form. In particular we see that the components of type A n with n > 1 are forbidden, which imposes strong restrictions on such subsets J. In the Appendix we give a subset J for each conjugacy class of special involutions. 
To describe the centralisers of the involutions σ J we can use the results by Howlett who described the normalisers of the parabolic subgroups in the general Coxeter group. Indeed we have the following Proposition 7. The centraliser C(σ J ) coincides with the normaliser N (G J ) of the corresponding parabolic subgroup G J . Recall that the normaliser N (H) of a subgroup H ⊂ G consists of the elements g ∈ G such that gHg −1 = H. Consider the orthogonal splitting V = V J ⊕ V ⊥ J , corresponding to the spectral decomposition of σ J . Obviously the normaliser N (G J ) preserves this splitting and thus N (G J ) is a subgroup of the centraliser C(σ J ). To show the opposite inclusion take any g ∈ C(σ J ) then g(V J ) = V J and therefore g(e s ) belongs to the intersection of the root system R with the subspace V J . But according to (Proposition 1.10) this intersection coincides with the root system of the group G J . This means that gsg −1 belong to G J for any generating reflection s ∈ J and thus g belongs to the normaliser N (G J ). So the question now is only what are the special involutions which form the set X G . Special involutions in the Coxeter groups. Let σ be any involution in the Coxeter group G, V = V 1 ⊕ V 2 is the corresponding spectral decomposition of its geometric realisation, where V 1 and V 2 are the eigenspaces with the eigenvalues −1 and 1, respectively. According to the previous section one can assume that V 1 = V J for some subset J ∈ S. Consider the intersections R 1 and R 2 of the root system R of G with the subspaces V 1 and V 2 . According to , they are the root systems of the Coxeter subgroups G 1 and G 2 , where G 1 is the corresponding parabolic subgroup G J and G 2 is the subgroup of G consisting of the elements fixing the subspace V 1 . Definition. We will call the involution σ special if for any root from R at least one of its projections onto V 1 and V 2 is proportional to a root from R 1 or R 2 . 
In particular, the identity and simple reflections are special involutions for all Coxeter groups. The following Proposition explains the importance of this notion for our problem. Proposition 8. For any special involution σ all the connected components of the fixed set Fσ are contractible. Their number N σ = |G 1 ||G 2 | is equal to the product of the orders of the corresponding groups G 1 and G 2 . Proof. The fixed set Fσ is the subspace V 2 ⊕ iV 1 minus the intersections with the reflection hyperplanes. The hyperplanes corresponding to the roots from R 1 and R 2 split this subspace into |G 1 ||G 2 | contractible connected components. The meaning of the condition on the special involutions is to make all other intersections redundant. Indeed the intersection with the hyperplane (α, z) = 0 is equivalent to two conditions: (α 1 , x) = 0 and (α 2 , y) = 0 where α 1 and α 2 are the projections of the root α on V 1 and V 2 respectively, x ∈ V 1 , y ∈ V 2 . If at least one of the vectors α i is proportional to some root from R i this intersection will be contained in the hyperplanes already considered. Corollary 1. The character of the action of any special involution σ on H * (M G ) is non-zero and equal to |G 1 ||G 2 |. It turns out that the converse statement is also true. Proposition 9. If the character χ(g) = 0 in H * (M G ) then g is a special involution. Unfortunately the only proof we have is case by case check. For the classical series this follows from the fact that the special involutions in that case are exactly those described in the first section of this paper. For the exceptional Weyl groups one can also use the results of . The list of the special involutions (up to a conjugation) for all irreducible Coxeter groups is given in the Introduction (see also the Appendix for the corresponding Richardson's graphs). It is easy to check that they all have the following property which is very important for us. Proposition 10. 
The centraliser C(σ) of any special involution σ coincides with the product of the corresponding Coxeter subgroups G 1 × G 2 . Obviously G 1 × G 2 is a subgroup of C(σ) but the fact that C(σ) = G 1 × G 2 is not true for a general involution (an example is any of the involutions σ 2k with k > 0 in D 2m+1 -case, for which Summarising we have the following main Theorem 1. Let X G be the set of conjugacy classes of special involutions in the Coxeter group G. Then the total cohomology H * (M G ) as G-module with respect to the standard action of G on M G can be represented in the form For the twisted action one should replace in this formula X G by its subset consisting of the conjugacy classes of even special involutions. In particular, we see that H * ǫ (M G ) is the regular representation only for the symmetric group S n , the Weyl groups of type D 2m+1 , E 6 and dihedral groups I 2 (2k + 1). Applying Frobenius reciprocity formula for the characters of the induced representations we have the following corollary. Let W be an irreducible representation of G, W ± (σ) = {v ∈ W : σv = ±v} be the eigenspaces of the involution σ with eigenvalues ±1 respectively. In particular, for the trivial and alternating representations we have respectively G is the subset of X G consisting of odd special involutions and |X| denotes the number of elements in the set X. In other words the number of special involutions (up to a conjugacy) is equal to the number of invariant cohomology classes in H * (M G ). The corresponding numbers are given in the table below. Numbers |X G | of conjugacy classes of special involutions A n B n D n , n odd D n , n even E 6 E 7 E 8 F 4 H 3 H 4 I 2 (n), n odd I 2 (n), n even 2 2n 2 4 2 4 4 8 4 4 2 4 A simple analysis of this list leads to the following Proposition 11. 
For any irreducible Coxeter group G the numbers of even and odd involutions in the special set X G are equal: The multiplicity m(ǫ) of the alternating representation in H * (M G ) is zero for all G. In other words the alternating representation never appears in the cohomology of M G . For the symmetric group the fact that m(ǫ) = 0 was conjectured by Stanley (in a different, combinatorial terms) and proved by Hanlon in . The general case was settled by Lehrer in . We give some explanations of this remarkable fact in the next section. Special involutions and cohomology of Brieskorn's braid groups. Consider now the corresponding quotient space Σ G = M G /G. The fundamental group of this space is called Brieskorn's braid group and denoted as B G . For the space M G it is called Brieskorn's pure braid group and denoted as P G . It is known after Brieskorn and Deligne that M G and Σ G are Eilenberg-Mac Lane spaces K(π, 1) for the pure and usual braid groups P G and B G respectively (see , ), so the cohomology of these spaces can be interpreted as cohomology of the corresponding braid groups. The investigation of these cohomology was initiated by Arnol'd in , who considered the case of the symmetric group. The rational (or complex) coefficients cohomology of the braid groups related to arbitrary Coxeter group were computed by Brieskorn . Later Orlik and Solomon found an elegant description of the de Rham cohomology as the algebra generated by the differential forms ω α = d log α, α ∈ R, with explicitly given relations (defining the so-called Orlik-Solomon algebra A G ). Since H * (Σ G , C) is simply the G-invariant part of H * (M G , C), it follows from the previous section that the total Betti number of Σ G , which is the dimension of H * (Σ G , C), is equal to |X G |. The relation between special involutions in G and the cohomology H * (Σ G , C) can be described more precisely in the following way. Proposition 12. 
The Poincaré polynomial P (Σ G , t) of the cohomology of the Brieskorn's braid group B G has the form where V is the geometric representation of G and V − (σ) is the (−1)-eigenspace of σ in this representation. The simplest proof of this proposition is by comparison of the Brieskorn formulas from and our list of simple involutions. A more satisfactory proof may be found in the following way in terms of the corresponding Orlik-Solomon algebra A G ≈ H * (M G ). Let σ be a special involution and S σ be a set of the simple roots in the Coxeter subsystem R 1 , which is the set of roots α ∈ R such that σα = −α (see the previous section). Conjecture. For any special involution σ the symmetrisation of the product Ω σ = α∈Sσ ω α by the action of the group G is a non-zero element in A G . Any G-invariant element in this algebra is a linear combination of such elements. We have checked this for all Coxeter groups except E 7 , E 8 , F 4 , H 3 , H 4 , but the proof is by straightforward calculation with the use of the Orlik-Solomon relations. It would be nice to find a more conceptual proof for all Coxeter groups. Notice that in S n case our claim reduces to Arnold's result saying that the symmetrisation of any element from the cohomology of the pure braid group of degree more than 1 is equal to zero and the only G-invariant element of degree 1 is given by the differential form i =j d log(z i − z j ) (see Corollary 6 in ). Notice that as a corollary of Proposition 11 we have that the anti-symmetrisation of any element in Orlik-Solomon algebra is zero. This is also related to the fact that all the polynomials P (Σ G , t) are divisible by t + 1. Indeed from the previous section we know that m(ǫ) = |X ǫ G | − |X o G | which due to Proposition 12 is equal to P (Σ G , −1) and thus is zero. Although the fact that P (Σ G , t) is divisible by t + 1 is transparent from the list of all these polynomials given above we will give here an independent topological explanation. 
Namely consider the projectivisation P M G of the space M G . We have a diffeomorphism Indeed for any root α the map x → ( , (α, x)) establish such a diffeomorphism. Notice that it is compatible with the action of the group G if we assume that the action on C * is trivial. On the cohomology level we have an isomorphism which immediately implies the following result. Let W be any irreducible representation of G and P W (t) be the Poincaré polynomials of the multiplicities of W in H k (M G ). An explicit description of these polynomials is one of the most interesting open problems in this area. For the trivial representation the corresponding polynomial coincides with the Poincaré polynomial P (Σ G , t) according to Proposition 12. Proposition 13. All irreducible representations of the Coxeter group G appear in H * (M G ) in pairs in degrees differing by 1. The corresponding polynomials P W (t) are divisible by t + 1. In particular, this implies that P (Σ G , −1) = 0 and (modulo Propositions 11 and 12) the fact that the alternating representation never appears in the cohomology of generalised pure braid groups. Concluding remarks. We have shown that the action of the Coxeter groups G on the total cohomology of the corresponding complement space M G can be described in a very simple way (2) in terms of the special involutions. Although one might expect a formula like that knowing that only involutions may have non-zero characters in this representation, there are two facts which we found surprising: 1) the involutions with non-zero characters admit a very simple geometric characterisation; 2) the character of the action of such an involution σ on H * (M G ) is exactly the order of the corresponding centraliser C(σ). 
Special involutions also appear to play a key role in the description of the multiplicative structure of the cohomology of the Brieskorn braid group: if the conjecture formulated in the last section is true, a basis of representatives of the cohomology in the Orlik-Solomon algebra can be given in terms of special involutions. The nature of the set X G also needs better understanding. In particular, it may be worth looking at the geometry of the subgraphs Γ J in the Coxeter graph Γ corresponding to the special involutions (see the Appendix for a complete list of these subgraphs). We would like to note that in all the cases except D 4 the subgraph Γ J consists of at most two connected components, which together with the (−1)-condition implies very strong restrictions on J. The only exception is the graph of the involution −s in D 4 , which consists of three components. It would be interesting to generalise our results for the space M G (R N ) which is V ⊗ R N without corresponding root subspaces Π α ⊗ R N , α ∈ R (our case corresponds to N = 2). For the symmetric group G = S n this is the configuration space of n distinct points in R N ; this case was investigated by Cohen and Taylor in . In this relation we would like to mention the recent very interesting papers . In particular, the idea of Atiyah to use equivariant cohomology may be the clue to understanding the graded structure of the G-module H * (M G ). Another obvious generalisation to look at is the case of complex reflection groups.
A NEW FOURIER TRANSFORM In order to define a geometric Fourier transform, one usually works with either ℓ-adic sheaves in characteristic p > 0 or with D-modules in characteristic 0. If one considers ℓ-adic sheaves on the stack quotient of a vector bundle V by the homothety action of Gm, however, Laumon provides a uniform geometric construction of the Fourier transform in any characteristic. The category of sheaves on this quotient stack is closely related to the category of (unipotently) monodromic sheaves on V . In this article, we introduce a new functor, which is defined on all sheaves on V in any characteristic, and we show that it restricts to an equivalence on monodromic sheaves. We also discuss the relation between this new functor and Laumon’s homogeneous transform, the Fourier-Deligne transform, and the usual Fourier transform on D-modules (when the latter are defined).
import { ModalBuilder } from '../../../src/surfaces/modal';

// Test fixture: every field simply echoes its own key, so assertions against
// the built modal payload are self-describing.
export const params = {
  title: 'title',
  close: 'close',
  submit: 'submit',
  privateMetaData: 'privateMetaData',
  callbackId: 'callbackId',
  externalId: 'externalId',
};

// Shared ModalBuilder instance pre-configured with the fixture params above.
export const mock = new ModalBuilder(params);
#ifndef QHTTP_HTTPSERVER_H
#define QHTTP_HTTPSERVER_H

#include "global.h"

#include <QtCore/qcompilerdetection.h>
#include <QtCore/qobject.h>
#include <QtNetwork/qhostaddress.h>

namespace QHTTP {

class HttpClient;
class HttpServerPrivate;

// Minimal HTTP server: listens on an address/port and emits newConnection()
// for every accepted client.
class QHTTP_API HttpServer : public QObject {
    Q_OBJECT
    Q_DISABLE_COPY(HttpServer)

    // Pimpl pointer; presumably owned by this object — confirm in the .cpp.
    HttpServerPrivate *d;

signals:
    // Emitted once for each newly accepted client connection.
    void newConnection(QHTTP::HttpClient *client);

public:
    explicit HttpServer(QObject *parent = Q_NULLPTR);
    ~HttpServer();

public:
    // Starts listening on the given address/port; returns true on success.
    // NOTE(review): port 0 conventionally lets the OS choose a free port —
    // confirm against the implementation.
    bool listen(const QHostAddress &address = QHostAddress::Any, quint16 port = 0);
    // Stops accepting connections.
    void close();
};

}

#endif // QHTTP_HTTPSERVER_H
/*
 * Licensed to Gisaïa under one or more contributor
 * license agreements. See the NOTICE.txt file distributed with
 * this work for additional information regarding copyright
 * ownership. Gisaïa licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
import {
  ChartDimensions, ChartAxes, SwimlaneAxes, SelectedOutputValues, HistogramUtils,
  DataType, HistogramData, Position, BrushCornerTooltips, SwimlaneData
} from './utils/HistogramUtils';
import { HistogramParams } from './HistogramParams';
import { BrushBehavior } from 'd3-brush';
import { scaleUtc, scaleLinear, scaleTime } from 'd3-scale';
import { min, max } from 'd3-array';

/**
 * Base class for d3-based histogram/swimlane components.  It owns the SVG
 * contexts, chart dimensions, x-axes and the brush-selection corner tooltips;
 * concrete subclasses implement resize(), the axes accessor and the
 * data-interval computation.
 */
export abstract class AbstractHistogram {

  public histogramParams: HistogramParams;
  public brushCornerTooltips: BrushCornerTooltips;
  public isBrushing = false;

  /** Contexts */
  protected context: any;
  protected barsContext: any;
  protected noDatabarsContext: any;
  protected brushContext: any;
  protected tooltipCursorContext: any;
  protected allAxesContext: any;

  /** Chart dimensions */
  protected chartDimensions: ChartDimensions;
  protected isWidthFixed = false;
  protected isHeightFixed = false;

  /** Data */
  protected dataDomain: Array<HistogramData>;
  protected dataInterval: number;

  /** Brush selection */
  protected selectionBrush: BrushBehavior<any>;
  protected selectionInterval: SelectedOutputValues = { startvalue: null, endvalue: null };
  protected brushHandlesHeight: number = null;
  protected brushHandles;
  protected isBrushed = false;
  protected hasSelectionExceededData = null;
  protected selectedBars = new Set<number>();
  protected fromSetInterval = false;

  /**Axes && ticks */
  protected xTicksAxis;
  protected xLabelsAxis;
  protected xAxis;
  protected hoveredBucketKey: Date | number;
  protected yDimension = 1;
  protected plottingCount = 0;
  // 1 when the x axis is at the bottom, -1 when it is at the top.
  protected minusSign = 1;

  public constructor() {
    this.brushCornerTooltips = this.createEmptyBrushCornerTooltips();
  }

  /** Plots the given data; overridden by concrete histogram/swimlane classes. */
  public plot(data: Array<HistogramData> | SwimlaneData) {
  }

  /** Resets per-plot state and removes any previously drawn chart. */
  public init() {
    /** each time we [re]plot, the bucket range is reset */
    this.histogramParams.bucketRange = undefined;
    this.setHistogramMargins();
    if (this.context) {
      this.context.remove();
    }
  }

  public abstract resize(histogramContainer: HTMLElement): void;

  /**
   * Attaches the HTML containers used to render the brush's right and left
   * corner tooltips, creating the BrushCornerTooltips object if needed.
   */
  public setHTMLElementsOfBrushCornerTooltips(rightHTMLElement: HTMLElement, leftHTMLElement): void {
    if (!this.brushCornerTooltips) {
      this.brushCornerTooltips = this.createEmptyBrushCornerTooltips();
    }
    this.brushCornerTooltips.rightCornerTooltip.htmlContainer = rightHTMLElement;
    this.brushCornerTooltips.leftCornerTooltip.htmlContainer = leftHTMLElement;
  }

  /** Adjusts the margins according to which axis labels are displayed. */
  protected setHistogramMargins() {
    // tighten right and bottom margins when X labels are not shown
    if (!this.histogramParams.showXLabels) {
      this.histogramParams.margin.bottom = 5;
      this.histogramParams.margin.right = 7;
    }
    // tighten left margin when Y labels are not shown
    if (!this.histogramParams.showYLabels) {
      if (this.histogramParams.showXLabels) {
        this.histogramParams.margin.left = 10;
      } else {
        this.histogramParams.margin.left = 7;
      }
    }
    if (this.histogramParams.xAxisPosition === Position.top) {
      this.minusSign = -1;
    }
  }

  /**
   * Initializes the displayed start/end values and the selection interval,
   * unless the change originated from setInterval or the data is unchanged.
   */
  protected initializeDescriptionValues(start: Date | number, end: Date | number, dataInterval: number) {
    if (!this.fromSetInterval && this.histogramParams.hasDataChanged) {
      this.histogramParams.startValue = HistogramUtils.toString(start, this.histogramParams, dataInterval);
      this.selectionInterval.startvalue = start;
      this.histogramParams.endValue = HistogramUtils.toString(end, this.histogramParams, dataInterval);
      this.selectionInterval.endvalue = end;
    }
  }

  protected initializeChartDimensions(): void {
    // set chartWidth value equal to container width when it is not specified by the user
    if (this.histogramParams.chartWidth === null) {
      this.histogramParams.chartWidth = this.histogramParams.histogramContainer.offsetWidth;
    } else if (this.histogramParams.chartWidth !== null && this.plottingCount === 0) {
      this.isWidthFixed = true;
    }
  }

  protected initializeChartHeight(): void {
    // set chartHeight value equal to container height when it is not specified by the user
    if (this.histogramParams.chartHeight === null) {
      this.histogramParams.chartHeight = this.histogramParams.histogramContainer.offsetHeight;
    } else if (this.histogramParams.chartHeight !== null && this.plottingCount === 0) {
      this.isHeightFixed = true;
    }
  }

  /** Returns the d3 scale matching the data type (time in UTC/local, or linear). */
  protected getXDomainScale(): any {
    return (this.histogramParams.dataType === DataType.time) ?
      (this.histogramParams.useUtc) ? scaleUtc() : scaleTime() : scaleLinear();
  }

  /** Returns [first bucket key, end of last bucket] as the histogram borders. */
  protected getHistogramMinMaxBorders(data: Array<HistogramData>): [number | Date, number | Date] {
    const minBorder = data[0].key;
    const followingLastBucket = this.getFollowingLastBucket(data);
    const maxBorder = followingLastBucket.key;
    return [minBorder, maxBorder];
  }

  /**
   * Builds the virtual bucket that follows the last data bucket (its key is
   * last key + interval); its value closes the plotted outline when the y
   * axis does not start from zero.
   */
  protected getFollowingLastBucket(data): HistogramData {
    const dataInterval = this.getDataInterval(data);
    const followingLastBucketKey = +data[data.length - 1].key + dataInterval;
    let value = 0;
    // Sentinels exclude invalid buckets from the min/max.
    // NOTE(review): Number.MIN_VALUE is the smallest POSITIVE double, so with
    // invalid buckets and all-negative values the max would be wrong;
    // -Number.MAX_VALUE looks like the intended sentinel — confirm.
    const minimum = min(data, (d: HistogramData) => this.isValueValid(d) ? d.value : Number.MAX_VALUE);
    const maximum = max(data, (d: HistogramData) => this.isValueValid(d) ? d.value : Number.MIN_VALUE);
    if (!this.histogramParams.yAxisFromZero) {
      if (minimum >= 0) {
        value = minimum;
      } else {
        value = maximum;
      }
    }
    const followingLastBucket = { key: followingLastBucketKey, value };
    const parsedFollowingLastBucket = HistogramUtils.parseDataKey([followingLastBucket], this.histogramParams.dataType)[0];
    return parsedFollowingLastBucket;
  }

  /**
   * Computes the x-domain extent as the union of the data keys, the end of
   * the last bucket and the current selection bounds, padded on the left by
   * a fifth of the data interval.
   */
  protected getXDomainExtent(data: Array<HistogramData>, selectedStartValue: Date | number,
    selectedEndValue: Date | number): Array<Date | number | { valueOf(): number }> {
    this.setDataInterval(data);
    const xDomainExtent = new Array<Date | number | { valueOf(): number }>();
    const dataKeyUnionSelectedValues = new Array<Date | number>();
    data.forEach(d => {
      dataKeyUnionSelectedValues.push(d.key);
    });
    // Include the end of the last bucket to the extent
    dataKeyUnionSelectedValues.push(this.getHistogramMinMaxBorders(data)[1]);
    if (!this.histogramParams.displayOnlyIntervalsWithData) {
      // Widen the requested selection so that every stored selected interval fits.
      this.histogramParams.intervalSelectedMap.forEach(values => {
        if (selectedStartValue > values.values.startvalue) {
          selectedStartValue = values.values.startvalue;
        }
        if (selectedEndValue < values.values.endvalue) {
          selectedEndValue = values.values.endvalue;
        }
      });
    }
    dataKeyUnionSelectedValues.push(selectedStartValue);
    dataKeyUnionSelectedValues.push(selectedEndValue);
    if (this.histogramParams.dataType === DataType.time) {
      xDomainExtent.push(new Date(min(dataKeyUnionSelectedValues, (d: Date) => +d) - this.dataInterval / 5));
      xDomainExtent.push(new Date(max(dataKeyUnionSelectedValues, (d: Date) => +d)));
    } else {
      xDomainExtent.push(min(dataKeyUnionSelectedValues, (d: number) => d) * 1 - this.dataInterval / 5 * this.yDimension);
      xDomainExtent.push(max(dataKeyUnionSelectedValues, (d: number) => d) * 1);
    }
    return xDomainExtent;
  }

  /**
   * Removes the indicator behind the hovered bucket of the histogram
   */
  protected clearTooltipCursor(): void {
  }

  /** Draws the x axes (line, ticks, labels) inside the chart context. */
  protected drawChartAxes(chartAxes: ChartAxes | SwimlaneAxes, leftOffset: number): void {
    // Top or bottom margin is applied depending on where the x axis sits.
    const marginTopBottom = this.chartDimensions.margin.top * this.histogramParams.xAxisPosition +
      this.chartDimensions.margin.bottom * (1 - this.histogramParams.xAxisPosition);
    this.context = this.chartDimensions.svg.append('g')
      .attr('class', 'context')
      .attr('transform', 'translate(' + this.chartDimensions.margin.left + ',' + marginTopBottom + ')');
    this.tooltipCursorContext = this.context.append('g').attr('class', 'tooltip_histogram_bar');
    this.context.on('mouseleave', () => this.clearTooltipCursor());
    this.allAxesContext = this.context.append('g').attr('class', 'histogram__all-axes');
    // leftOffset is the width of Y labels, so x axes are translated by leftOffset
    // Y axis is translated to the left of 1px so that the chart doesn't hide it
    // Therefore, we subtract 1px (leftOffset - 1) so that the first tick of xAxis will coincide with y axis
    let horizontalOffset = this.chartDimensions.height;
    if (!!(chartAxes as any).yDomain) {
      // Anchor the x axis where the y domain crosses its reference value.
      if (!this.histogramParams.yAxisFromZero) {
        const minMax = (chartAxes as ChartAxes).yDomain.domain();
        if (minMax[0] >= 0) {
          horizontalOffset = (chartAxes as ChartAxes).yDomain(minMax[0]);
        } else {
          horizontalOffset = (chartAxes as ChartAxes).yDomain(minMax[1]);
        }
      } else {
        horizontalOffset = (chartAxes as ChartAxes).yDomain(0);
      }
    }
    this.xAxis = this.allAxesContext.append('g')
      .attr('class', 'histogram__only-axis')
      .attr('transform', 'translate(' + (leftOffset - 1) + ',' + horizontalOffset + ')')
      .call(chartAxes.xAxis);
    this.xTicksAxis = this.allAxesContext.append('g')
      .attr('class', 'histogram__ticks-axis')
      .attr('transform', 'translate(' + (leftOffset - 1) + ',' + this.chartDimensions.height * this.histogramParams.xAxisPosition + ')')
      .call(chartAxes.xTicksAxis);
    this.xLabelsAxis = this.allAxesContext.append('g')
      .attr('class', 'histogram__labels-axis')
      .attr('transform', 'translate(' + (leftOffset - 1) + ',' + this.chartDimensions.height * this.histogramParams.xAxisPosition + ')')
      .call(chartAxes.xLabelsAxis);
    this.xTicksAxis.selectAll('path').attr('class', 'histogram__axis');
    this.xAxis.selectAll('path').attr('class', 'histogram__axis');
    this.xTicksAxis.selectAll('line').attr('class', 'histogram__ticks');
    this.xLabelsAxis.selectAll('text').attr('class', 'histogram__labels');
    if (!this.histogramParams.showXTicks) {
      this.xTicksAxis.selectAll('g').attr('class', 'histogram__ticks-axis__hidden');
    }
    if (!this.histogramParams.showXLabels) {
      this.xLabelsAxis.attr('class', 'histogram__labels-axis__hidden');
    }
  }

  /**
   * Plots one rect per bucket: valid buckets at barWeight width, invalid
   * ("no data") buckets at the full step width.
   */
  protected plotBars(data: Array<HistogramData>, axes: ChartAxes | SwimlaneAxes, xDataDomain: any, barWeight?: number): void {
    const barWidth = barWeight ? axes.stepWidth * barWeight : axes.stepWidth * this.histogramParams.barWeight;
    this.barsContext = this.context.append('g').attr('class', 'histogram__bars').selectAll('.bar')
      .data(data.filter(d => this.isValueValid(d)))
      .enter().append('rect')
      .attr('x', function (d) { return xDataDomain(d.key); })
      .attr('width', barWidth);
    this.noDatabarsContext = this.context.append('g').attr('class', 'histogram__bars').selectAll('.bar')
      .data(data.filter(d => !this.isValueValid(d)))
      .enter().append('rect')
      .attr('x', function (d) { return xDataDomain(d.key); })
      .attr('width', axes.stepWidth);
  }

  protected isValueValid(bucket: HistogramData): boolean {
    return HistogramUtils.isValueValid(bucket);
  }

  /**
   * This method is called whenever the brush is being moved. It sets the
   * positions of the brush's left and right corner tooltips.
   */
  protected setBrushCornerTooltipsPositions() {
    this.brushCornerTooltips.leftCornerTooltip.content = this.histogramParams.startValue;
    this.brushCornerTooltips.rightCornerTooltip.content = this.histogramParams.endValue;
    const leftPosition = this.getAxes().xDomain(this.selectionInterval.startvalue);
    const rightPosition = this.getAxes().xDomain(this.selectionInterval.endvalue);
    // If the html container of each corner tooltip is set, then we proceed to set their positions
    if (this.brushCornerTooltips && this.brushCornerTooltips.leftCornerTooltip.htmlContainer &&
      this.brushCornerTooltips.rightCornerTooltip.htmlContainer) {
      const leftTooltipWidth = this.brushCornerTooltips.leftCornerTooltip.htmlContainer.offsetWidth;
      const rightTooltipWidth = this.brushCornerTooltips.rightCornerTooltip.htmlContainer.offsetWidth;
      if (rightTooltipWidth !== 0 && leftTooltipWidth !== 0) {
        if (leftPosition + leftTooltipWidth + 5 > rightPosition - rightTooltipWidth) {
          // If left tooltip and right tooltip meet, switch from horizontal to vertical positions
          this.brushCornerTooltips.horizontalCssVisibility = 'hidden';
          this.brushCornerTooltips.verticalCssVisibility = 'visible';
          this.setVerticalTooltipsWidth();
          this.setBrushVerticalTooltipsXPositions(leftPosition, rightPosition);
          this.setBrushVerticalTooltipsYPositions();
        } else {
          this.brushCornerTooltips.horizontalCssVisibility = 'visible';
          this.brushCornerTooltips.verticalCssVisibility = 'hidden';
          this.setBrushHorizontalTooltipsXPositions(leftPosition, rightPosition);
          this.setBrushHorizontalTooltipsYPositions();
        }
      }
    } else {
      this.brushCornerTooltips.verticalCssVisibility = 'hidden';
      this.brushCornerTooltips.horizontalCssVisibility = 'hidden';
    }
  }

  /** Vertical tooltips span the chart height (the text is rotated). */
  protected setVerticalTooltipsWidth() {
    this.brushCornerTooltips.leftCornerTooltip.width = this.brushCornerTooltips.rightCornerTooltip.width = this.chartDimensions.height;
  }

  protected setBrushVerticalTooltipsXPositions(leftPosition: number, rightPosition: number) {
    this.brushCornerTooltips.leftCornerTooltip.xPosition = -this.chartDimensions.height + this.histogramParams.margin.left + leftPosition;
    this.brushCornerTooltips.rightCornerTooltip.xPosition = this.histogramParams.margin.left + rightPosition;
  }

  protected setBrushVerticalTooltipsYPositions() {
    if (this.histogramParams.xAxisPosition === Position.bottom) {
      this.brushCornerTooltips.leftCornerTooltip.yPosition = this.chartDimensions.height + this.histogramParams.margin.bottom;
    } else {
      this.brushCornerTooltips.leftCornerTooltip.yPosition = this.chartDimensions.height + this.histogramParams.margin.top;
    }
    this.brushCornerTooltips.rightCornerTooltip.yPosition = this.brushCornerTooltips.leftCornerTooltip.yPosition;
  }

  protected setBrushHorizontalTooltipsXPositions(leftPosition: number, rightPosition: number) {
    this.brushCornerTooltips.leftCornerTooltip.xPosition = leftPosition + this.histogramParams.margin.left;
    this.brushCornerTooltips.rightCornerTooltip.xPosition = this.histogramParams.margin.right + this.chartDimensions.width - rightPosition;
  }

  protected setBrushHorizontalTooltipsYPositions() {
    if (this.histogramParams.xAxisPosition === Position.bottom) {
      this.brushCornerTooltips.leftCornerTooltip.yPosition = this.chartDimensions.height + 10;
    } else {
      this.brushCornerTooltips.leftCornerTooltip.yPosition = -3;
    }
    this.brushCornerTooltips.rightCornerTooltip.yPosition = this.brushCornerTooltips.leftCornerTooltip.yPosition;
  }

  /**
   * Returns the smallest positive gap between two consecutive bucket keys;
   * falls back to one day (time data) or one unit when all keys are equal,
   * and to 0 when there are fewer than two buckets.
   */
  protected getHistogramDataInterval(data: Array<HistogramData>): number {
    let interval = Number.MAX_VALUE;
    if (data.length > 1) {
      /** We need to get the smallest difference between 2 buckets that is different from 0 */
      for (let i = 0; i < data.length - 1; i++) {
        const diff = +data[i + 1].key - +data[i].key;
        if (diff > 0) {
          interval = Math.min(interval, diff);
        }
      }
      /** this means that all the buckets have the same key (with different chart ids) */
      if (interval === Number.MAX_VALUE) {
        if (this.histogramParams.dataType === DataType.time) {
          /** interval = 1 day */
          interval = 24 /** h */ * 3600 /** s */ * 1000 /** ms */;
        } else {
          /** interval = 1 unit */
          interval = 1;
        }
      }
    } else {
      interval = 0;
    }
    return interval;
  }

  protected abstract setDataInterval(data: Array<HistogramData> | Map<string, Array<HistogramData>>): void;

  protected abstract getDataInterval(data: Array<HistogramData> | Map<string, Array<HistogramData>>): number;

  protected abstract getAxes(): ChartAxes | SwimlaneAxes;

  /** Builds a BrushCornerTooltips with empty contents and hidden visibility. */
  private createEmptyBrushCornerTooltips(): BrushCornerTooltips {
    const emptyLeftCornerTooltip = { htmlContainer: null, content: '', xPosition: 0, yPosition: 0 };
    const emptyRightCornerTooltip = { htmlContainer: null, content: '', xPosition: 0, yPosition: 0 };
    return {
      leftCornerTooltip: emptyLeftCornerTooltip,
      rightCornerTooltip: emptyRightCornerTooltip,
      verticalCssVisibility: 'hidden',
      horizontalCssVisibility: 'hidden'
    };
  }
}
#include "worker.h"
#include "filesmodel.h"

#include <QDir>

// Worker exposes the contents of the current directory through a FilesModel
// and keeps that model in sync with file-system changes reported by a
// DirectoryWatcher.
Worker::Worker(QObject *parent)
    : QObject(parent),
      m_filesModel(new FilesModel(this)),
      m_directoryWatcher(new DirectoryWatcher)
{
    // Mirror watcher events into the model.  File names are relative to the
    // watched directory (presumably — confirm in DirectoryWatcher).
    m_directoryWatcher->setCallBack([this](const std::wstring &fileName, FileAction fileAction) {
        QString name = QString::fromStdWString(fileName);
        switch (fileAction) {
        case FileAction::FileAdded:
            m_filesModel->addFileInfo(QFileInfo(m_currentDir, name));
            break;
        case FileAction::FileRemoved:
            m_filesModel->removeFileInfo(name);
            break;
        case FileAction::FileModified:
            m_filesModel->updateFileInfo(name);
            break;
        default:
            break;
        }
    });

    // Whenever the current path changes, reload the model (directories first,
    // without "." and "..") and repoint the watcher at the new directory.
    connect(this, &Worker::currentPathChanged, this, [this](const QString &currentPath) {
        const QFileInfoList list = m_currentDir.entryInfoList(QDir::AllEntries | QDir::NoDotAndDotDot, QDir::DirsFirst);
        m_filesModel->setFileInfoList(list);
        m_directoryWatcher->setPath(currentPath.toStdWString());
    });

    // Start in the user's home directory.
    enterToDirectory(QDir::homePath());
}

// Changes into dirName (relative or absolute); emits currentPathChanged only on success.
void Worker::enterToDirectory(const QString &dirName)
{
    if (m_currentDir.cd(dirName))
        emit currentPathChanged(currentPath());
}

// Moves to the parent directory; emits currentPathChanged only on success.
void Worker::upDirectory()
{
    if (m_currentDir.cdUp())
        emit currentPathChanged(currentPath());
}

// Renames an entry of the current directory; returns true on success.
// Note: the model is updated indirectly via the directory watcher, not here.
bool Worker::rename(const QString &oldName, const QString &newName)
{
    return m_currentDir.rename(oldName, newName);
}

// The returned model is owned by this Worker (parented to it).
FilesModel *Worker::filesModel() const
{
    return m_filesModel;
}

// Absolute path of the directory currently shown.
QString Worker::currentPath() const
{
    return m_currentDir.absolutePath();
}
/*

# EncoderThread

  Accepts raw YUV420 and raw audio buffers as input which it will encode
  into FLVTags. A listener will be called from this thread when a new
  FLVTag has been generated; this thread creates a FLV bitstream.

*/
#ifndef ROXLU_ENCODER_THREAD_H
#define ROXLU_ENCODER_THREAD_H

extern "C" {
#  include <uv.h>
}

#include <vector>
#include <streamer/core/EncoderTypes.h>

class FLVWriter;
class VideoEncoder;
class AudioEncoder;

// Thread entry points executed on the worker threads started by EncoderThread.
void encoder_congestion_thread_func(void* user);
void encoder_thread_func(void* user);

// ------------------------------------------------------------------

class EncoderThread {
 public:
  EncoderThread(FLVWriter& flv, VideoEncoder& ve, AudioEncoder& ae);
  ~EncoderThread();
  bool start();                                     /* start the worker thread(s); presumably spawns `thread` — confirm in the .cpp */
  bool stop();                                      /* request shutdown and join */
  void addPacket(AVPacket* pkt);                    /* add a packet that needs to be encoded */

 public:
  FLVWriter& flv;
  AudioEncoder& audio_encoder;
  VideoEncoder& video_encoder;
  volatile bool must_stop;                          /* shutdown flag checked by the worker; NOTE(review): volatile is not a synchronization primitive — confirm it is read/written under `mutex` */
  uv_thread_t thread;
  uv_cond_t cv;                                     /* presumably signalled when new work arrives — confirm */
  uv_mutex_t mutex;                                 /* presumably protects `work` — confirm */
  std::vector<AVPacket*> work;                      /* packets queued for encoding via addPacket() */
  std::vector<AVPacket*> video_packets;
  uv_thread_t congest_thread;
  uv_mutex_t congest_mutex;
};

#endif
#!/usr/bin/env python
# Reads a count/lockout pair and a list of passwords followed by the actual
# password, then prints the best-case and worst-case attempt cost, where every
# block of header[1] failed attempts adds a 5-unit penalty.  Only passwords of
# the same length as the real one are tried, in non-decreasing length order.
header = [int(token) for token in raw_input().split(" ")]
entries = [raw_input() for _ in xrange(header[0] + 1)]
secret = entries[-1]
candidates = sorted(entries[:-1], key=len)
target_len = len(secret)

# Locate the contiguous run of candidates whose length matches the secret's.
pos = 0
while len(candidates[pos]) != target_len:
    pos += 1
best = pos + 1
while pos < len(candidates) and len(candidates[pos]) == target_len:
    pos += 1
worst = pos

# Add the 5-unit lockout penalty for each completed block of failed attempts.
best += ((best - 1) / header[1]) * 5
worst += ((worst - 1) / header[1]) * 5
print("%d %d" % (best, worst))
/**
 * Like getCurationSet--used for layering new data.
 *
 * Parses the ChadoXML document named by getInput(), merges its contents into
 * the existing curation_set, and then loads the associated transaction file.
 * Progress events are fired along the way so the UI can show feedback.
 *
 * @return Boolean.TRUE if the data was parsed and merged successfully
 * @throws ApolloAdapterException if there is no existing curation set to
 *         layer onto, if the XML input is empty, or if parsing fails
 */
public Boolean addToCurationSet() throws ApolloAdapterException {
  boolean okay = false;
  // Layering only makes sense on top of a curation set we already hold.
  if (curation_set == null) {
    String message = "Can't layer ChadoXML data on top of non-ChadoXML data.";
    logger.error(message);
    JOptionPane.showMessageDialog(null,message,"Error",JOptionPane.WARNING_MESSAGE);
    return Boolean.valueOf(false);
  }
  try {
    fireProgressEvent(new ProgressEvent(this, Double.valueOf(5.0),
                                        "Finding new data..."));
    InputStream xml_stream = chadoXmlInputStream(getInput());
    BufferedInputStream bis = new BufferedInputStream(xml_stream);
    fireProgressEvent(new ProgressEvent(this, Double.valueOf(10.0),
                                        "Reading XML..."));
    XMLParser parser = new XMLParser();
    XMLElement rootElement = parser.readXML(bis);
    if (rootElement == null) {
      // Close before throwing so the empty-input path doesn't leak the stream.
      bis.close();
      String msg = "XML input stream was empty--nothing loaded.";
      logger.warn(msg);
      throw new ApolloAdapterException(msg);
    }
    // Closing the buffered stream also closes the underlying xml_stream.
    bis.close();

    fireProgressEvent(new ProgressEvent(this, Double.valueOf(40.0),
                                        "Populating data models..."));
    populateDataModels(rootElement, curation_set);
    logger.info ("Completed XML parse of file " + getInput() +
                 " for region " + curation_set.getName());

    /* need to eradicate the huge hash tables generated when the xml is parsed */
    parser.clean ();
    rootElement = null;

    fireProgressEvent(new ProgressEvent(this, Double.valueOf(90.0),
                                        "Reading transaction file..."));
    TransactionXMLAdapter.loadTransactions(getInput(), curation_set, getCurationState());

    fireProgressEvent(new ProgressEvent(this, Double.valueOf(95.0),
                                        "Drawing..."));
    okay = true;
  }
  catch (ApolloAdapterException dae) {
    logger.error("Error while parsing " + getInput(), dae);
    throw dae;
  }
  catch ( Exception ex2 ) {
    logger.error("Error while parsing " + getInput(), ex2);
    // NOTE(review): only the message is preserved here; if
    // ApolloAdapterException supports a cause, chaining ex2 would keep the
    // original stack trace.
    throw new ApolloAdapterException(ex2.getMessage());
  }
  return Boolean.valueOf(okay);
}
Low light color balancing and denoising by machine learning based approximation for underwater images It is difficult for underwater archaeologists to recover the fine details of a captured image on the seabed when the image quality worsens due to the presence of more noisy artefacts, a mismatched device colour map, and a blurry image. To resolve this problem, we present a machine learning-based image restoration model (ML-IRM) for improving the visual quality of underwater images that have been deteriorated. In this model, a home-made bowl set-up is proposed in which different liquid concentrations are used to replicate seabed water variation; an object is dipped into the bowl, or a video is played behind it, and high-resolution images of the object texture are captured for training the image restoration model. Gaussian and bidirectional pre-processing filters are applied to the high and low frequency components of the training image, respectively. To improve the clarity of the high-frequency channel background, soft-thresholding decreases the presence of distracting artefacts. On the other hand, the ML-IRM model can effectively keep the object textures on a low frequency channel. Experiment findings show that the proposed ML-IRM model improves the quality of seabed images, eliminates colour mismatches, and allows for more detailed information extraction. Blue shadow, green shadow, hazy, and low light test samples are randomly selected from all five datasets including U45 , EUVP , DUIE , UIEB , UM-ImageNet , and the proposed model. Peak Signal to Noise Ratio (PSNR) and Structural Similarity Index (SSIM) are computed for each condition separately. We list the values of PSNR (at 16.99 dB, 15.96 dB, 18.09 dB, 15.67 dB, 9.39 dB, 17.98 dB, 19.32 dB, 14.27 dB, 12.07 dB, and 25.47 dB) and SSIM (at 0.52, 0.57, 0.33, 0.47, 0.44, and 0.23), respectively.
Similarly, it demonstrates that the proposed ML-IRM achieves a satisfactory result in terms of colour correction and contrast adjustment when applied to the problem of improving underwater images captured in low light. To do so, high-resolution images were captured in two low-light conditions (after 6 p.m. and again at 6 a.m.) for the training image datasets, and the results of their observations were compared to those of other existing state-of-the-art methods.
#include "str_cmp.h"

/*
 * Descending-order comparators: each calls its ascending counterpart with the
 * arguments swapped, which inverts the sort order.  They are referenced via
 * the function pointers returned by get_compar(), hence the cppcheck
 * unusedFunction suppressions.
 */

// cppcheck-suppress unusedFunction
int cmp_str_desc(const char *a, const char *b) {
    return strcmp(b, a);
}

// cppcheck-suppress unusedFunction
int cmp_str_nat_desc(const char *a, const char *b) {
    return strnatcmp(b, a);
}

// cppcheck-suppress unusedFunction
int cmp_str_case_desc(const char *a, const char *b) {
    return strcasecmp(b, a);
}

// cppcheck-suppress unusedFunction
int cmp_str_nat_case_desc(const char *a, const char *b) {
    return strnatcasecmp(b, a);
}

/*
 * Selects a string comparator from a bitmask of ordering flags:
 * NATURAL_E (natural/numeric ordering), CI_E (case-insensitive) and
 * DESC_E (descending).  Any unrecognised combination falls back to plain
 * ascending strcmp.
 */
cmp_func get_compar(const size_t order_bits) {
    switch (order_bits) {
    case NATURAL_E:
        return strnatcmp;
    case CI_E:
        return strcasecmp;
    case NATURAL_E | CI_E:
        return strnatcasecmp;
    case DESC_E:
        return cmp_str_desc;
    case NATURAL_E | DESC_E:
        return cmp_str_nat_desc;
    case CI_E | DESC_E:
        return cmp_str_case_desc;
    case NATURAL_E | CI_E | DESC_E:
        return cmp_str_nat_case_desc;
    case 0:
    default:
        return strcmp;
    }
}
import { Mage } from '../../../../aer-types/types'

// Mage definitions for the "RTG" expansion.  Each entry carries the mage's
// ability text (HTML), charge count, unique starter cards and a complexity
// rating consumed elsewhere by the randomizer.
export const mages: Mage[] = [
  {
    expansion: 'RTG',
    name: '<NAME>',
    id: 'OhatAndUlgimor',
    mageTitle: '',
    ability: `
      <h2>Enrapture</h2>
      <p class="ability-activation">Activate during your main phase:</p>
      <p>
        Ohat mode: Suffer 2 damage. Any ally draws five cards and discards two cards in hand.<br />
        Ulgimore mode: Cast a spell prepped to Ulgimor's IV breach without discarding it.
      </p>
    `,
    numberOfCharges: 4,
    uniqueStarters: [
      {
        type: 'Spell',
        name: 'Shower Of Coals',
        expansion: 'RTG',
        id: 'ShowerOfCoals',
        cost: 0,
        effect: `
          <p>
            Ohat Mode:<br />
            <b>Cast:</b> Suffer 1 damage and gain 2 <span class="aether">&AElig;</span>.<br />
            Ulgimor Mode:<br />
            <b>Cast:</b> Deal 3 damage.
          </p>
        `,
        keywords: [],
      },
    ],
    complexityRating: 4,
  },
  {
    expansion: 'RTG',
    name: 'Cairna',
    id: 'Cairna',
    mageTitle: '',
    ability: `
      <h2>Energize</h2>
      <p class="ability-activation">Activate during your main phase:</p>
      <p>
        Gain a spell that costs 5 <span class="aether">&AElig;</span> or less from a supply pile.
        You may lose two charges to gain any spell from a supply pile instead.
        You may lose a charge to prep a spell in your discard pile to one of your opened breaches.
      </p>
    `,
    numberOfCharges: 6,
    uniqueStarters: [
      {
        type: 'Spell',
        name: 'Invigorate',
        expansion: 'RTG',
        id: 'Invigorate',
        cost: 0,
        effect: `
          <p>
            <b>Cast:</b> Deal 1 damage.
            <span class="or">OR</span>
            <b>Cast:</b> Return a spell in your discard pile to your hand.
          </p>
        `,
        keywords: [],
      },
    ],
    complexityRating: 4,
  },
]
package com.example.firstcodeandroid.Web;

import android.content.Intent;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.util.Log;
import android.view.View;
import android.widget.Button;
import android.widget.TextView;

import com.example.firstcodeandroid.R;
import com.google.gson.Gson;
import com.google.gson.reflect.TypeToken;

import org.json.JSONArray;
import org.json.JSONObject;
import org.xml.sax.InputSource;
import org.xml.sax.XMLReader;
import org.xmlpull.v1.XmlPullParser;
import org.xmlpull.v1.XmlPullParserFactory;

import java.io.BufferedReader;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.StringReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.List;

import javax.xml.parsers.SAXParserFactory;

import okhttp3.OkHttpClient;
import okhttp3.Request;
import okhttp3.Response;

/**
 * Demo activity exercising several ways of doing network I/O and parsing on
 * Android: raw HttpURLConnection vs OkHttp, XML parsing via Pull and SAX
 * parsers, and JSON parsing via org.json and Gson.  Every request runs on its
 * own background thread; results are logged or pushed to the UI through
 * showResponse().
 *
 * NOTE: 10.0.2.2 is the development machine's loopback address as seen from
 * the Android emulator.
 */
public class StartActivity10 extends AppCompatActivity {

    private static final String TAG = "StartActivity10";

    private TextView responseText;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_start10);
        responseText = (TextView) findViewById(R.id.web_text);
        // Each button triggers one of the demo request/parsing strategies.
        Button btn = (Button) findViewById(R.id.web_btn1);
        btn.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                Intent intent = new Intent(StartActivity10.this, WebViewActivity.class);
                startActivity(intent);
            }
        });
        Button btn2 = (Button) findViewById(R.id.web_btn2);
        btn2.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                sendRequestWithHttpUrlConnection();
            }
        });
        Button btn3 = (Button) findViewById(R.id.okHttp);
        btn3.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                sendRequestWithOkHttp();
            }
        });
        Button btn4 = (Button) findViewById(R.id.xmlPull);
        btn4.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                convertXMLByPull();
            }
        });
        Button btn5 = (Button) findViewById(R.id.xmlSax);
        btn5.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                convertXMLBySax();
            }
        });
        Button btn6 = (Button) findViewById(R.id.jsonObject);
        btn6.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                convertJson();
            }
        });
        Button btn7 = (Button) findViewById(R.id.jsonGson);
        btn7.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View v) {
                convertJsonWithGson();
            }
        });
    }

    // Deserializes the JSON payload into a UserJson wrapper with Gson and
    // logs each contained user.  ("pase" is a historical typo for "parse".)
    private void paseJsonWithGson(String data) {
        Gson gson = new Gson();
        UserJson list = gson.fromJson(data, new TypeToken<UserJson>(){}.getType());
        for (User user : list.Data) {
            Log.d(TAG, "paseJsonWithGson: " + user.getUserId() + "|" + user.getUserName());
        }
    }

    // Fetches the user list with OkHttp on a background thread and parses it with Gson.
    private void convertJsonWithGson() {
        new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    OkHttpClient client = new OkHttpClient();
                    Request request = new Request.Builder()
                            .url("http://10.0.2.2:8101/api/user/list")
                            .build();
                    Response response = client.newCall(request).execute();
                    String data = response.body().string();
                    paseJsonWithGson(data);
                } catch (Exception ex) {
                    ex.printStackTrace();
                }
            }
        }).start();
    }

    // Parses the user list manually with org.json and logs each entry.
    private void paseJsonWithJsonObject(String json) {
        try {
            JSONObject json1 = new JSONObject(json);
            JSONArray jsonArray = json1.getJSONArray("Data");
            for (int i = 0; i < jsonArray.length(); i++) {
                JSONObject jsonObject = jsonArray.getJSONObject(i);
                String userId = jsonObject.getString("userId");
                String userName = jsonObject.getString("userName");
                Log.d(TAG, "paseJsonWithJsonObject: " + userId + "|" + userName);
            }
        } catch (Exception ex) {
            ex.printStackTrace();
        }
    }

    // Fetches the user list with OkHttp and parses it with org.json.
    private void convertJson() {
        new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    OkHttpClient client = new OkHttpClient();
                    Request request = new Request.Builder()
                            .url("http://10.0.2.2:8101/api/user/list")
                            .build();
                    Response response = client.newCall(request).execute();
                    String data = response.body().string();
                    paseJsonWithJsonObject(data);
                } catch (Exception ex) {
                    ex.printStackTrace();
                }
            }
        }).start();
    }

    // Parses the XML document with a SAX parser; MyHandler receives the events.
    private void paseXMLWithSax(String data) {
        try {
            SAXParserFactory factory = SAXParserFactory.newInstance();
            XMLReader reader = factory.newSAXParser().getXMLReader();
            MyHandler handler = new MyHandler();
            reader.setContentHandler(handler);
            reader.parse(new InputSource(new StringReader(data)));
        } catch (Exception ex) {
            ex.printStackTrace();
        }
    }

    // Fetches demo.xml with OkHttp and parses it via SAX.
    private void convertXMLBySax() {
        new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    OkHttpClient client = new OkHttpClient();
                    Request request = new Request.Builder()
                            .url("http://10.0.2.2:8101/demo.xml")
                            .build();
                    Response response = client.newCall(request).execute();
                    String data = response.body().string();
                    paseXMLWithSax(data);
                } catch (Exception ex) {
                    ex.printStackTrace();
                }
            }
        }).start();
    }

    // Walks the XML document with a Pull parser, logging each d2p1:t_user's
    // userId/userName pair when its end tag is reached.
    private void paseXMLWithPull(String data) {
        try {
            XmlPullParserFactory factory = XmlPullParserFactory.newInstance();
            XmlPullParser xmlPullParser = factory.newPullParser();
            xmlPullParser.setInput(new StringReader(data));
            int eventType = xmlPullParser.getEventType();
            String id = "";
            String name = "";
            while (eventType != xmlPullParser.END_DOCUMENT) {
                String nodeName = xmlPullParser.getName();
                switch (eventType) {
                    case XmlPullParser.START_TAG:
                        if ("d2p1:userId".equals(nodeName)) {
                            id = xmlPullParser.nextText();
                        } else if ("d2p1:userName".equals(nodeName)) {
                            name = xmlPullParser.nextText();
                        }
                        break;
                    case XmlPullParser.END_TAG:
                        if ("d2p1:t_user".equals(nodeName)) {
                            Log.d(TAG, "paseXMLWithPull: " + id + "|" + name);
                        }
                        break;
                    default:
                        break;
                }
                eventType = xmlPullParser.next();
            }
        } catch (Exception ex) {
            ex.printStackTrace();
        }
    }

    // Fetches demo.xml with OkHttp and parses it via the Pull parser.
    private void convertXMLByPull() {
        new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    OkHttpClient client = new OkHttpClient();
                    Request request = new Request.Builder()
                            .url("http://10.0.2.2:8101/demo.xml")
                            .build();
                    Response response = client.newCall(request).execute();
                    String data = response.body().string();
                    paseXMLWithPull(data);
                } catch (Exception ex) {
                    ex.printStackTrace();
                }
            }
        }).start();
    }

    // Simple GET with OkHttp; the commented snippet shows how a POST would look.
    private void sendRequestWithOkHttp() {
        new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    /*
                     * RequestBody body = new FormBody.Builder()
                     *         .add("userName", "123")
                     *         .add("password", "<PASSWORD>")
                     *         .build();
                     * request.post(body);
                     */
                    OkHttpClient client = new OkHttpClient();
                    Request request = new Request.Builder()
                            .url("https://baidu.com")
                            .build();
                    Response response = client.newCall(request).execute();
                    String data = response.body().string();
                    showResponse(data);
                } catch (Exception ex) {
                    ex.printStackTrace();
                }
            }
        }).start();
    }

    // Simple GET with HttpURLConnection; the commented snippet shows the POST
    // variant.  The connection and reader are released in the finally block.
    private void sendRequestWithHttpUrlConnection() {
        new Thread(new Runnable() {
            @Override
            public void run() {
                HttpURLConnection connection = null;
                BufferedReader reader = null;
                try {
                    /*
                     * post
                     * connect.setRequestMethod("POST");
                     * DataOutputStream out = new DataOutputStream(connection.getOutputStream())
                     * out.writeBytes("username=admin&password=<PASSWORD>");
                     */
                    URL url = new URL("https://www.baidu.com/");
                    connection = (HttpURLConnection) url.openConnection();
                    connection.setRequestMethod("GET");
                    connection.setConnectTimeout(8000);
                    connection.setReadTimeout(8000);
                    InputStream in = connection.getInputStream();
                    reader = new BufferedReader(new InputStreamReader(in));
                    StringBuilder response = new StringBuilder();
                    String line;
                    while ((line = reader.readLine()) != null) {
                        response.append(line);
                    }
                    showResponse(response.toString());
                } catch (Exception ex) {
                    ex.printStackTrace();
                } finally {
                    if (connection != null) {
                        connection.disconnect();
                    }
                    if (reader != null) {
                        try {
                            reader.close();
                        } catch (Exception ex) {
                            ex.printStackTrace();
                        }
                    }
                }
            }
        }).start();
    }

    // Marshals the result back onto the UI thread before touching the TextView.
    private void showResponse(final String str) {
        runOnUiThread(new Runnable() {
            @Override
            public void run() {
                Log.d(TAG, "run: " + str);
                responseText.setText(str);
            }
        });
    }
}
/**
 * Looks up the tab whose {@code href} property matches the given value.
 *
 * @param href
 *            the href identifying the tab
 * @return the matching tab
 * @throws NoSuchElementException
 *             if no tab has the given href
 */
static Tab valueWithHref(final String href) {
    Tab match = null;
    for (final Tab candidate : Tab.values()) {
        if (candidate.href.equals(href)) {
            match = candidate;
            break;
        }
    }
    if (match == null) {
        throw new NoSuchElementException("No such tab with href " + href);
    }
    return match;
}
// ReadAll copies all elements from reader r into the provided column
// pointers. ReadAll is not tuned for performance and is intended for
// testing purposes.
//
// Each entry in columns must be a pointer to a slice (e.g. *[]int); the
// decoded values for that column are appended to the pointed-to slice.
func ReadAll(ctx context.Context, r Reader, columns ...interface{}) error {
	columnsv := make([]reflect.Value, len(columns))
	types := make([]reflect.Type, len(columns))
	for i := range columns {
		columnsv[i] = reflect.ValueOf(columns[i])
		if columnsv[i].Type().Kind() != reflect.Ptr {
			return errors.E(errors.Invalid, "attempted to read into non-pointer")
		}
		// Elem().Elem(): pointer -> slice -> element type of the column.
		types[i] = reflect.TypeOf(columns[i]).Elem().Elem()
	}
	buf := frame.Make(slicetype.New(types...), defaultChunksize, defaultChunksize)
	for {
		n, err := r.Read(ctx, buf)
		if err != nil && err != EOF {
			return err
		}
		// Narrow the frame to the n rows actually filled by this Read
		// before appending, then widen it back to full capacity below.
		buf = buf.Slice(0, n)
		for i := range columnsv {
			columnsv[i].Elem().Set(reflect.AppendSlice(columnsv[i].Elem(), buf.Value(i)))
		}
		// EOF may be returned together with a final batch of rows, so the
		// append above must happen before we break.
		if err == EOF {
			break
		}
		buf = buf.Slice(0, buf.Cap())
	}
	return nil
}
/* The priority_map function maps dylan thread priorities to windows priorities
 * as below:
 *
 *   Dylan Priorities    windows priority
 *      < -1249      THREAD_PRIORITY_IDLE
 *   -1249 to -750   THREAD_PRIORITY_LOWEST
 *    -749 to -250   THREAD_PRIORITY_BELOW_NORMAL
 *    -249 to  249   THREAD_PRIORITY_NORMAL
 *     250 to  749   THREAD_PRIORITY_ABOVE_NORMAL
 *     750 to 1249   THREAD_PRIORITY_HIGHEST
 *      > 1249       THREAD_PRIORITY_TIME_CRITICAL
 *
 * (The upper bound of the BELOW_NORMAL bucket is -250, not -249: C integer
 * division truncates toward zero, so (-250 - 250) / 500 == -1 while
 * (-249 - 250) / 500 == 0.)
 */
int priority_map(int dylan_priority)
{
  int priority;

  if (dylan_priority < 0) {
    if (dylan_priority < -1249) {
      priority = THREAD_PRIORITY_IDLE;
    } else {
      /* Biased division maps 500-wide bands onto -2 (LOWEST) / -1 (BELOW_NORMAL). */
      priority = (dylan_priority - 250) / 500;
    }
  } else {
    if (dylan_priority > 1249) {
      priority = THREAD_PRIORITY_TIME_CRITICAL;
    } else {
      /* Maps 500-wide bands onto 0 (NORMAL) / 1 (ABOVE_NORMAL) / 2 (HIGHEST). */
      priority = (dylan_priority + 250) / 500;
    }
  }
  return (priority);
}
def _get_projected_grids(imtgrid, topobase, projstr):
    """Resample topography to the IMT grid's extent, then project both grids.

    :param imtgrid: grid of intensity-measure values; defines the area of interest.
    :param topobase: topography grid covering at least the IMT grid's extent.
    :param projstr: proj4 string describing the target projection.
    :returns: tuple of (projected IMT grid, projected topography grid).
    """
    # Clip the topography bounds to the IMT grid's footprint.
    topo_dict = topobase.getGeoDict()
    bounds = topo_dict.getBoundsWithin(imtgrid.getGeoDict())

    # Resample both grids onto the common (clipped) grid definition.
    imt_resampled = imtgrid.interpolateToGrid(bounds, method='nearest')
    topo_resampled = topobase.interpolateToGrid(bounds, method='linear')

    # Projection requires float64 data; upcast in place.
    topo_resampled._data = topo_resampled._data.astype(np.float64)

    projected_topo = topo_resampled.project(projstr, method='bilinear')
    projected_imt = imt_resampled.project(projstr)
    return (projected_imt, projected_topo)
<filename>src/main/java/uk/ac/ic/wlgitbridge/bridge/db/DBInitException.java package uk.ac.ic.wlgitbridge.bridge.db; /** * Created by winston on 23/08/2016. */ public class DBInitException extends RuntimeException { public DBInitException(String message) { super(message); } public DBInitException(String message, Throwable cause) { super(message, cause); } public DBInitException(Throwable cause) { super(cause); } }
def recognize_faces_in_directory(directory: str) -> Set[str]:
    """Recognize faces in every ``*.jpg`` image directly inside ``directory``.

    :param directory: directory to scan; a trailing path separator is optional.
    :return: the union of all names recognized across the matching images.
    """
    import os  # local import keeps this fix self-contained

    names: Set[str] = set()
    # os.path.join tolerates a missing trailing separator; the previous
    # string concatenation ("dir" + "*.jpg" -> "dir*.jpg") silently
    # matched nothing unless the caller remembered to append one.
    for file in glob.glob(os.path.join(directory, "*.jpg")):
        names.update(recognize_faces(path=file))
    return names
Serum markers for severe Clostridium difficile infection in immunosuppressed hospitalized patients. Clostridium difficile infection (CDI) has emerged as the leading cause of nosocomial diarrhea in the developed world. The prompt recognition of severe CDI is essential in providing early aggressive therapy. Though previous studies have identified leukocytosis, azotemia, and hypoalbuminemia as markers to differentiate severe from non-severe CDI in the general patient population, there is little data in immunosuppressed patients. We conducted a retrospective chart review of immunosuppressed patients with CDI to identify serum markers associated with severe CDI. Twenty-nine immunosuppressed patients with CDI (nine with severe disease) were identified. Those with severe disease were older and had evidence of renal dysfunction. The white blood cell count, platelet, and albumin levels were the same in the severe and non-severe immunosuppressed CDI patients. Therefore, recognized serum markers of severe CDI are not universally useful in immunosuppressed patients. Moreover, the clinician must be aware that immunosuppressed patients can develop severe CDI while remaining leukopenic.
/**
 * Create a new team entity with a name.
 *
 * @param name name of team.
 * @param attributes additional attributes for the Team.
 * @return A newly minted Team that exists in the VersionOne system.
 */
public Team team(String name, Map<String, Object> attributes) {
    // Build, populate and persist in one pass.
    final Team created = new Team(instance);
    created.setName(name);
    addAttributes(created, attributes);
    created.save();
    return created;
}
Back to the future: reflections and directions of South African marine bioinvasion research Biological invasions continue to increase around the world, with impacts on many coastal marine systems. Here we review the South African marine invasion literature which, despite the field being relatively new, has grown to have significant presence in both the local and international arenas. Of the 79 papers reviewed, 70% focused on the establishment and spread of alien species, with modes of transport and introduction largely overlooked. An emphasis was also apparent towards field studies, in particular survey work, with few experimental studies. The overwhelming majority of papers focused on a single species, the Mediterranean mussel Mytilus galloprovincialis, reflecting the scale of this invasion and the tractable nature of rocky shores as study systems. With the exception of this one species, the impacts of marine alien species have rarely been quantified. We suggest that future research extends the taxonomic coverage of present work and develops a better understanding of the mechanisms of introduction, establishment and spread of marine alien species. Through an experimental approach, the drivers of altered ecological patterns and processes resulting from invasions should be addressed, providing insight into associated impacts. This approach will maintain the local applicability and international relevance of South African marine invasion research.
/**
 * Forum service backed by the post and tag DAOs.
 *
 * @author Vlad Mihalcea
 */
@Repository
public class ForumServiceImpl implements ForumService {

    @Autowired
    private PostDAO postDAO;

    @Autowired
    private TagDAO tagDAO;

    /**
     * Creates and persists a post with the given title, attaching the
     * already-existing tags that match the supplied names.
     */
    @Override
    @Transactional
    public Post newPost(String title, String... tags) {
        final Post created = new Post();
        created.setTitle(title);
        created.getTags().addAll(tagDAO.findByName(tags));
        return postDAO.persist(created);
    }
}
/**
 * Persist a share object of a program to a user group. If the program is already shared to the user group, its relation will be updated. Otherwise, a new
 * one will be created.
 *
 * @param userGroup the user group whose access rights have to be changed, not null
 * @param program the program affected, not null
 * @param relation the access right of the user group for the program, not null
 * @return the share object for the user group and program with the specified relation
 */
public UserGroupProgramShare persistUserProgramShare(UserGroup userGroup, Program program, Relation relation) {
    Assert.notNull(userGroup);
    Assert.notNull(program);
    Assert.notNull(relation);
    UserGroupProgramShare share = this.loadUserGroupProgramShare(userGroup, program);
    if ( share != null ) {
        // Existing share: only the relation needs refreshing.
        share.setRelation(relation);
        return share;
    }
    // No share yet: create and persist a fresh one.
    share = new UserGroupProgramShare(userGroup, program, relation);
    this.session.save(share);
    return share;
}
/** mainly for embedding SVG
 * no checking -
 * @author pm286
 *
 */
public class HtmlObject extends HtmlElement {

    private final static Logger LOG = Logger.getLogger(HtmlObject.class);

    public final static String TAG = "object";
    private static final String SRC = "src";
    public static final String SVGTYPE = "image/svg+xml";
    private static final String HEIGHT = "height";
    static final String WIDTH = "width";

    /** Creates an empty &lt;object&gt; element. */
    public HtmlObject() {
        super(TAG);
    }

    /** Sets the {@code src} attribute (e.g. the SVG resource to embed). */
    public void setSrc(String src) {
        addAttribute(new Attribute(SRC, src));
    }

    /** @return the {@code src} attribute value, or null if absent */
    public String getSrc() {
        return getAttributeValue(SRC);
    }

    /** Sets the {@code height} attribute from a numeric value. */
    public void setHeight(double imgHeight) {
        addAttribute(new Attribute(HEIGHT, Double.toString(imgHeight)));
    }

    /** Sets the {@code width} attribute from a numeric value. */
    public void setWidth(double imgWidth) {
        addAttribute(new Attribute(WIDTH, Double.toString(imgWidth)));
    }
}
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2016 Stefan Roese <[email protected]> */ #include <common.h> #include <dm.h> #include <fdtdec.h> #include <init.h> #include <asm/cache.h> #include <asm/global_data.h> #include <asm/ptrace.h> #include <linux/libfdt.h> #include <linux/sizes.h> #include <pci.h> #include <asm/io.h> #include <asm/system.h> #include <asm/arch/cpu.h> #include <asm/arch/soc.h> #include <asm/armv8/mmu.h> DECLARE_GLOBAL_DATA_PTR; /* * Not all memory is mapped in the MMU. So we need to restrict the * memory size so that U-Boot does not try to access it. Also, the * internal registers are located at 0xf000.0000 - 0xffff.ffff. * Currently only 2GiB are mapped for system memory. This is what * we pass to the U-Boot subsystem here. */ #define USABLE_RAM_SIZE 0x80000000ULL phys_size_t board_get_usable_ram_top(phys_size_t total_size) { unsigned long top = CFG_SYS_SDRAM_BASE + min(gd->ram_size, USABLE_RAM_SIZE); return (gd->ram_top > top) ? top : gd->ram_top; } /* * On ARMv8, MBus is not configured in U-Boot. To enable compilation * of the already implemented drivers, lets add a dummy version of * this function so that linking does not fail. 
 */
const struct mbus_dram_target_info *mvebu_mbus_dram_info(void)
{
	/* Dummy: MBus windows are not set up by U-Boot on ARMv8. */
	return NULL;
}

/* Dispatch bank-size setup to the SoC-specific helper, falling back to DT. */
__weak int dram_init_banksize(void)
{
	if (IS_ENABLED(CONFIG_ARMADA_8K))
		return a8k_dram_init_banksize();
	else if (IS_ENABLED(CONFIG_ARMADA_3700))
		return a3700_dram_init_banksize();
	else if (IS_ENABLED(CONFIG_ALLEYCAT_5))
		return alleycat5_dram_init_banksize();
	else
		return fdtdec_setup_memory_banksize();
}

/* Determine total DRAM size, preferring SoC-specific scanning over DT. */
__weak int dram_init(void)
{
	if (IS_ENABLED(CONFIG_ARMADA_8K)) {
		gd->ram_size = a8k_dram_scan_ap_sz();
		if (gd->ram_size != 0)
			return 0;
	}

	if (IS_ENABLED(CONFIG_ARMADA_3700))
		return a3700_dram_init();

	if (IS_ENABLED(CONFIG_ALLEYCAT_5))
		return alleycat5_dram_init();

	/* Fallback: take the memory layout from the device tree. */
	if (fdtdec_setup_mem_size_base() != 0)
		return -EINVAL;

	return 0;
}

int arch_cpu_init(void)
{
	/* Nothing to do (yet) */
	return 0;
}

int arch_early_init_r(void)
{
	struct udevice *dev;
	int ret;
	int i;

	/*
	 * Loop over all MISC uclass drivers to call the comphy code
	 * and init all CP110 devices enabled in the DT
	 */
	i = 0;
	while (1) {
		/* Call the comphy code via the MISC uclass driver */
		ret = uclass_get_device(UCLASS_MISC, i++, &dev);

		/* We're done, once no further CP110 device is found */
		if (ret)
			break;
	}

	/* Cause the SATA device to do its early init */
	uclass_first_device(UCLASS_AHCI, &dev);

	/* Trigger PCIe devices detection */
	if (IS_ENABLED(CONFIG_PCI))
		pci_init();

	return 0;
}
Prime Minister Benjamin Netanyahu is considering closing down the Israeli offices of Qatari TV network Al Jazeera. Follow Ynetnews on Facebook and Twitter The prime minister held an initial discussion on the topic on Monday, and the Government Press Office (GPO), the Foreign Ministry, the Shin Bet and the rest of the security establishment have all begun studying the issue. Jordan and Saudi Arabia have already shut down Al Jazeera's offices as part of an Arab boycott against Qatar. Israel likely wants to take advantage of the situation to pull the plug on the Doha-based network's activity in the country. There have been numerous calls to close Al Jazeera's offices in the country over the last few years because of the network's negative coverage of Israel. However, concerns such a move would be a PR disaster for Israel along with the fact most of Al Jazeera's 34 employees are Israeli Arabs have so far stayed the government's hand on the matter. "Israel, along with the Arab states, sees Al Jazeera as a danger, a media body similar to those in Nazi Germany," Defense Minister Avigdor Lieberman said Monday. Walid al-Omari, Al Jazeera's bureau chief in Jerusalem, defended the network, telling Ynet on Tuesday, "We don't incite against Israel or anyone else." "We convey the news. It's not our fault if the news is ugly," al-Omari added. "We convey everything that happens to our viewers and to our target audience. In Israel, we put on air people from the government and the opposition, the right and the left, and even settlers... Even the prime minister himself was on our channel when he was the head of the opposition in 2009." Al Jazeera's Jerusalem bureau chief Walid al-Omari He said the fact Israel is considering closing down the network's offices in the country is a form of incitement. "I don't know why Israel needs to be dragged after everything that happens in the Arab world... 
Al Jazeera is a media body that operates lawfully and legally in Israel and in any other country in the world," al-Omari claimed. Al Jazeera is expected to petition the Supreme Court against such a move. "We've been in this type of situation many times, be it in Israel, in Arab countries, or in other places," he said. "The Israeli government can't force itself on the world... and behave like a dark dictatorship. This is unacceptable," al-Omari added.
/**
 * Select - Abstract super class for LOOKUPSWITCH and TABLESWITCH instructions.
 *
 * <p>We use our super's <code>target</code> property as the default target.
 *
 * <p>Because the switch payload must be 4-byte aligned in the class file, the
 * instruction length varies with its byte position; see {@link #updatePosition}.
 *
 * @see LOOKUPSWITCH
 * @see TABLESWITCH
 * @see InstructionList
 */
public abstract class Select extends BranchInstruction implements VariableLengthInstruction,
        StackConsumer /* @since 6.0 */, StackProducer {

    /**
     * @deprecated (since 6.0) will be made private; do not access directly, use getter/setter
     */
    @Deprecated
    protected int[] match; // matches, i.e., case 1: ... TODO could be package-protected?

    /**
     * @deprecated (since 6.0) will be made private; do not access directly, use getter/setter
     */
    @Deprecated
    protected int[] indices; // target offsets TODO could be package-protected?

    /**
     * @deprecated (since 6.0) will be made private; do not access directly, use getter/setter
     */
    @Deprecated
    protected InstructionHandle[] targets; // target objects in instruction list TODO could be package-protected?

    /**
     * @deprecated (since 6.0) will be made private; do not access directly, use getter/setter
     */
    @Deprecated
    protected int fixed_length; // fixed length defined by subclasses TODO could be package-protected?

    /**
     * @deprecated (since 6.0) will be made private; do not access directly, use getter/setter
     */
    @Deprecated
    protected int match_length; // number of cases TODO could be package-protected?

    /**
     * @deprecated (since 6.0) will be made private; do not access directly, use getter/setter
     */
    @Deprecated
    protected int padding = 0; // number of pad bytes for alignment TODO could be package-protected?

    /**
     * Empty constructor needed for Instruction.readInstruction.
     * Not to be used otherwise.
     */
    Select() {
    }

    /**
     * (Match, target) pairs for switch.
     * `Match' and `targets' must have the same length of course.
     *
     * @param match array of matching values
     * @param targets instruction targets
     * @param defaultTarget default instruction target
     */
    Select(final short opcode, final int[] match, final InstructionHandle[] targets,
            final InstructionHandle defaultTarget) {
        // don't set default target before instuction is built
        super(opcode, null);
        this.match = match;
        this.targets = targets;
        // now it's safe to set default target
        setTarget(defaultTarget);
        for (final InstructionHandle target2 : targets) {
            notifyTarget(null, target2, this);
        }
        if ((match_length = match.length) != targets.length) {
            throw new ClassGenException("Match and target array have not the same length: Match length: "
                    + match.length + " Target length: " + targets.length);
        }
        indices = new int[match_length];
    }

    /**
     * Since this is a variable length instruction, it may shift the following
     * instructions which then need to update their position.
     *
     * Called by InstructionList.setPositions when setting the position for every
     * instruction. In the presence of variable length instructions `setPositions'
     * performs multiple passes over the instruction list to calculate the
     * correct (byte) positions and offsets by calling this function.
     *
     * @param offset additional offset caused by preceding (variable length) instructions
     * @param max_offset the maximum offset that may be caused by these instructions
     * @return additional offset caused by possible change of this instruction's length
     */
    @Override
    protected int updatePosition( final int offset, final int max_offset ) {
        setPosition(getPosition() + offset); // Additional offset caused by preceding SWITCHs, GOTOs, etc.
        final short old_length = (short) super.getLength();
        /* Alignment on 4-byte-boundary, + 1, because of tag byte. */
        padding = (4 - ((getPosition() + 1) % 4)) % 4;
        super.setLength((short) (fixed_length + padding)); // Update length
        return super.getLength() - old_length;
    }

    /**
     * Dump instruction as byte code to stream out.
     * @param out Output stream
     */
    @Override
    public void dump( final DataOutputStream out ) throws IOException {
        out.writeByte(super.getOpcode());
        // Pad with zero bytes so the following int is 4-byte aligned.
        for (int i = 0; i < padding; i++) {
            out.writeByte(0);
        }
        super.setIndex(getTargetOffset()); // Write default target offset
        out.writeInt(super.getIndex());
    }

    /**
     * Read needed data (e.g. index) from file.
     */
    @Override
    protected void initFromFile( final ByteSequence bytes, final boolean wide ) throws IOException {
        padding = (4 - (bytes.getIndex() % 4)) % 4; // Compute number of pad bytes
        for (int i = 0; i < padding; i++) {
            bytes.readByte();
        }
        // Default branch target common for both cases (TABLESWITCH, LOOKUPSWITCH)
        super.setIndex(bytes.readInt());
    }

    /**
     * @return mnemonic for instruction
     */
    @Override
    public String toString( final boolean verbose ) {
        final StringBuilder buf = new StringBuilder(super.toString(verbose));
        if (verbose) {
            for (int i = 0; i < match_length; i++) {
                String s = "null";
                if (targets[i] != null) {
                    s = targets[i].getInstruction().toString();
                }
                buf.append("(").append(match[i]).append(", ").append(s).append(" = {").append(
                        indices[i]).append("})");
            }
        } else {
            buf.append(" ...");
        }
        return buf.toString();
    }

    /**
     * Set branch target for `i'th case
     */
    public void setTarget( final int i, final InstructionHandle target ) { // TODO could be package-protected?
        notifyTarget(targets[i], target, this);
        targets[i] = target;
    }

    /**
     * @param old_ih old target
     * @param new_ih new target
     */
    @Override
    public void updateTarget( final InstructionHandle old_ih, final InstructionHandle new_ih ) {
        boolean targeted = false;
        // The default target and any number of case targets may all match.
        if (super.getTarget() == old_ih) {
            targeted = true;
            setTarget(new_ih);
        }
        for (int i = 0; i < targets.length; i++) {
            if (targets[i] == old_ih) {
                targeted = true;
                setTarget(i, new_ih);
            }
        }
        if (!targeted) {
            throw new ClassGenException("Not targeting " + old_ih);
        }
    }

    /**
     * @return true, if ih is target of this instruction
     */
    @Override
    public boolean containsTarget( final InstructionHandle ih ) {
        if (super.getTarget() == ih) {
            return true;
        }
        for (final InstructionHandle target2 : targets) {
            if (target2 == ih) {
                return true;
            }
        }
        return false;
    }

    @Override
    protected Object clone() throws CloneNotSupportedException {
        // Deep-copy the arrays so the clone's cases can be edited independently.
        final Select copy = (Select) super.clone();
        copy.match = match.clone();
        copy.indices = indices.clone();
        copy.targets = targets.clone();
        return copy;
    }

    /**
     * Inform targets that they're not targeted anymore.
     */
    @Override
    void dispose() {
        super.dispose();
        for (final InstructionHandle target2 : targets) {
            target2.removeTargeter(this);
        }
    }

    /**
     * @return array of match indices
     */
    public int[] getMatchs() {
        return match;
    }

    /**
     * @return array of match target offsets
     */
    public int[] getIndices() {
        return indices;
    }

    /**
     * @return array of match targets
     */
    public InstructionHandle[] getTargets() {
        return targets;
    }

    /**
     * @return match entry
     * @since 6.0
     */
    final int getMatch(final int index) {
        return match[index];
    }

    /**
     * @return index entry from indices
     * @since 6.0
     */
    final int getIndices(final int index) {
        return indices[index];
    }

    /**
     * @return target entry
     * @since 6.0
     */
    final InstructionHandle getTarget(final int index) {
        return targets[index];
    }

    /**
     * @return the fixed_length
     * @since 6.0
     */
    final int getFixed_length() {
        return fixed_length;
    }

    /**
     * @param fixed_length the fixed_length to set
     * @since 6.0
     */
    final void setFixed_length(final int fixed_length) {
        this.fixed_length = fixed_length;
    }

    /**
     * @return the match_length
     * @since 6.0
     */
    final int getMatch_length() {
        return match_length;
    }

    /**
     * @param match_length the match_length to set
     * @since 6.0
     */
    final int setMatch_length(final int match_length) {
        this.match_length = match_length;
        return match_length;
    }

    /**
     *
     * @param index
     * @param value
     * @since 6.0
     */
    final void setMatch(final int index, final int value) {
        match[index] = value;
    }

    /**
     *
     * @param array
     * @since 6.0
     */
    final void setIndices(final int[] array) {
        indices = array;
    }

    /**
     *
     * @param array
     * @since 6.0
     */
    final void setMatches(final int[] array) {
        match = array;
    }

    /**
     *
     * @param array
     * @since 6.0
     */
    final void setTargets(final InstructionHandle[] array) {
        targets = array;
    }

    /**
     *
     * @return the padding
     * @since 6.0
     */
    final int getPadding() {
        return padding;
    }

    /** @since 6.0 */
    final int setIndices(final int i, final int value) {
        indices[i] = value;
        return value; // Allow use in nested calls
    }
}
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jsnum_h
#define jsnum_h

#include "mozilla/FloatingPoint.h"

#include "jscntxt.h"

#include "vm/NumericConversions.h"

extern double js_PositiveInfinity;
extern double js_NegativeInfinity;

namespace js {

extern bool
InitRuntimeNumberState(JSRuntime *rt);

#if !ENABLE_INTL_API
extern void
FinishRuntimeNumberState(JSRuntime *rt);
#endif

} /* namespace js */

/* Initialize the Number class, returning its prototype object. */
extern JSObject *
js_InitNumberClass(JSContext *cx, js::HandleObject obj);

/*
 * String constants for global function names, used in jsapi.c and jsnum.c.
 */
extern const char js_isNaN_str[];
extern const char js_isFinite_str[];
extern const char js_parseFloat_str[];
extern const char js_parseInt_str[];

class JSString;

/*
 * When base == 10, this function implements ToString() as specified by
 * ECMA-262-5 section 9.8.1; but note that it handles integers specially for
 * performance.  See also js::NumberToCString().
 */
template <js::AllowGC allowGC>
extern JSString *
js_NumberToString(JSContext *cx, double d);

namespace js {

/* Int32-specialized fast path for number-to-string conversion. */
template <AllowGC allowGC>
extern JSFlatString *
Int32ToString(JSContext *cx, int32_t i);

/*
 * Convert an integer or double (contained in the given value) to a string and
 * append to the given buffer.
 */
extern bool JS_FASTCALL
NumberValueToStringBuffer(JSContext *cx, const Value &v, StringBuffer &sb);

/* Same as js_NumberToString, different signature. */
extern JSFlatString *
NumberToString(JSContext *cx, double d);

extern JSFlatString *
IndexToString(JSContext *cx, uint32_t index);

/*
 * Usually a small amount of static storage is enough, but sometimes we need
 * to dynamically allocate much more.
 This struct encapsulates that.
 * Dynamically allocated memory will be freed when the object is destroyed.
 */
struct ToCStringBuf
{
    /*
     * The longest possible result that would need to fit in sbuf is
     * (-0x80000000).toString(2), which has length 33.  Longer cases are
     * possible, but they'll go in dbuf.
     */
    static const size_t sbufSize = 34;
    char sbuf[sbufSize];
    char *dbuf; /* heap fallback; freed by the destructor when non-null */

    ToCStringBuf();
    ~ToCStringBuf();
};

/*
 * Convert a number to a C string.  When base==10, this function implements
 * ToString() as specified by ECMA-262-5 section 9.8.1.  It handles integral
 * values cheaply.  Return NULL if we ran out of memory.  See also
 * js_NumberToCString().
 */
extern char *
NumberToCString(JSContext *cx, ToCStringBuf *cbuf, double d, int base = 10);

/*
 * The largest positive integer such that all positive integers less than it
 * may be precisely represented using the IEEE-754 double-precision format.
 */
const double DOUBLE_INTEGRAL_PRECISION_LIMIT = uint64_t(1) << 53;

/*
 * Parse a decimal number encoded in |chars|.  The decimal number must be
 * sufficiently small that it will not overflow the integrally-precise range of
 * the double type -- that is, the number will be smaller than
 * DOUBLE_INTEGRAL_PRECISION_LIMIT
 */
extern double
ParseDecimalNumber(const JS::TwoByteChars chars);

/*
 * Compute the positive integer of the given base described immediately at the
 * start of the range [start, end) -- no whitespace-skipping, no magical
 * leading-"0" octal or leading-"0x" hex behavior, no "+"/"-" parsing, just
 * reading the digits of the integer.  Return the index one past the end of the
 * digits of the integer in *endp, and return the integer itself in *dp.  If
 * base is 10 or a power of two the returned integer is the closest possible
 * double; otherwise extremely large integers may be slightly inaccurate.
 *
 * If [start, end) does not begin with a number with the specified base,
 * *dp == 0 and *endp == start upon return.
 */
extern bool
GetPrefixInteger(JSContext *cx, const jschar *start, const jschar *end, int base,
                 const jschar **endp, double *dp);

/* ES5 9.3 ToNumber, overwriting *vp with the appropriate number value. */
JS_ALWAYS_INLINE bool
ToNumber(JSContext *cx, Value *vp)
{
#ifdef DEBUG
    {
        SkipRoot skip(cx, vp);
        MaybeCheckStackRoots(cx);
    }
#endif
    /* Fast path: already a number, nothing to convert. */
    if (vp->isNumber())
        return true;
    double d;
    /* Declared locally to avoid pulling in the slow path's header deps. */
    extern bool ToNumberSlow(JSContext *cx, js::Value v, double *dp);
    if (!ToNumberSlow(cx, *vp, &d))
        return false;
    vp->setNumber(d);
    return true;
}

JSBool
num_parseInt(JSContext *cx, unsigned argc, Value *vp);

} /* namespace js */

/*
 * Similar to strtod except that it replaces overflows with infinities of the
 * correct sign, and underflows with zeros of the correct sign.  Guaranteed to
 * return the closest double number to the given input in dp.
 *
 * Also allows inputs of the form [+|-]Infinity, which produce an infinity of
 * the appropriate sign.  The case of the "Infinity" string must match exactly.
 * If the string does not contain a number, set *ep to s and return 0.0 in dp.
 * Return false if out of memory.
 */
extern JSBool
js_strtod(JSContext *cx, const jschar *s, const jschar *send, const jschar **ep,
          double *dp);

extern JSBool
js_num_valueOf(JSContext *cx, unsigned argc, js::Value *vp);

namespace js {

/* Store v's value in *pi if it is (or exactly fits in) an int32. */
static JS_ALWAYS_INLINE bool
ValueFitsInInt32(const Value &v, int32_t *pi)
{
    if (v.isInt32()) {
        *pi = v.toInt32();
        return true;
    }
    return v.isDouble() && mozilla::DoubleIsInt32(v.toDouble(), pi);
}

/*
 * Returns true if the given value is definitely an index: that is, the value
 * is a number that's an unsigned 32-bit integer.
 *
 * This method prioritizes common-case speed over accuracy in every case.  It
 * can produce false negatives (but not false positives): some values which are
 * indexes will be reported not to be indexes by this method.  Users must
 * consider this possibility when using this method.
*/ static JS_ALWAYS_INLINE bool IsDefinitelyIndex(const Value &v, uint32_t *indexp) { if (v.isInt32() && v.toInt32() >= 0) { *indexp = v.toInt32(); return true; } int32_t i; if (v.isDouble() && mozilla::DoubleIsInt32(v.toDouble(), &i) && i >= 0) { *indexp = uint32_t(i); return true; } return false; } /* ES5 9.4 ToInteger. */ static inline bool ToInteger(JSContext *cx, const js::Value &v, double *dp) { #ifdef DEBUG { SkipRoot skip(cx, &v); MaybeCheckStackRoots(cx); } #endif if (v.isInt32()) { *dp = v.toInt32(); return true; } if (v.isDouble()) { *dp = v.toDouble(); } else { extern bool ToNumberSlow(JSContext *cx, Value v, double *dp); if (!ToNumberSlow(cx, v, dp)) return false; } *dp = ToInteger(*dp); return true; } inline bool SafeAdd(int32_t one, int32_t two, int32_t *res) { *res = one + two; int64_t ores = (int64_t)one + (int64_t)two; return ores == (int64_t)*res; } inline bool SafeSub(int32_t one, int32_t two, int32_t *res) { *res = one - two; int64_t ores = (int64_t)one - (int64_t)two; return ores == (int64_t)*res; } inline bool SafeMul(int32_t one, int32_t two, int32_t *res) { *res = one * two; int64_t ores = (int64_t)one * (int64_t)two; return ores == (int64_t)*res; } } /* namespace js */ #endif /* jsnum_h */
// CreateExpect creates a new Conntrack Expect entry. Warning: Experimental, haven't // got this to create an Expect correctly. Best-effort implementation based on kernel source. func (c *Conn) CreateExpect(ex Expect) error { attrs, err := ex.marshal() if err != nil { return err } pf := netfilter.ProtoIPv4 if ex.Tuple.IP.IsIPv6() && ex.Mask.IP.IsIPv6() && ex.TupleMaster.IP.IsIPv6() { pf = netfilter.ProtoIPv6 } req, err := netfilter.MarshalNetlink( netfilter.Header{ SubsystemID: netfilter.NFSubsysCTNetlinkExp, MessageType: netfilter.MessageType(ctExpNew), Family: pf, Flags: netlink.Request | netlink.Acknowledge | netlink.Excl | netlink.Create, }, attrs) if err != nil { return err } _, err = c.conn.Query(req) if err != nil { return err } return nil }
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. // Package cloud9 provides the client and types for making API // requests to AWS Cloud9. // // AWS Cloud9 is a collection of tools that you can use to code, build, run, // test, debug, and release software in the cloud. // // For more information about AWS Cloud9, see the AWS Cloud9 User Guide (https://docs.aws.amazon.com/cloud9/latest/user-guide). // // AWS Cloud9 supports these operations: // // * CreateEnvironmentEC2: Creates an AWS Cloud9 development environment, // launches an Amazon EC2 instance, and then connects from the instance to // the environment. // // * CreateEnvironmentMembership: Adds an environment member to an environment. // // * DeleteEnvironment: Deletes an environment. If an Amazon EC2 instance // is connected to the environment, also terminates the instance. // // * DeleteEnvironmentMembership: Deletes an environment member from an environment. // // * DescribeEnvironmentMemberships: Gets information about environment members // for an environment. // // * DescribeEnvironments: Gets information about environments. // // * DescribeEnvironmentStatus: Gets status information for an environment. // // * ListEnvironments: Gets a list of environment identifiers. // // * UpdateEnvironment: Changes the settings of an existing environment. // // * UpdateEnvironmentMembership: Changes the settings of an existing environment // member for an environment. // // See https://docs.aws.amazon.com/goto/WebAPI/cloud9-2017-09-23 for more information on this service. // // See cloud9 package documentation for more information. // https://docs.aws.amazon.com/sdk-for-go/api/service/cloud9/ // // Using the Client // // To AWS Cloud9 with the SDK use the New function to create // a new service client. With that client you can make API requests to the service. // These clients are safe to use concurrently. // // See the SDK's documentation for more information on how to use the SDK. 
// https://docs.aws.amazon.com/sdk-for-go/api/ // // See aws.Config documentation for more information on configuring SDK clients. // https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config // // See the AWS Cloud9 client Cloud9 for more // information on creating client for this service. // https://docs.aws.amazon.com/sdk-for-go/api/service/cloud9/#New package cloud9
/// Try to CSE the users of \p From to the users of \p To. /// The original users of \p To are passed in ToApplyWitnessUsers. /// Returns true on success. static bool tryToCSEOpenExtCall(OpenExistentialAddrInst *From, OpenExistentialAddrInst *To, ApplyWitnessPair ToApplyWitnessUsers, DominanceInfo *DA) { assert(From != To && "Can't replace instruction with itself"); ApplyInst *FromAI = nullptr; ApplyInst *ToAI = nullptr; WitnessMethodInst *FromWMI = nullptr; WitnessMethodInst *ToWMI = nullptr; std::tie(FromAI, FromWMI) = getOpenExistentialUsers(From); std::tie(ToAI, ToWMI) = ToApplyWitnessUsers; if (!FromAI || !ToAI || !FromWMI || !ToWMI) return false; if (FromWMI->getMember() != ToWMI->getMember()) return false; if (!DA->properlyDominates(ToWMI, FromWMI)) return false; SILBuilder Builder(FromAI); assert(FromAI->getArguments().size() == ToAI->getArguments().size() && "Invalid number of arguments"); if (ToAI->getSubstitutions().size() != 1) return false; SmallVector<SILValue, 8> Args; for (auto Op : FromAI->getArguments()) { Args.push_back(Op == From ? To : Op); } auto FnTy = ToAI->getSubstCalleeSILType(); auto ResTy = FnTy.castTo<SILFunctionType>()->getSILResult(); ApplyInst *NAI = Builder.createApply(ToAI->getLoc(), ToWMI, FnTy, ResTy, ToAI->getSubstitutions(), Args, ToAI->isNonThrowing()); FromAI->replaceAllUsesWith(NAI); FromAI->eraseFromParent(); NumOpenExtRemoved++; return true; }
package com.github.ompc.athing.aliyun.thing.tsl.schema;

import com.google.gson.annotations.SerializedName;

import java.util.LinkedList;
import java.util.List;

/**
 * TSL service element.
 * <p>
 * Describes a thing-model service: how it is invoked (see {@link CallType}),
 * the method name it maps to ({@code thing.service.<identifier>}), and its
 * input/output parameter declarations.
 */
public class TslServiceElement extends TslElement {

    // How the service is invoked: asynchronously or synchronously.
    @SerializedName("callType")
    private final CallType callType;

    // Method name derived from the identifier; serialized as "method".
    // Annotation added for consistency with the other serialized fields
    // (behavior-identical: Gson defaults to the field name).
    @SerializedName("method")
    private final String method;

    // Declarations of the service's input parameters.
    @SerializedName("inputData")
    private final List<TslData> inputData = new LinkedList<>();

    // Declarations of the service's output parameters.
    @SerializedName("outputData")
    private final List<TslData> outputData = new LinkedList<>();

    /**
     * Constructs a service element.
     *
     * @param identifier unique identifier of the service within the thing model;
     *                   also used to derive the method name
     * @param callType   invocation type (async or sync)
     */
    public TslServiceElement(String identifier, CallType callType) {
        super(identifier);
        this.callType = callType;
        this.method = String.format("thing.service.%s", identifier);
    }

    @Override
    public String toString() {
        return "SERVICE:" + getIdentifier();
    }

    /**
     * @return the service invocation type
     */
    public CallType getCallType() {
        return callType;
    }

    /**
     * @return the method name, in the form {@code thing.service.<identifier>}
     */
    public String getMethod() {
        return method;
    }

    /**
     * @return mutable list of input parameter declarations
     */
    public List<TslData> getInputData() {
        return inputData;
    }

    /**
     * @return mutable list of output parameter declarations
     */
    public List<TslData> getOutputData() {
        return outputData;
    }

    /**
     * Service invocation type.
     */
    public enum CallType {

        ASYNC("async"),
        SYNC("sync");

        // Wire representation of the call type.
        private final String type;

        CallType(String type) {
            this.type = type;
        }

        /**
         * @return the wire representation ({@code "async"} or {@code "sync"})
         */
        public String getType() {
            return type;
        }
    }
}
Selecting Some Variables to Update-Based Algorithm for Solving Optimization Problems With the advancement of science and technology, new complex optimization problems have emerged, and the achievement of optimal solutions has become increasingly important. Many of these problems have features and difficulties such as non-convex, nonlinear, discrete search space, and a non-differentiable objective function. Achieving the optimal solution to such problems has become a major challenge. To address this challenge and provide a solution to deal with the complexities and difficulties of optimization applications, a new stochastic-based optimization algorithm is proposed in this study. Optimization algorithms are a type of stochastic approach for addressing optimization issues that use random scanning of the search space to produce quasi-optimal answers. The Selecting Some Variables to Update-Based Algorithm (SSVUBA) is a new optimization algorithm developed in this study to handle optimization issues in various fields. The suggested algorithm’s key principles are to make better use of the information provided by different members of the population and to adjust the number of variables used to update the algorithm population during the iterations of the algorithm. The theory of the proposed SSVUBA is described, and then its mathematical model is offered for use in solving optimization issues. Fifty-three objective functions, including unimodal, multimodal, and CEC 2017 test functions, are utilized to assess the ability and usefulness of the proposed SSVUBA in addressing optimization issues. SSVUBA’s performance in optimizing real-world applications is evaluated on four engineering design issues. Furthermore, the performance of SSVUBA in optimization was compared to the performance of eight well-known algorithms to further evaluate its quality. 
The simulation results reveal that the proposed SSVUBA has a significant ability to handle various optimization issues and that it outperforms other competitor algorithms by giving appropriate quasi-optimal solutions that are closer to the global optima. Introduction The act of obtaining the optimal solution from multiple solutions under a given situation is known as optimization . In designed problems in different sciences, items such as cost minimization, profit maximization, shortest length, maximum endurance, best structure, etc., are often raised, which require mathematical modeling of the problem based on the structure of an optimization problem and solving it with appropriate methods. Mathematical methods of optimization are introduced according to the type of problem modeling, such as linear or nonlinear, constrained or non-constrained, continuous or linear programming, or nonlinear programming. Despite their good performance, these methods also have obstacles and disadvantages. These methods generally find the local optimal, especially if the initial guess is close to a local optimal. In addition, each of these methods assumes assumptions about the problem, which may not be true. These assumptions include derivability, convexity, and coherence. In addition to these disadvantages, the computation time of these methods in a group of optimization problems called nondeterministic polynomial-hard increases exponentially as the dimensions of the problem increase . To overcome these challenges, a special class of optimization methods called stochasticbased optimization algorithms were developed. Because these algorithms rely on probabilistic and random search decisions and principles in many search steps of the optimal solution, these algorithms are called stochastic methods . To find the best answer, optimization algorithms rely on a similar technique. 
The search procedure in most of these algorithms begins by generating a number of random answers within the allowable range of decision variables. This set of solutions in each of the algorithms has names such as population, colony, group, and so on. Moreover, each solution is assigned names such as chromosomes, ants, particles, and so on. The existing answers are then enhanced in various ways in an iterative process, and this action proceeds until the stop condition is achieved . The global optimum is the fundamental answer to an optimization issue. However, optimization algorithms as stochastic methods are not necessarily able to supply the global optimal answer. Hence, the solution obtained from an optimization algorithm for an optimization problem is called quasi-optimal . The criterion of goodness of a quasioptimal solution depends on how close it is to the global optimal. As a result, when comparing the effectiveness of several optimization algorithms in addressing a problem, the method that produces a quasi-optimal solution that is closer to the global ideal optimal is preferable. This issue, as well as the goal to attain better quasi-optimal solutions, has prompted academics to extensive efforts and research to develop a variety of optimization algorithms that can provide solutions that are closer to the global optimal for optimization issues. Stochastic-based optimization algorithms have wide applications in optimization challenges in various sciences such as sensor networks , image processing , data mining , feature selection , clustering , engineering , the internet of things , and so on. Is there still a need to develop new optimization algorithms despite the optimization algorithms that have been established so far? This is a key question that emerges in the research of optimization algorithms. The notion of the No Free Lunch (NFL) theorem has the answer to this question . 
According to the NFL theorem, an optimization method that is effective in optimizing a group of optimization issues does not ensure that it will be useful in solving other optimization problems. As a result, it is impossible to say that one method is the best optimizer for all optimization problems. The NFL theorem motivates academics to create novel optimization algorithms to tackle optimization issues more efficiently. The authors of this paper have developed several optimization algorithms in their previous works, such as the Pelican Optimization Algorithm (POA) and Teamwork Optimization Algorithm (TOA) . The common denominator of all optimization algorithms (both in the works of the authors of this article and the works of other researchers) can be considered the use of a random scan of the problem search space, random operators, no need for derivation process, easy implementation, simple concepts, and practicality in optimization challenges. The optimization process in population-based optimization algorithms starts with a random initial population. Then, in an iteration-based process, according to the algorithm steps, the position of the algorithm population in the search space is updated until the implementation is completed. The most important difference between optimization algorithms is in the same process of updating members of the algorithm population from one iteration to another. In POA, the algorithm population update process is based on simulating the strategies of pelicans while hunting. In TOA, modeling the activities and interactions of individuals in a group by presenting teamwork to achieve the team goal is the main idea in updating the population. The novelty of this paper is in the development and design of a new optimization method named Selecting Some Variables to Update-Based Algorithm (SSVUBA) to address optimization problems. The main contributions of this study are as follows: 1. 
A new stochastic-based approach called Selecting Some Variables to Update-Based Algorithm (SSVUBA) used in optimization issues is introduced. 2. The fundamental idea behind the proposed method is to change the number of selected variables to update the algorithm population throughout iterations, as well as to use more information from diverse members of the population to prevent the algorithm from relying on one or several specific members. 3. SSVUBA theory and steps are described and its mathematical model is presented. 4. On a set of fifty-three standard objective functions of various unimodal, multimodal types, and CEC 2017, SSVUBA's capacity to optimize is examined. 5. The proposed algorithm is implemented in four engineering design problems to analyze SSVUBA's ability to solve real-world applications, 6. SSVUBA's performance is compared to the performance of eight well-known algorithms to better understand its potential to optimize. The following is the rest of the paper: A study of optimization methods is provided in Section 2. The proposed SSVUBA is introduced in Section 3. Simulation investigations are presented in Section 4. A discussion is provided in Section 5. The performance of SSVUBA in optimizing real-world applications is evaluated in Section 6. Section 7 contains the conclusions and recommendations for future research. Background Optimization algorithms are usually developed based on the simulation of various ideas in nature, physics, genetics and evolution, games, and any type of process that can be modeled as an optimizer. One of the first and most prominent meta-heuristic algorithms is the Genetic Algorithm (GA), which is based on the theory of evolution. The main operator of this algorithm is a crossover that combines different members of the population together. However, the mutation operator is also useful for preventing premature convergence and falling into the local optimal trap. 
The smart part of this method is the selection stage, which in each stage, transmits better solutions to the next generation . Ant Colony Optimization (ACO) is designed based on the inspiration of ants' group behavior in food discovery. Ants release pheromones along the way to food. The presence of more pheromones in a path indicates the presence of a rich food source near that path. By modeling the process of pheromone release, pheromone tracking, and its evaporation with sunlight, the ACO is completed . Particle Swarm Optimization (PSO) is one of the most established swarm-based algorithms, which is inspired by the social behavior of different biological species in their group life, such as birds and fish. This algorithm mimics the interaction between members to share information. Every particle is affected by its best situation and the best situation of the whole swarm, but it must move randomly . The Simulated Annealing (SA) algorithm is a physics-based stochastic search method for optimization that relies on the simulation of the gradual heating and cooling process of metals called annealing. The purpose of annealing metals is to achieve a minimum energy and a suitable crystalline structure. In SA, this idea has been applied for optimization and search . The Firefly Algorithm (FA) is based on the natural behavior of fireflies that live together in large clusters. FA simulates the activity of a group of fireflies by assigning a value to each firefly's position as a model for the quantity of firefly pigments and then updating the fireflies' location in subsequent iterations. The two main stages of FA in each iteration are the pigment update phase and the motion phase. Fireflies move toward other fireflies with more pigments in their neighborhood. In this way, during successive repetitions, the proposed solutions tend towards a better solution . 
The Teaching-Learning Based Optimization (TLBO) method is based on simulating a teacher's impact on the output of students in a classroom. TLBO is built on two fundamental modalities of teaching and learning: (1) Teacher phase in which knowledge is exchanged between the teacher and learners and (2) Learner phase. Selecting Some Variables to Update-Based Algorithm (SSVUBA) In this section, the theory and all stages of the Selecting Some Variables to Update-Based Algorithm (SSVUBA) are described, and then its mathematical model is presented for application in tackling optimization issues. Mathematical Model of SSVUBA SSVUBA is a population-based stochastic algorithm. Each optimization issue has a search space with the same number of axes as the problem's variables. According to its position in the search space, each member of the population assigns values to these axes. As a result, each member of the population in the SSVUBA is a proposed solution to the optimization issue. Each member of the population can be mathematically described as a vector, each component of which represents the value of one of the problem variables. As a result, the population members of the proposed SSVUBA can be modeled using a matrix termed the population matrix, as shown in Equation (1). where X is the SSVUBA's population matrix, X i is the ith member, x i,d is the value of the dth problem variable generated by the ith member, N is the number of population members, and m is the number of problem variables. The objective function of the problem can be assessed using the theory that each member of the population provides values for the problem variables. As a result, the values derived for the objective function based on the evaluation of different members of the population can be described employing a vector according to Equation (2). where F denotes the objective function vector and F i represents the objective function value obtained from the ith population member's evaluation. 
The process of updating population members in the proposed SSVUBA adheres to two principles. The first principle is that some members of the population may be in a situation where if only the values of some variables change, they will be in a better position instead of changing all of the variables. Therefore, in the proposed SSVUBA, the number of variables selected for the update process is set in each iteration. In this way, in the initial repetitions, the number is set to the maximum and at the end of the repetitions to the minimum number of variables. This principle is mathematically simulated using an index based on Equation (3). where I v denotes the number of selected variables for the update process, T is the maximum number of iterations, and t is the repetition counter. The second principle is to prevent the algorithm population update process from relying on specific members. Relying on algorithm updates to specific members of the population might cause the algorithm to converge towards the local optimum and prevent accurate scanning of the search space to attain the global optimum. The process of updating population members has been modeled using Equations (4)- (6) according to the two principles expressed. To update each member of the population, another member of the population is randomly selected. If the selected member has a better value for the objective function, the first formula in Equation (4) is used. Otherwise, the second formula is used. where X new i , i = 1, 2, . . . , N, is the new status of the ith member, x new i,k j , j = 1, 2, . . . , I v , where k j is a random element from the set {1, 2, . . . , m}, is the k j th dimension of the ith member, F new i is the objective function value of the ith population member in new status, r is a random number in the interval [0, 1], x s,k j is the selected member for guiding the ith member in the k j th dimension, and F s is its objective function value. 
Repetition Process of SSVUBA After all members of the population have been updated, the SSVUBA algorithm goes on to the next iteration. In the new iteration, index I v is adjusted using Equation (3), and then population members are updated based on Equations (4)- (6). This process repeats until the algorithm is completed. The best quasi-optimal solution found by the algorithm during execution is offered as the answer to the problem after the complete implementation of SSVUBA for the specified optimization problem. Figure 1 depicts the flowchart of the SSVUBA's various steps, while Algorithm 1 presents its pseudocode. Computational Complexity of SSVUBA In this subsection, the computational complexity of SSVUBA is presented. In this regard, time and space complexities are discussed. Time Complexity SSVUBA preparation and initialization require O(N·m) time where N is the number of SSVUBA population members and m is the number of problem variables. In each iteration of the algorithm, population members are updated, which requires O(T·N·I v ) time where T is the maximum number of iterations and I v is the number of selected variables for the update process. Accordingly, the total time computational complexity of SSVUBA is equal to O(N(m + T·I v )). Computational Complexity of SSVUBA In this subsection, the computational complexity of SSVUBA is presented. In this regard, time and space complexities are discussed. Space Complexity The space complexity of SSVUBA is equal to O(N·m), which is considered the maximum value of space pending its initialization procedure. Input the optimization problem information: Decision variables, constraints, and objective function 2. Set the T and N parameters. Adjust number of selected variables to update (I v ) using Equation (3). For j = 1: Select a population member randomly to guide the ith population member. X S ← X(S, :), S randomly selected from {1, 2, . . . , N} and S ≠ i , is the Sth row of the population matrix. 8. 
Select one of the variables at random to update. x i,k j , k j randomly selected f rom {1, 2, . . . , m}. Calculate the new status of the k j th dimension using Equation (4). Calculate the new status of the k j th dimension using Equation (4). Calculate the objective function based on X new Update the ith population member using Equation (6). Update the ith population member using Equation (6). Save the best solution so far. 24. end 25. Output the best obtained solution. End SSVUBA. Visualization of the Movement of Population Members towards the Solution In the SSVUBA approach, population members converge to the optimal area and solution in the search space under the exchange of information between each other and the algorithm steps. In this subsection, to provide the visualization of the members' movement in the search space, the process of SSVUBA members' access to the solution is intuitively shown. This visualization is presented in a two-dimensional space, with a population size equals 30 and 30 iterations in optimizing an objective function called the Sphere function; its mathematical model is as follows: Subject to: − 10 ≤ x 1 , x 2 ≤ 10 Figure 2 shows the process of achieving SSVUBA towards the solution by optimizing the mentioned objective function. In this figure, the convergence of the population members towards the optimal solution of the variables (i.e., x 1 = x 2 = 0) and the optimal value of the objective function (i.e., F(x 1 , x 2 ) = 0) is well evident. Simulation Studies and Results In this section, simulation studies are presented to evaluate the performance of the SSVUBA in optimization and provide appropriate solutions for optimization problems. For this purpose, the SSVUBA is utilized for twenty-three standard objective functions of unimodal, high-dimensional multimodal, and fixed-dimensional multimodal types (see their definitions in Appendix A). 
In addition to the twenty-three objective functions, SSVUBA performance has been tested in optimizing CEC 2017 test functions (see their definitions in Appendix A). Furthermore, the optimization results achieved for the above objective functions using SSVUBA are compared to the performance of twelve optimization methods: PSO, TLBO, GWO, WOA, MPA, TSA, GSA, GA, RFO, RSA, AHA, and HBA to assess the further proposed approach. Numerous optimization algorithms have been developed so far. Comparing an algorithm with all existing algorithms, although possible, will yield a large amount of results. Therefore, twelve optimization algorithms have been used to compare the results. The reasons for choosing these algorithms are as follows: (i) Popular and widely used algorithms: GA and PSO. (ii) Algorithms that have been widely cited and employed in a variety of applications: GSA, TLBO, GWO, WOA. (iii) Algorithms that have been published recently and have received a lot of attention: RFO, TSA, MPA, RSA, AHA, HBA. The average of the best obtained solutions (avg), the standard deviation of the best obtained solutions (std), the best obtained candidate solution (bsf), and the median of obtained solutions (med) are used to present the optimization outcomes of objective functions. Table 1 shows the values utilized for the control parameters of the compared optimization techniques. Simulation Studies and Results In this section, simulation studies are presented to evaluate the performance of the SSVUBA in optimization and provide appropriate solutions for optimization problems. For this purpose, the SSVUBA is utilized for twenty-three standard objective functions of unimodal, high-dimensional multimodal, and fixed-dimensional multimodal types (see their definitions in Appendix A). In addition to the twenty-three objective functions, SSVUBA performance has been tested in optimizing CEC 2017 test functions (see their definitions in Appendix A). 
Furthermore, the optimization results achieved for the above objective functions using SSVUBA are compared to the performance of twelve optimization methods: PSO, TLBO, GWO, WOA, MPA, TSA, GSA, GA, RFO, RSA, AHA, and HBA to assess the further proposed approach. Numerous optimization algorithms have been developed so far. Comparing an algorithm with all existing algorithms, although possible, will yield a large amount of results. Therefore, twelve optimization algorithms have been used to compare the results. The reasons for choosing these algorithms are as follows: Table 1 shows the values utilized for the control parameters of the compared optimization techniques. Assessment of F1 to F7 Unimodal Functions Unimodal functions are the first category of objective functions that are considered for analyzing the performance of optimization methods. The optimization results of unimodal objective functions including F1 to F7 using SSVUBA and eight compared algorithms are reported in Table 2. The SSVUBA has been able to find the global optimal for the F6 function. Further, SSVUBA is the first best optimizer for the F1 to F5 and F7 functions. Analysis of the performance of optimization algorithms against the results of the proposed approach indicates that SSVUBA is able to provide quasi-optimal solutions closer to the global optimum and thus has a higher capability in optimizing unimodal functions than the compared algorithms. Assessment of F8 to F13 High-Dimensional Multimodal Functions High-dimensional multimodal functions are the second type of objective function employed to assess the performance of optimization techniques. Table 3 reveals the results of the implementation of the SSVUBA and eight compared algorithms for functions F8 to F13. For the F9 and F11 functions, SSVUBA was able to deliver the best global solution. Further-more, for the F8, F10, F12, and F13 functions, SSVUBA was the superior optimizer. 
SSVUBA outperformed the other algorithms in solving high-dimensional multimodal issues by offering effective solutions for the F8 to F13 functions, according to the simulation findings. Assessment of F14 to F23 Fixed-Dimensional Multimodal Functions Fixed-dimensional functions are the third type of objective function used to evaluate the efficiency of optimization techniques. Table 4 shows the optimization results for the F14 to F23 functions utilizing the SSVUBA and eight compared techniques. SSVUBA was able to deliver the global optimum for the F14 function. The SSVUBA was also the first best optimizer for the F15, F16, F21, and F22 functions. SSVUBA, in optimizing functions F17, F18, F19, F20, and F23, was able to converge to quasi-optimal solutions with smaller values of the standard deviation. By comparing the performance of optimization algorithms in solving the F14 to F23 functions, it is clear that SSVUBA provides superior and competitive results versus the compared algorithms. Figure 3 shows the performance of SSVUBA as well as eight competitor algorithms in the form of a boxplot. Statistical Analysis Use of the average of the obtained solutions, standard deviation, best candidate solution, and median of obtained solutions to analyze and compare the performance of optimization algorithms in solving optimization issues offers significant information about Statistical Analysis Use of the average of the obtained solutions, standard deviation, best candidate solution, and median of obtained solutions to analyze and compare the performance of optimization algorithms in solving optimization issues offers significant information about the quality and capabilities of optimization algorithms. However, it is possible that the superiority of one algorithm among several algorithms in solving optimization problems is random by even a low probability. 
Therefore, in this subsection, in order to statistically analyze the superiority of SSVUBA, the Wilcoxon sum rank test is used. The Wilcoxon rank sum test is a nonparametric test to assess whether the distributions of results obtained between two separate methods for a dependent variable are systematically different from one another. The Wilcoxon rank sum test was implemented for the optimization results obtained from the optimization algorithms. The results of this analysis are presented in Table 5. In the Wilcoxon rank sum test, a p-value indicates whether the superiority of one algorithm over another is significant. Therefore, the proposed SSVUBA in cases where the p-value is less than 5% has a statistically significant performance superior to the compared algorithm. Sensitivity Analysis The proposed SSVUBA is a population-based algorithm that is able to solve optimization problems in an iteration-based procedure. Therefore, the two parameters N and T affect the performance of SSVUBA in achieving the solution. As a result, the sensitivity analysis of the proposed SSVUBA to these two parameters is described in this subsection. SSVUBA has been applied to F1 to F23 functions in independent runs for different populations with 20, 30, 50, and 80 members to investigate the sensitivity of the proposed SSVUBA performance to the N parameter. Table 6 reveals the findings of SSVUBA's sensitivity analysis to N. In addition, the convergence curves of the proposed SSVUBA to attain a quasi-optimal solution for different populations are plotted in Figure 4. The sensitivity analysis of the SSVUBA to the number of population members show that increasing the search agents in the search space leads to more accurate scanning of the search space and achieving more appropriate optimal solutions. 
The proposed approach is implemented in independent performances for the number of iterations 100, 500, 800, and 1000 in order to optimize the objective functions F1 to F23 with aim of the investigating the sensitivity of the performance of SSVUBA to parameter T. Table 7 shows the simulated results of this sensitivity study, and Figure 5 shows the convergence curves of the SSVUBA under the influence of this analysis. The results of the simulation and sensitivity analysis of the proposed algorithm to the parameter T illustrate that increasing the number of iterations of the algorithm provides more opportunity for the algorithm to converge towards optimal solutions. As a result, as the maximum number of iterations increases and the values of the objective functions decrease. SSVUBA has been applied to F1 to F23 functions in independent runs for different populations with 20, 30, 50, and 80 members to investigate the sensitivity of the proposed SSVUBA performance to the N parameter. Table 6 reveals the findings of SSVUBA's sensitivity analysis to N. In addition, the convergence curves of the proposed SSVUBA to attain a quasi-optimal solution for different populations are plotted in Figure 4. The sensitivity analysis of the SSVUBA to the number of population members show that increasing the search agents in the search space leads to more accurate scanning of the search space and achieving more appropriate optimal solutions. The proposed approach is implemented in independent performances for the number of iterations 100, 500, 800, and 1000 in order to optimize the objective functions F1 to F23 with aim of the investigating the sensitivity of the performance of SSVUBA to parameter T. Table 7 shows the simulated results of this sensitivity study, and Figure 5 shows the convergence curves of the SSVUBA under the influence of this analysis. 
The results of the simulation and sensitivity analysis of the proposed algorithm to the parameter T illustrate that increasing the number of iterations of the algorithm provides more opportunity for the algorithm to converge towards optimal solutions. As a result, as the maximum number of iterations increases and the values of the objective functions decrease. In addition to studying the analysis of SSVUBA sensitivity to the and parameters, each of the relationships used in Equation (4) also affects the performance of SSVUBA. Therefore, the effectiveness of all cases in Equation (4) is examined at this stage. In this regard, the proposed SSVUBA is implemented in three different modes for the objective functions F1 to F23. In the first case (mode 1), the first case of Equation (4) Table 8, and Figure 6 shows the SSVUBA convergence curves in the optimization of functions F1 to F23 in this study. What can be deduced from the simulation results is that applying the relationships in Equation (4) simultaneously has led to better and more efficient optimization results for the objective functions F1 to F23 compared to using each of the relationships separately. In addition to studying the analysis of SSVUBA sensitivity to the N and T parameters, each of the relationships used in Equation (4) also affects the performance of SSVUBA. Therefore, the effectiveness of all cases in Equation (4) is examined at this stage. In this regard, the proposed SSVUBA is implemented in three different modes for the objective functions F1 to F23. In the first case (mode 1), the first case of Equation (4), i.e., x i,k j + r· x s,k j − I·x i,k j is used. In the second case (mode 2), the second case of Equation (4), i.e., x i,k j + r· x i,k j − I·x s,k j is used. In the third case (mode 3), both cases introduced in Equation (4) are used simultaneously. 
The results of this analysis are shown in Table 8, and Figure 6 shows the SSVUBA convergence curves in the optimization of functions F1 to F23 in this study. What can be deduced from the simulation results is that applying the relationships in Equation (4) simultaneously has led to better and more efficient optimization results for the objective functions F1 to F23 compared to using each of the relationships separately. Population Diversity Analysis Population diversity has a significant impact on the success of the optimization process by optimization algorithms. Population diversity can improve the algorithm's ability to search globally in the problem-solving space, thus preventing it from falling into the trap of local optimal solutions. In this regard, in this subsection, population diversity analysis of SSVUBA performance has been studied. To show the population diversity of SSVUBA in achieving the solution during the iterations of the algorithm, the index is used, which is calculated using Equations (7) and (8) . Here, is the spreading of each population member from its centroid and is its centroid. Population Diversity Analysis Population diversity has a significant impact on the success of the optimization process by optimization algorithms. Population diversity can improve the algorithm's ability to search globally in the problem-solving space, thus preventing it from falling into the trap of local optimal solutions. In this regard, in this subsection, population diversity analysis of SSVUBA performance has been studied. To show the population diversity of SSVUBA in achieving the solution during the iterations of the algorithm, the I C index is used, which is calculated using Equations (7) and (8) . Here, I C is the spreading of each population member from its centroid and c j is its centroid. The impact of population diversity on the optimization process given by SSVUBA in optimizing functions F1 to F23 is shown in Figure 7. 
In this figure, population diversity and SSVUBA convergence curves are presented for each of the objective functions. As can be seen from the simulation results, SSVUBA has a high population diversity in the process of optimizing most of the target functions. By optimizing functions F1, F2, F3, F4, F7, F8, F9, F12, F13, F15, F16, F17, F18, F19, F20, F21, F22, and F23, it is evident that until the final iterations, the algorithm convergence process as well as population diversity continues. In handling the F5 function, it is evident that the convergence process continues until the final iteration. In the optimization of function F6, SSVUBA with high search power reached the global optimization, and then the population diversity decreased. In the optimization of function F10, the population diversity decreased while the algorithm achieved an acceptable solution. In solving function F11, the population diversity decreased, while SSVUBA converged to the best solution, the global optima. In optimizing function F14, the population diversity decreased after the algorithm converged to the optimal solution. Therefore, the results of population diversity analysis indicate the high ability of SSVUBA in maintaining population diversity, which has led to its effective performance in providing appropriate solutions for objective functions. The impact of population diversity on the optimization process given by SSVUBA in optimizing functions F1 to F23 is shown in Figure 7. In this figure, population diversity and SSVUBA convergence curves are presented for each of the objective functions. As can be seen from the simulation results, SSVUBA has a high population diversity in the process of optimizing most of the target functions. By optimizing functions F1, F2, F3, F4, F7, F8, F9, F12, F13, F15, F16, F17, F18, F19, F20, F21, F22, and F23, it is evident that until the final iterations, the algorithm convergence process as well as population diversity continues. 
In handling the F5 function, it is evident that the convergence process continues until the final iteration. In the optimization of function F6, SSVUBA with high search power reached the global optimization, and then the population diversity decreased. In the optimization of function F10, the population diversity decreased while the algorithm achieved an acceptable solution. In solving function F11, the population diversity decreased, while SSVUBA converged to the best solution, the global optima. In optimizing function F14, the population diversity decreased after the algorithm converged to the optimal solution. Therefore, the results of population diversity analysis indicate the high ability of SSVUBA in maintaining population diversity, which has led to its effective performance in providing appropriate solutions for objective functions. Evaluation of the CEC 2017 Test Functions In this subsection, the performance of SSVUBA in addressing the CEC 2017 benchmark is examined. The CEC 2017 set includes three unimodal functions (C1 to C3), seven simple multimodal functions (C4 to C10), ten hybrid functions (C11 to C20), and ten composition functions (C21 to C30). The results obtained from the implementation of SSVUBA and competitor algorithms for these functions are shown in Table 9. What can be deduced from the simulation results is that SSVUBA performed better than competitor algorithms in handling the C1, C2, C4, C5, C11, C12, C13, C14, C15, C16, C17, C18, C19, C20, C21, C24, C26, C27, C29, and C30 functions. Evaluation of the CEC 2017 Test Functions In this subsection, the performance of SSVUBA in addressing the CEC 2017 benchmark is examined. The CEC 2017 set includes three unimodal functions (C1 to C3), seven simple multimodal functions (C4 to C10), ten hybrid functions (C11 to C20), and ten composition functions (C21 to C30). The results obtained from the implementation of SSVUBA and competitor algorithms for these functions are shown in Table 9. 
What can be deduced from the simulation results is that SSVUBA performed better than competitor algorithms in handling the C1, C2, C4, C5, C11, C12, C13, C14, C15, C16, C17, C18, C19, C20, C21, C24, C26, C27, C29, and C30 functions. Discussion Two essential factors that influence the performance of optimization algorithms are the exploitation and exploration capabilities. To give an acceptable solution to an optimization issue, each optimization algorithm must strike a reasonable balance between these two requirements. In the study of optimization algorithms, the idea of exploitation refers to the algorithm's capacity to search locally. In reality, after reaching the optimal area in the optimization problem's search space, an optimization algorithm should be able to converge as much as feasible to the global optimal. As a result, when comparing the performance of several algorithms in solving an optimization issue, an algorithm that provides a solution that is closer to the global optimal has a better exploitation capability. The exploitation ability of an algorithm is essential, especially when solving problems that have only one basic solution. The objective functions F1 to F7, which are unimodal functions, have the property that they lack local optimal solutions and have only one main solution. As a result, functions F1 to F7 are good candidates for testing the exploitation ability of optimization techniques. The optimization results of the unimodal objective functions reported in Table 2 show that the proposed SSVUBA has a higher capability in local search than the compared algorithms and with high exploitation power, is able to deliver solutions very close to the global optimal. In the study of optimization algorithms, the idea of exploration refers to the algorithm's capacity to search globally. In reality, to find the optimal area, an optimization algorithm should be able to correctly scan diverse portions of the search space. 
Exploration power enables the algorithm to pass through all optimal local areas and avoid becoming trapped in a local optimum. As a result, when comparing the potential of various optimization algorithms to handle an optimization issue, an algorithm that can appropriately check the problem search space to distance itself from all local optimal solutions and move towards the global optimal solution has a higher exploration ability. The exploration ability of an algorithm is of particular importance, especially when solving issues with several optimal local solutions in addition to the original solution. The objective functions F8 to F23, which are multimodal functions, have this feature. As a result, these functions are good candidates for testing the exploration ability in optimization algorithms. The examination of the results of optimization of multimodal functions, provided in Tables 3 and 4, shows that the SSVUBA has a superior ability in global search and is capable of passing through the local optimum areas due to its high exploration power. Although exploitation and exploration affect the performance of optimization algorithms, each alone is not enough for the algorithm to succeed in optimization. Therefore, there is a need for a balance between these two indicators for an algorithm to be able to handle optimization problems. The simulation results show that SSVUBA has a high potential for balancing exploration and exploitation. The superiority of SSVUBA in the management of optimization applications with statistical criteria and ranking compared to competitor algorithms is evident. However, statistical analysis of the Wilcoxon rank sum test shows that this superiority is also statistically significant. SSVUBA sensitivity analysis to parameters N and T shows that the performance of the proposed algorithm under the influence of changes in these two parameters provides different results. 
This is because the algorithm must have sufficient power to scan the search space whose tool is search agents (population members, i.e., N), as well as a sufficient opportunity (i.e., T) to identify the optimal area and converge towards the global optima. Thus, as expected, increasing the T and N values improved the SSVUBA performance and decreased the target function values. To further analyze the performance of SSVUBA in optimization applications, this proposed method, along with competitor algorithms, was implemented on the CEC 2017 test suite. The simulation results in this type of optimization challenge indicate the successful performance of SSVUBA in addressing this type of optimization problem. Comparing SSVUBA with competing algorithms, it was found that SSVUBA ranked first in most cases and was more efficient than the compared algorithms. SSVUBA for Engineering Design Applications In order to analyze the efficiency of SSVUBA in real world purposes, this optimizer has been employed to address four engineering problems: pressure vessel design, speed reducer design, welded beam design, and tension/compression spring design. Pressure Vessel Design Problem Pressure vessel design is an engineering challenge in which the design purpose is minimizing the total cost (material, forming, and welding) of the cylindrical pressure vessel . The schematic of this issue is shown in Figure 8. This problem's mathematical model is as follows: Consider: The implementation results of SSVUBA and eight competitor algorithms in achieving the optimal design for pressure vessel are reported in Table 10. SSVUBA presents the optimal solution with the values of the variables equal to (0.7789938, 0.3850896, 40.3607, 199.3274) and the value of the objective function (5884.8824). The statistical results of SSVUBA performance against eight competitor algorithms in optimizing the pressure vessel problem are presented in Table 11. 
What can be seen from the statistical results is that SSVUBA has a superior performance over the compared algorithms by providing better values in statistical indicators. The behavior of the SSVUBA convergence curve during achieving the optimal solution for pressure vessel design is presented in Figure 9. The implementation results of SSVUBA and eight competitor algorithms in achieving the optimal design for pressure vessel are reported in Table 10. SSVUBA presents the optimal solution with the values of the variables equal to (0.7789938, 0.3850896, 40.3607, 199.3274) and the value of the objective function (5884.8824). The statistical results of SSVUBA performance against eight competitor algorithms in optimizing the pressure vessel problem are presented in Table 11. What can be seen from the statistical results is that SSVUBA has a superior performance over the compared algorithms by providing better values in statistical indicators. The behavior of the SSVUBA convergence curve during achieving the optimal solution for pressure vessel design is presented in Figure 9. Speed Reducer Design Problem Speed reducer design is a minimization challenge whose main goal in optimal design is to reduce the weight of the speed reducer, which is depicted schematically in Figure 10. . This problem's mathematical model is as follows: Consider Speed Reducer Design Problem Speed reducer design is a minimization challenge whose main goal in optimal design is to reduce the weight of the speed reducer, which is depicted schematically in Figure 10 . This problem's mathematical model is as follows: The results obtained from SSVUBA and eight competing algorithms in optimizing the speed reducer design are presented in Table 12. Based on the simulation results, it is obvious that SSVUBA has provided the optimal design of this problem for the values of the variables equal to (3.50003, 0.700007, 17, 7.3, 7.8, 3.35021, 5.28668) and the value of the objective function equal to (2996.3904). 
The statistical results of the SSVUBA performance as well as competitor algorithms in optimizing the speed reducer problem are reported in Table 13. Statistical results show the superiority of SSVUBA over competitor algorithms. The SSVUBA convergence curve when solving the speed reducer design is shown in Figure 11. Consider: $2.9 \le x_6 \le 3.9$, and $5 \le x_7 \le 5.5$. The results obtained from SSVUBA and eight competing algorithms in optimizing the speed reducer design are presented in Table 12. Based on the simulation results, it is obvious that SSVUBA has provided the optimal design of this problem for the values of the variables equal to (3.50003, 0.700007, 17, 7.3, 7.8, 3.35021, 5.28668) and the value of the objective function equal to (2996.3904). The statistical results of the SSVUBA performance as well as competitor algorithms in optimizing the speed reducer problem are reported in Table 13. Statistical results show the superiority of SSVUBA over competitor algorithms. The SSVUBA convergence curve when solving the speed reducer design is shown in Figure 11. Welded Beam Design Welded beam design is an engineering topic with the main goal of minimizing the fabrication cost of the welded beam, a schematic of which is shown in Figure 12. This problem's mathematical model is as follows: Consider Welded Beam Design Welded beam design is an engineering topic with the main goal of minimizing the fabrication cost of the welded beam, a schematic of which is shown in Figure 12. This problem's mathematical model is as follows: The optimization results for the welded beam design are reported in Table 14. Analysis of the simulation results shows that SSVUBA has provided the optimal design for the welded beam with the values of the variables equal to (0.205730, 3.4705162, 9.0366314, 0.2057314) and the value of the objective function equal to (1.724852). 
The statistical results obtained from the implementation of SSVUBA and eight competitor algorithms on this design are presented in Table 15. Analysis of the results of this table shows that SSVUBA with better values in statistical indicators provides superior performance in solving the welded beam design against competitor algorithms. The SSVUBA convergence curve for the optimal solution of the welded beam design problem is shown in Figure 13. welded beam design against competitor algorithms. The SSVUBA convergence curve for the optimal solution of the welded beam design problem is shown in Figure 13. Tension/Compression Spring Design Problem Tension/compression spring design is an engineering challenge aimed at reducing the weight of the tension/compression spring, a schematic of which is shown in Figure 14. Subject to: Tension/Compression Spring Design Problem Tension/compression spring design is an engineering challenge aimed at reducing the weight of the tension/compression spring, a schematic of which is shown in Figure 14. This problem's mathematical model is as follows: The results for the tension/compression spring design variables using SSVUBA and compared methods are provided in Table 16. The simulation results reveal that SSVUBA provides the optimal solution with the values of the variables equal to (0.051704, 0.357077, 11.26939) and the value of the objective function equal to (0.012665). The statistical results of implementation of SSVUBA and compared algorithms for the tension/compression spring problem are presented in Table 17. The observations indicate the superiority of SSVUBA performance due to the provision of better values of statistical indicators compared to competitor algorithms. The SSVUBA convergence curve when achieving the optimal solution to the tension/compression spring problem is shown in Figure 15. Consider. Minimize: $f(x) = (x_3 + 2)\, x_2\, x_1^2$. 
Subject to: The results for the tension/compression spring design variables using SSVUBA and compared methods are provided in Table 16. The simulation results reveal that SSVUBA provides the optimal solution with the values of the variables equal to (0.051704, 0.357077, 11.26939) and the value of the objective function equal to (0.012665). The statistical results of implementation of SSVUBA and compared algorithms for the tension/compression spring problem are presented in Table 17. The observations indicate the superiority of SSVUBA performance due to the provision of better values of statistical indicators compared to competitor algorithms. The SSVUBA convergence curve when achieving the optimal solution to the tension/compression spring problem is shown in Figure 15. The results for the tension/compression spring design variables using SSVUBA and compared methods are provided in Table 16. The simulation results reveal that SSVUBA provides the optimal solution with the values of the variables equal to (0.051704, 0.357077, 11.26939) and the value of the objective function equal to (0.012665). The statistical results of implementation of SSVUBA and compared algorithms for the tension/compression spring problem are presented in Table 17. The observations indicate the superiority of SSVUBA performance due to the provision of better values of statistical indicators compared to competitor algorithms. The SSVUBA convergence curve when achieving the optimal solution to the tension/compression spring problem is shown in Figure 15. The SSVUBA's Applicability in Sensor Networks and Image Processing Many complex problems in the field of image processing are the focus of extensive research to find efficient methods. In this subject, local search approaches are commonly utilized for solving difficult problems. However, many issues and research in image processing are combinatorial and NP-hard. 
As optimization algorithms are populationbased stochastic approaches, they are generally better suited to solving these complicated challenges. As a result, optimization algorithms such as proposed SSVUBA can prevent becoming stuck in the local optimum and can frequently locate the global optimal solution. Recent advancements have resulted in an increased use of artificial intelligence approaches for image processing. Today, wireless sensor networks are one of the most popular wireless networks due to their various applications. These networks consist of a set of automated sensors to monitor physical or environmental conditions such as heat, sound, vibration, pressure, motion, or pollution. As a result, sensor networks are faced with a huge amount of valuable information. In this type of application, data analysis using classical methods is not very efficient and appropriate. Because of this, artificial intelligence approaches, such as the employment of the proposed SSVUBA for various applications in image processing and sensor networks, have become significant. The proposed SSVUBA approach is effective for topics such as energy optimization in sensor networks, sensor network placement, network coding (NC) in wireless sensor networks, sensor network coverage optimization, clustering in sensor networks, medical image processing, pattern recognition, video processing, and so on. Conclusions and Future Works Numerous optimization issues have been defined in various disciplines of science that must be addressed by employing proper approaches. One of the most successful and extensively used approaches for tackling such issues is optimization algorithms, which belong to the category of random methods. To handle different optimization challenges, a novel optimization technique named "Selecting Some Variables to Update-Based Algorithm" (SSVUBA) was developed in this study. 
Making more use of the information of different members of the population and adjusting the number of selected variables in order to update the population of the algorithm during successive iterations of the algorithm were the main ideas in the design of the proposed SSVUBA. The ability of SSVUBA to solve optimization problems was tested on fifty-three different objective functions. The results of optimization of unimodal functions indicated the strong ability of the proposed algorithm in the exploitation index and the presentation of solutions very close to the global optimal. The optimization results of multimodal functions showed that the SSVUBA, with high capability in the exploration index, is able to accurately scan the search space of the problem and converge to the global optimal by passing through local optimal areas. Further, in order to analyze the optimization results obtained from SSVUBA, these results were compared with the performance of twelve well-known algorithms: PSO, TLBO, GWO, WOA, MPA, TSA, GSA, GA, RFO, RSA, AHA, and HBA. What is clear from the analysis of simulation results is that the SSVUBA has a strong ability to solve optimization problems by providing appropriate quasi-optimal solutions, and its performance is superior and more competitive than that of similar algorithms. In order to further analyze SSVUBA in optimization, the proposed algorithm was employed to optimize four engineering design challenges. The optimization results indicated the effective performance of SSVUBA in real-world applications and the provision of optimal values for design variables. The authors provide various recommendations for future research, including the development of multi-objective and binary SSVUBA versions. Other proposals for future investigations of this work include using the proposed SSVUBA to solve optimization issues in many fields as well as real-world applications. The proposed SSVUBA approach opens up a wide range of future studies. 
These studies include the SSVUBA employment in wireless sensor networks, image processing, machine learning, signal denoising, artificial intelligence, engineering, feature selection, big data, data mining, and other optimization challenges. As with all stochastic approaches for optimization problems, the limitation of the proposed SSVUBA is that it offers no guarantee that the solutions provided by it will be the global optimal. Another limitation of any random approach, including SSVUBA, is that it is always possible for researchers to develop new algorithms that can provide more effective solutions to optimization issues. Moreover, according to the NFL theorem, another limitation of SSVUBA is that its strong performance in solving a group of optimization applications leaves no reason to offer the same performance in all other optimization applications. Conflicts of Interest: The authors declare no conflict of interest. Appendix A The information of the objective functions utilized in the simulation section is shown in Tables A1-A3. 5,100,4) 30 0
Alteration of MDM2 by the Small Molecule YF438 Exerts Antitumor Effects in Triple-Negative Breast Cancer This study uncovers the essential role of MDM2 in TNBC progression and suggests that targeting the HDAC1–MDM2–MDMX axis with a hydroxamate-based HDACi could be a promising therapeutic strategy for TNBC. Triple-negative breast cancer (TNBC) exhibits a high mortality rate and is the most aggressive subtype of breast cancer. As previous studies have shown that histone deacetylases (HDAC) may represent molecular targets for TNBC treatment, we screened a small library of synthetic molecules and identified a potent HDAC inhibitor (HDACi), YF438, which exerts effective anti-TNBC activity both in vitro and in vivo. Proteomic and biochemical studies revealed that YF438 significantly downregulated mouse double minute 2 homolog (MDM2) expression. In parallel, loss of MDM2 expression or blocking MDM2 E3 ligase activity rendered TNBC cells less sensitive to YF438 treatment, revealing an essential role of MDM2 E3 ligase activity in YF438-induced inhibition of TNBC. Mechanistically, YF438 disturbed the interaction between HDAC1 and MDM2, induced the dissociation of MDM2-MDMX, and subsequently increased MDM2 self-ubiquitination to accelerate its degradation, which ultimately inhibited growth and metastasis of TNBC cells. In addition, analysis of clinical tissue samples demonstrated high expression levels of MDM2 in TNBC, and MDM2 protein levels closely correlated with TNBC progression and metastasis. Collectively, these findings show that MDM2 plays an essential role in TNBC progression and targeting the HDAC1–MDM2–MDMX signaling axis with YF438 may provide a promising therapeutic option for TNBC. Furthermore, this novel underlying mechanism of a hydroxamate-based HDACi in altering MDM2 highlights the need for further development of HDACi for TNBC treatment. 
Significance: This study uncovers the essential role of MDM2 in TNBC progression and suggests that targeting the HDAC1–MDM2–MDMX axis with a hydroxamate-based HDACi could be a promising therapeutic strategy for TNBC.
Default Pathway of var2csa Switching and Translational Repression in Plasmodium falciparum Antigenic variation is a subtle process of fundamental importance to the survival of a microbial pathogen. In Plasmodium falciparum malaria, PfEMP1 is the major variable antigen and adhesin expressed at the surface of the infected erythrocyte, which is encoded for by members of a family of 60 var-genes. Peri-nuclear repositioning and epigenetic mechanisms control their mono-allelic expression. The switching of PfEMP1 depends in part on variable transition rates and short-lived immune responses to shared minor epitopes. Here we show var-genes to switch to a common gene that is highly transcribed, but sparsely translated into PfEMP1 and not expressed at the erythrocyte surface. Highly clonal and adhesive P. falciparum, which expressed distinct var-genes and the corresponding PfEMP1s at onset, were propagated without enrichment or panning. The parasites successively and spontaneously switched to transcribe a shared var-gene (var2csa) matched by the loss of PfEMP1 surface expression and host cell-binding. The var2csa gene repositioned in the peri-nuclear area upon activation, away from the telomeric clusters and heterochromatin to transcribe spliced, full-length RNA. Despite abundant transcripts, the level of intracellular PfEMP1 was low suggesting post-transcriptional mechanisms to partake in protein expression. In vivo, off-switching and translational repression may constitute one pathway, among others, coordinating PfEMP1 expression. Introduction Pathogens constrained to survive within a mammalian host are under evolutionary pressure to acquire mechanisms that favor a chronic infection. Asexual Plasmodium falciparum endure by means of antigenic variation. Manifestations of this success are the recrudescence of parasites, the occurrence of super-infections, the establishment of chronic asymptomatic infections, and the paucity of sterile immunity. 
The maintenance of antigen expression is coordinated by the spleen, given that parasites of splenectomized human and animal hosts do not sequester and do not express PfEMP1 on the infected erythrocyte surface . How P. falciparum manages to coordinate the expression of its variable antigens to subsist in the host is intriguing and in part unexplained. Immune evasion of Plasmodium falciparum infected erythrocytes (IE) is predominately mediated by the antigenically variable and highly polymorphic cell surface antigen Plasmodium falciparum erythrocyte membrane protein 1 (PfEMP1). PfEMP1 is a major adhesin that mediates binding to a variety of host-receptors causing sequestration of IEs in the microvasculature of various organs and severe disease in children and pregnant women. Pregnancy-associated malaria (PAM) constitutes one of the malaria syndromes where the involved PfEMP1 species and host receptors have been characterized. VAR2CSA, which is also of particular interest for the present study, has previously been identified as the major PfEMP1 implicated in PAM and shown to interact with chondroitin sulphate A (CSA), non-immune immunoglobulins and possibly other host-receptors, engendering placental sequestration and pathogenesis in PAM . Understanding the mechanisms that regulate the expression of a particular PfEMP1 is an important piece of the puzzle of understanding disease pathogenesis and may help to identify additional targets for combating the disease. Yet, how the parasite switches on and off PfEMP1 expression is at present only partly understood. PfEMP1 is encoded for by members of the multi-gene family var. Each parasite genome harbors approximately 60 var-genes of high sequence diversity but at any given time, only a single PfEMP1 species is expressed in each parasite. 
It has been demonstrated that this mutually exclusive expression of PfEMP1 is regulated at the level of var-gene transcription initiation unlike mammalian systems where regulation occurs through negative feedback at the level of protein production . Epigenetic mechanisms involving chromatin modification, activation or silencing by sterile genetic elements and repositioning of var loci in sub-nuclear compartments also play important roles in the switching and exclusive expression pattern of var-genes . Further, the parasite carries an epigenetic memory at the level of the chromatin involving the methylation of the histone H3 . It has until recently been controversial whether a single fulllength var-gene is being exclusively transcribed or multiple vargenes are simultaneously present . Using microarrays specifically designed to detect var-genes, with multiple probes covering the entire length of every var-gene in the genome, it was recently found that several full-length var-genes are being transcribed simultaneously but also that short spurious transcripts are present . The untranslated full-length var-genes in fact share the same group of upstream promoter sequences as the translated, implying that ''loose'' transcription might be attributable to cross var-gene transcriptional activation within the same group . Still there is one var transcript that is dominant both in ring-and trophozoite stages which is later translated into the corresponding PfEMP1 . The expression dynamics of var-genes and their switching rates have previously been studied using different approaches including in vitro and in vivo assays and mathematical modeling . To gain a better understanding of the succession of var switching we have here monitored two highly clonal parasites during in vitro growth without enrichment or panning for <200 generations (.1 year). 
The vargenes were studied using a comprehensive set of tools to monitor var-RNA and DNA (microarray, qRT-PCR, northern blot, fluorescent in situ hybridization (FISH)) and to follow the expression of PfEMP1 (cell-assays, immunoblotting, FACS). The results of the study suggest that var-gene switching, at least for a subset of genes, involves a programmed process where a large majority of parasites switch to transcribe a single member of the var-gene family, PFL0030c or var2csa, which however seems to be translationally repressed. The implications of our findings for the pathogenesis of disease and survival of the parasite in vivo are discussed. Var-gene expression and phenotypic changes in separate 3D7 clones over 200 generations To follow var-gene switching over time, two parasite clones were monitored for <200 generations during which, both RNA expression and phenotypic changes were investigated at least once a month. The parasites were 3D7S8.4 and 3D7AH1S2, which had been serially selected for either rosette formation or adhesion to CD36 and then cloned by micromanipulation. The rosetting clone 3D7S8.4 transcribed the var-gene PFD0630c at onset whilst the 3D7AH1S2 clone selected for CD36 adhesion transcribed PFF0845c. Both var-genes are internally located on chromosomes 4 and 6, respectively. The global transcriptional profiles of these early generation clones have previously been established during multiple array experiments and the results have been confirmed using northern blot and RT-PCR . When following the RNA expression with qPCR over time we found that the transcription of both initially identified var-genes (PFD0630c/PFF0845c) were gradually switched off, with an off-rate of 10.15 and 2.43% per generation, respectively. A total loss of PFD0630c transcription was observed after 80-130 generations in 3D7S8.4, while it took 175-200 generations for 3D7AH1S2 to completely loose transcription of PFF0845c. 
Instead, transcripts of var2csa (PFL0030c) began to appear in both clones. The loss of the dominant var-genes was intimately linked to an increase of var2csa (PFL0030c) transcription both in 3D7S8.4 and 3D7AH1S2, with on-rates of 5.24% and 1.35% per generation, respectively (Figure 1 & 2). Down-regulation of the initial var transcription was strictly paralleled by off-switching of corresponding adhesive phenotypes. Rosetting and CD36 binding began to decline already after 25 generations of growth and had disappeared at 80-100 generations in 3D7S8.4, and no CSA binding was detected in this parasite ( Figure 2). CD36 binding was one third of the original level in 3D7AH1S2 after 100 generations of growth and was not seen in the late generation parasites (Table 1 and data not shown). Having found that both 3D7S8.4 and 3D7AH1S2 converge to var2csa transcription, matched by the loss the binding ability, we decided to explore the underlying mechanisms in some detail. 3D7S8.4, which rapidly lost its binding phenotype and had a faster off-and on-switching rate of the dominant var-genes, was therefore chosen for further analyses. To confirm the data generated by qRT-PCR we regularly followed the parasite using microarrays ( Figure 3). As can be seen from Figure 3, the var-gene PFD0630c was dominantly transcribed in the 15 generation parasites (Fig. 3A), while expressed to a lesser extent in 33 generation parasites and completely absent in those grown for <200 generations. The var2csa gene appeared to be dominantly transcribed in the long-term cultured non-selected parasites. The oligonucleotide coverage of var2csa was high, with four detecting the DBL-1x, DBL-4e, DBL-5e domains and the upstream open reading frame (uORF; chr12_glimmerm_22). The remaining two oligonucleotides targeted the DBL-6e domain at the 39 end. 
Significantly higher signals from all six oligonucleotides were observed in the long-term cultured 3D7S8.4 parasites (199 generations) as compared to the recently cloned one (15 generations; Figure 3B), implying that the var2csa transcripts were full length from the 5′ to the 3′ end of the exon-I of the gene. Most of the sub-clones similarly showed dominant var2csa transcription. An exception was clone 3D7S8.4.1, which dominantly expressed another var-gene (PF08_0103). The pseudogene var1csa/var common (PFE1640w) was constitutively expressed in all parasite samples, though at a fairly low level ( Figure 4B), in line with previous observations on the ubiquitous nature of constitutive transcription of this var species across parasite isolates . To investigate whether the var2csa expressed in the sub-clones was a correctly spliced transcript, we examined the RNA using qRT-PCR with primers mapping to opposite sides of the intron region (var2csa x-intron; Figure 4A). No difference in amplification was observed between the var2csa exon specific primers and the var2csa x-intron primers, suggesting the presence of a spliced transcript. Northern blot was further used to verify the full-length feature of the var2csa transcript. Abundant var2csa transcripts of approximately 13 kb were present in the sub-clones, a molecular size corresponding to that of var-gene mRNA in CSA selected parasites ( Figure 4C and not shown). We thus conclude that the samples do not contain any DNA contaminants or unspliced transcripts implying functionality of the var2csa mRNA detected. Sparse PfEMP1 production and lack of PfEMP1 surface expression albeit high level of var2csa transcripts No specific binding of the IE of long-term cultured parasites (<200 generations) was seen to either cells or different host receptors albeit the high level of var2csa transcription detected. Rosetting was absent as was binding to CD31, CD36, ICAM-1, HS, CSA, TSP, CHO-cells and placental tissue (Table 1). 
To confirm the lack of VAR2CSA surface expression, IEs were further studied by flow cytometry using different sera and IgG preparations. IgG specific for DBL1x, DBL2x, DBL3x, DBL5e and DBL6e of VAR2CSA raised in rabbits as well as pooled plasma from P. falciparum-exposed men or multi-gravid women from Ghana were used. The IEs of the original rosetting clone 3D7S8.4 studied at 15 generations was recognized to a similar degree by the plasma pools from men and pregnant women, but not by the different anti-VAR2CSA IgG. However, the nonadhesive IEs of 3D7S8.4 grown for <200 generations in vitro was neither recognized by the pooled patient sera of either sex, nor by the anti-VAR2CSA IgGs. The same was true for IEs of the subclones 3D7S8.4.1, 3D7S8.4.2 and 3D7S8.4.17 ( Figure 5A & B). In contrast, IE of the control parasite FCR3CSA and Gb337CSA that had been repeatedly selected for adhesion to CSA showed good differential reactivity with the plasma pool obtained from pregnant women versus those of men and was well-recognized by IgG raised to the DBL3x, the DBL5e and the DBL6e domain of VAR2CSA ( Figure 5A & B and data not shown). Having found that there was no serum-or IgG reactivity with the IE surface of the long-term cultivated parasites, we subsequently performed immunoblot analysis to investigate . This finding implies that the translational products of var2csa in the long-term cultured parasites are significantly controlled through posttranscriptional mechanisms . . The transcription levels of var common and of var-genes that were dominantly expressed at particular time points are shown. The adhesive phenotypes of the IE of the two parasites were followed for rosetting, CSA-and CD36-binding as described in Table 1 Activation and silencing of the var2csa gene involve subnuclear repositioning Repositioning of var loci within the sub-nucleus has been found associated with the expression of subtelomerically located var-genes . 
We consequently studied the var-genes expressed in the cloned parasites (PFD0630c; PFF0845c) and the location of the telomere-repeats (rep20) using two-color fluorescent in situ hybridization (FISH; Figure 6). The great majority (.90%) of PFD0630c and PFF0845c were located at the rim of the nuclei (referred to as zone A by Ralph et al. ). Neither of the genes, which were of the upsC type and located internally on chromosomes 4 and 6, did however reposition in respect to telomere-repeats/heterochromatin as their transcriptional states changed. Their peri-nuclear gene locations ( Figure 6C) were the same in parasites obtained immediately after cloning and in those obtained after long-term growth. Approximately half of the genes were located within the telomeric clusters/heterochromatin and approximately half outside of it (,600 nuclei /gene counted). In contrast the var2csa gene was found to be associated with the telomeric clusters (rep20) at onset of the experiment while after long-term cultivation (194-206 generations), the activated var2csa gene dissociated away from the telomeric clusters and heterochromatin in more than 80% of the parasites ( Figure 6D). As for the upsC var-genes, var2csa was located at the rim of the nuclei. Switching to var2csa is independent of genomic rearrangement Using the microarray we monitored the parasite for global transcriptional changes throughout the study. Some of the genes were found down-regulated (Table S1) upon long-term in vitro propagation. 
Still, the skeleton-binding protein 1 (SBP1), previously shown to be required for PfEMP1 trafficking , was found transcribed at similar levels throughout the experiment and normal levels of RNA were present for genes encoding KAHRP and PfEMP3 when the parasites at early generations switched to transcribe var2csa (Table S1, Figure S1) suggesting that the lack of binding of the IE and surface expression was not due to deficient PfEMP1 transport at least in the ,50-80 generation parasites ( Figure 1). However, the transcription of the knob-associated histidine rich protein (KAHRP) and PfEMP3 was found absent in the 199 generation parasites due to a possible late spontaneous deletion of the left arm of chromosome 2 ( ). Complementary comparative genomic hybridizations between 3D7S8.4 of 19 and 56 generations revealed only four genes (PFC1000w, PFE0635c, PF08_0138, PF10_0229) to have undergone either deletion or sequence polymorphisms in the later generations (not shown). This suggests that switching to var2csa happened before any major genomic rearrangements occurred, involving genes encoding proteins such as KAHRP or PfEMP3. Discussion If it were so that a microbe would express antigenic variants in an un-coordinated manner the host would shortly raise an immune response to each and every variant and clear the infection. Parasites have consequently developed means to handle this host challenge. P falciparum, for example, successfully evades immune recognition of PfEMP1 by combining allelic exclusion of the var-genes with switching, yet the exact mechanisms by which this occurs are not known. To increase our understanding on vargene switching we performed a systematic analysis of the transcriptional profile of var-genes over time in phenotypically distinct 3D7 parasite clones. In the absence of selective pressure, 95% of the parasites derived from a single clone eventually switched to transcribe a single member of the var-gene family, PFL0030c or var2csa. 
However, albeit the high levels of var2csa transcripts, the relative abundance of translated product was sparse and no PfEMP1 surface expression was observed, implying that var2csa may be a target for post-transcriptional regulation. Sequence analysis of the 3D7 genome has revealed the var-gene repertoire to fall into different classes depending on chromosomal location, the orientation of the gene as well as on the upstream flanking sequence, the latter known as upsA, -B, -C, -D and -E. The two var-genes expressed in our parasites at onset belonged to the chromosomal internal cluster upsC var-genes (PFD0630c and PFF0845c). A recent study demonstrated that different var-genes have different intrinsic switching rates and that var-genes in centromeric location have higher ''on'' rates and lower ''off'' rates . We found that this phenomenon does not apply to all central var-genes, at least not in the case of PFD0630c. In the present study, off-switching in 3D7S8.4 parasites was observed already at 33 generations of growth (,10 weeks). Furthermore, although the same var-genes (PFF0845c/MAL6p1.252) and the same genotypic parasites (here 3D7AH1S2) were monitored in both studies, the ''off'' rates were considerably dissimilar. In the present study the parasites had switched from the dominant var-gene (PFF0845c) already at 48 generations (,14 weeks), whereas in a study by Frank et al the expression of the same dominant var-gene remained unchanged up to 66 generations (19 weeks). These findings may indicate that the switching rate is not inherited on genomic level but is rather under epigenetic control. An inter-relationship between the ''off'' and the ''on'' rates was found where ''off'' rates of the original var-genes were about twice those of the var2csa ''on'' rates, arguing that switching to other intermediate var-genes also potentially occurred. 
Still, var2csa expression was dominant in the late generation parasites, which may in part be due to differences in proliferation rates in-between the var2csaand non-var2csa expressing clones (Figure 7). It is possible that the higher multiplication rates in the var2csaexpressing clones endowed these parasites with a selective growth advantage over other switch variants. Further, we do not know whether the var2csa locus in the late generation parasites (3D7S8.4.2-17 sub-clones) would remain activated until an episode of exogenous challenge or whether they spontaneously would switch at a slow rate to another var variant. Upon panning of some of the sub-clones on placental sections or on CD36 expressing cells we could not retrieve any major binding IE populations (data not shown). This may suggest that this group of parasites does not revert back to PfEMP1 expression or that other types of stimuli may be required. It has to be noted that compared to other strains, such as HB3 and FCR3, it is difficult to retrieve 3D7 parasites panned on CSA . On the other hand, up-regulation of var2csa transcription in 3D7 without selection is readily observed . As both parasite clones studied herein expressed upsC var-genes at onset and both eventually converged to var2csa transcription, and given that the var switching history of a particular parasite has been shown to result in a particular switching rate , it is possible that switching to var2csa is favored in parasites previously transcribing a particular group of var-genes (upsC). This, however, remains to be confirmed with a larger set of parasites both ex vivo and in vitro. Recent data suggest that transcription of in particular upsC var-genes predominate in children with asymptomatic infections . 
Interestingly var2csa transcription, although mainly associated with pregnancy isolates, has occasionally been observed in samples from children with acute malaria and in peripheral-blood parasites of experimentally infected humans . Moreover, performing a pilot study on other in vitro adapted parasites (FCR3, TM180 and 7G8), we found trace amounts of var2csa in FCR3 after 50 generations of growth without any selective pressure (data not shown). Repositioning of var loci within the peri-nuclear area has been found to be associated with the expression of subtelomerically located var-genes. Similarly, the var2csa gene was here discovered to reposition from the telomeric clusters and the condensed heterochromatin to occupy a suggested site of transcription, supporting the notion that translocation of the var2csa gene into a suggested transcription site may in certain cases silence the transcription of other var-genes. However, although var2csa repositioned and was highly transcribed, the sparse presence of translational products and absence of surface expression imply an additional layer of control, which remains to be further investigated. The exploitation of transcriptional repression as a post-transcriptional regulatory mechanism has previously been described only for transcripts of P. berghei gametocyte origin . There is today good evidence to suggest that the var2csa gene encodes an important PfEMP1-species, a ligand associated with the sequestration of IEs in PAM . This gene is however atypical to other var-genes and apart from having a different domain-architecture and an uncommon upstream flanking region it is remarkably conserved across different plasmodium species. A var2csa ortholog is for example present in P. reichenowi, a parasite which has been predicted to have diverged from P. falciparum several million years ago . 
The expression of var2csa transcripts in isolates of children and adults during malaria infections and the obvious lack of antibody responses to the corresponding encoded protein in these groups of patients suggest the presence of var2csa off-switching in vivo. Yet, under what circumstances would a functional transcript avoid to produce a translational product, and is it compatible with parasite growth in vivo during an infection that an IE does not express a surface PfEMP1? One might speculate that in vivo a temporal expansion of parasites expressing a posttranscriptionally regulated gene may coordinate PfEMP1 expression by permitting splenic clearance of the off-switch IEs thus giving only the remaining minute populations of other variants a possibility to survive. Such a pathway, among others (see Figure 8), were determined using an unpaired Students t-test at a 5% significance level. Analysis was performed in SPSS version 12.0. *P,0.05; Significantly higher multiplication rate was observed in the late S2clone. The same trend was also observed in the other VAR2CSA expressing late generation clone S8.4.2, the difference here however was not statistically significant (P = 0.067). doi:10.1371/journal.pone.0001982.g007 would protect against the rapid exhaustion of the variant-antigen repertoire. It is possible that among the var-genes solely var2csa can be translationally repressed to avoid exposure of the corresponding PfEMP1 to the host immune system prior to pregnancy. The suppression may be released upon pregnancy by specific factors or conditions in the placenta allowing for expansion of parasites translating the PfEMP1var2CSA and expressing it at the IE surface. In the future, it will be interesting to study the prevalence of off-switching in different isolates including those obtained from splenectomized donors, and further assess what role posttranscriptional regulation has for different genes across the life cycle stages of P.falciparum. 
Parasites and RNA preparation The parasite 3D7AH1S2 was generated by multiple panning of 3D7AH1 parasites on CD36-CHO transfectants, followed by micro-manipulation cloning while 3D7S8.4 was selected twice from 3D7 parasites for the rosetting phenotype by micromanipulation cloning. FCR3CSA was selected by repeated panning of FCR3 IE on CSA-coated Petri dishes . 17 subclones (3D7S8.4.1-17) were randomly picked by micromanipulation from 3D7S8.4 after 200 generations of in vitro growth. Phenotypic characterization of the parasites was performed as described elsewhere using trophozoite IE at 5-8% parasitemia. For RNA preparation, parasites were first tightly synchronized during three consecutive rounds of 5% D-sorbitol treatment. Parasites were then collected as trophozoite-IE (2464 hours) and total RNA was isolated using Trizol reagent (Invitrogen, Carlsbad, CA) and stored at 280uC. Microarray The P. falciparum genome microarray was used . 20 mg of total RNA was labeled following an amino-allyl dye coupling protocol. Hybridization and washing were performed using a Lucidea automated slide processor (Amersham Biosciences) and slides were scanned with a GenePix 4000 B scanner (Axon Instruments). At least one dye-swap experiment per generation post-cloning was performed. Signals and local background intensities from each spot were retrieved using GenePix Pro 5.1 software (Axon Instruments). Spots that passed the quality controls (default settings), together with visual inspection, were analyzed using GeneSpring 6.1 (Silicon Genetics). Local background filtering was applied and LOWESS was adopted to normalize the data . Protocols and microarray data are publicly available at www.ebi.ac.uk/arrayexpress: A-MEXP-75 and E-MEXP-1199. Quantitative real-time RT-PCR Total RNA was treated with Deoxyribonuclease I (Ambion) to remove contaminating DNA. 
cDNA synthesis was performed with 1 µg of total RNA and reverse transcribed using Superscript III Reverse Transcriptase (Invitrogen) with random primers and OligoDT (Invitrogen) as described by the manufacturer. For qRT-PCR, we employed the primer set of Salanti et al. and conducted the experiment as previously described . An additional primer pair targeting the flanking regions of var2csa (var2csa x-intron: forward 5′-AATAATACCAGTGACATTCTGCAAAA-3′ and reverse 5′-ACACGTAAAAGGTCCACAGGTG-3′) was added for evaluation of splicing and DNA contamination. All experiments included primers for two housekeeping genes, seryl-tRNA synthetase and fructose-bisphosphate aldolase, as endogenous controls. Levels of var transcription were calculated by the ΔCt method, in which the Ct (cycle threshold) for each var-gene was compared with that for the endogenous controls. Northern Blot 3D7 var2csa (PFL0030c) was PCR amplified from 3D7 genomic DNA with primers PFL0030c/F (AAATGGAAATCCGAATGGG) and PFL0030c/R (TGAGTCAAGGGTGTGTTCTTGGGGG-TAAACC) and then ligated into a T7 3′ element employing the TOPO-Tools system (Invitrogen) as recommended by the manufacturer. Digoxigenin-labeled anti-sense RNA was produced using the Dig RNA labeling kit (Roche). Total RNA (2 µg) from trophozoite stage parasites was transferred to nylon membranes (Roche). Blots were pre-hybridized in Dig Easy Hyb buffer and then probed with 100 ng/ml Digoxigenin-labeled anti-sense RNA in Dig Easy Hyb buffer (Roche) at 65°C overnight. After hybridization membranes were washed twice with 2× SSC, 0.1% SDS at RT and twice with 0.5× SSC, 0.1% SDS at 65°C. Hybridized RNA was detected with the Dig Luminescent detection kit (Roche) as described by the supplier. SDS-PAGE and immunoblot analysis Pigmented IEs were harvested using a MACS magnetic cell sorter (Miltenyi BioTec) and solubilized in reducing SDS sample buffer. 
The total parasite extracts were separated on 3-8% tris-acetate gradient gels (Invitrogen) and transferred to nitrocellulose membranes. Membranes were blocked in 5% non-fat milk powder and probed with a mouse monoclonal raised against the conserved Cterminal acidic-terminal-sequence (ATS) of PfEMP1s (1:250) (a kind gift from A. Cowman). The membranes were stripped and re-probed with a mouse anti-Pfhsp70 (1:2000) (a kind gift from C. Fernandez). The antibody was raised against the C-terminal part of hsp70 and differentially recognizes P. falciparum and not human Hsp70 . Detection by enhanced chemiluminescence (ECL; Amersham Biosciences) was performed after secondary detection with sheep anti-mouse Ig-HRP conjugate (1:5000, Amersham Biosciences). Flow cytometry Variant surface antigen expression was tested by flow cytometry using plasma pools from P. falciparum-exposed women or men, or non-infected human serum. Specific VAR2CSA surface expression was assessed using antisera to VAR2CSA DBL1-3x, ID2 (inter-domain between DBL2x and DBL3x) and DBL5-6e. In brief, 2610 5 late-stage IEs were purified (to .75% parasitemia) by exposure to a strong magnetic field (Miltenyi BioTec), stained with ethidium bromide (Sigma) and sequentially incubated with 5 ml of plasma or antisera, 0.4 ml of goat anti-human IgG (Dako) and 4 ml of fluorescein isothiocyanate (FITC)-conjugated rabbit anti-goat IgG (Dako) in a total volume of 100 ml. Samples were washed 2 times in PBS with 2% fetal calf serum between each antibody incubation step as described elsewhere . All plasma samples were tested simultaneously with each parasite isolate. Fluorescent in situ hybridization (FISH) FISH was conducted according to previously described methodology with minor modifications . 
The dsDNA probes targeting PFD0630c, PFF0845c and PFL0030c/var2csa were amplified using the specific primers PFD0630c/F (59-GAT GAC GAC AAG CCA AAT ACC-39), PFD0630c/R (59-ACA TAA TCC GCC TCC AGT TC-39), PFF0845c/F (59-CGT TGG ATG ACT GAA TGG TCC G-39), PFF0845c/R (59-TCA CCG AGG TCT ATG CTG AAC TGG-39), PFL0030c/F (59-AAG GAT AGA ATG GAA TGG AAT GAG C-39) and PFL0030c/R (59-CAC CAA TCG TCA ACT TTT TCG G-39). PCR products were cloned into pCRII-TOPO vector and transformed into One Shot TOP10 Chemically Competent Cells (Invitrogen) according to the supplier's recommendations. Plasmids containing var-gene fragments were labeled using the Fluorescein-High Prime and Rep20 containing pUC9 plasmids (a kind gift from A. Scherf) with Biotin-High Prime kit (Roche Applied Science). Highly synchronous parasites (1862 hours post invasion) were isolated from their host erythrocytes using saponin (0.05% w/v) and deposited as monolayers on Poly-L-lysine coated microscope slides (Menzel-Glä ser). Air-dried monolayers were fixed with 4% paraformaldehyde (PFA) for 15 minutes at room temperature and treated with RNase (20 mg/ml in 26SSC) for 30 minutes at 37uC. 100 ng of labeled and heat denatured probe (in 50% deionized formamide, 10% dextran sulphate, 16SSC and 250 mg/ml herring sperm DNA) were added to parasite preparations before hybridization at 95uC for 3 minutes and at 37uC for 12 hours. After hybridization, parasites were washed twice in 50% deionized formamide/26SSC at 45uC, twice in 26SSC first at 45uC then 50uC and once in 46SSC at room temperature. Blocking with 1% BSA in PBS was followed by incubation with Avidin-Rhodamine (Roche Applied Science) in 1% BSA in PBS for detection of the biotinylated Rep20 probes. Parasites were finally washed once in 1% BSA in PBS and twice in a solution of 100 mM Tris-HCl/150 mM NaCl/0.5% (v/ v) Tween 20 at room temperature and mounted in Vectashield (Vector Laboratories). 
Preparations were visualized using a Leica DMRE microscope and imaged with a Hamamatsu C4880 cooled CCD camera. Multiplication rate determination All parasite clones were initially synchronized with 5% Dsorbitol treatment. Each clone was diluted to a low level of parasitemia (geomean = 0.3%) at schizont stage and thin blood smears were prepared. Cultures were subsequently incubated for 12-16 hours to allow invasion of RBCs and blood smears of the ring-stage parasites were prepared. The smears were stained with Giemsa and the number of IEs with schizonts (parasitemia start ) or rings (parasitemia new ) were assessed visually by light microscopy. A minimum of 1000 IEs were counted per smear (geomean = 1500). Data from a minimum of three biological replicates were generated for each clone. The multiplication rate for each clone was obtained by calculating the fold change in parasitemia (parasitemia new /parasitemia start ) for each 48 hour cycle. Relative estimation of transcript versus protein abundance The plot in Figure 5D was generated using the quantitative realtime PCR data to calculate the relative transcript copy number of the dominant transcripts in each parasite clone. Protein expression levels of the corresponding transcripts were assessed using a semiquantitative approach based on measurements of the signal intensities of the PfEMP1 bands on western blots. Relative quantity (RQ) was obtained by dividing the target quantity with the quantity of the endogenous control in each sample. Seryl t-RNA synthetase and Pfhsp70 were used as endogenous controls for transcript versus protein data respectively.
def select_from_component(cls, component: Any):
    """Look up the interface registered for ``component``'s flavor.

    Walks ``cls.interfaces`` (a mapping of flavor type -> interface, in
    registration order) and returns the interface mapped to the first
    flavor that ``component`` is an instance of, or ``None`` when no
    registered flavor matches.
    """
    return next(
        (
            interface
            for flavor, interface in cls.interfaces.items()
            if isinstance(component, flavor)
        ),
        None,
    )
def forward(self, words=None, z=None):
    """Generate a batch of fake word images with the generator network.

    Args:
        words: optional list of words (str) to render; when None, words are
            drawn from the lexicon ``self.lex`` using freshly sampled labels.
        z: optional latent/style tensor; when None, the stored ``self.z`` is
            resampled (unless a single fixed writer is configured).

    Side effects: rebinds ``self.fake``, ``self.words``,
    ``self.text_encode_fake``, ``self.len_text_fake`` and, in one-hot mode,
    ``self.one_hot_fake``.
    """
    # Drop tensors from the previous forward pass so they can be freed.
    if hasattr(self, 'fake'):
        del self.fake, self.text_encode_fake, self.len_text_fake, self.one_hot_fake
    # Resample the fake word labels in place for this batch.
    self.label_fake.sample_()
    if words is None:
        # Map each sampled label index to its lexicon word.
        words = [self.lex[int(i)] for i in self.label_fake]
        if self.opt.capitalize:
            # Randomly (p=0.5) capitalize the first letter; the NFKD + ascii
            # round-trip strips accents the character set may not contain.
            for i, word in enumerate(words):
                if random.random()<0.5:
                    word = list(word)
                    word[0] = unicodedata.normalize('NFKD',word[0].upper()).encode('ascii', 'ignore').decode("utf-8")
                    word = ''.join(word)
                    words[i] = word
        # Downstream converter expects raw utf-8 bytes, not str.
        words = [word.encode('utf-8') for word in words]
    if z is None:
        # Resample the stored latent unless a single writer's style is fixed.
        if not self.opt.single_writer:
            self.z.sample_()
    else:
        if z.shape[0]==1:
            # Broadcast a single style vector across the whole batch.
            # NOTE(review): the second repeat() below duplicates the first —
            # looks like an accidental copy-paste; the result is unchanged
            # since the call is deterministic, but confirm and remove.
            self.z = z.repeat((len(words), 1))
            self.z = z.repeat((len(words), 1))
        else:
            self.z = z
    self.words = words
    # Encode the words into integer label sequences plus per-word lengths.
    self.text_encode_fake, self.len_text_fake = self.netconverter.encode(self.words)
    self.text_encode_fake = self.text_encode_fake.to(self.device)
    if self.opt.one_hot:
        self.one_hot_fake = make_one_hot(self.text_encode_fake, self.len_text_fake, self.opt.n_classes).to(self.device)
        # NOTE(review): bare except silently swallows generator failures and
        # only prints the offending words — consider narrowing or re-raising.
        try:
            self.fake = self.netG(self.z, self.one_hot_fake)
        except:
            print(words)
    else:
        self.fake = self.netG(self.z, self.text_encode_fake)
import styled, { keyframes } from 'styled-components/macro'
import { Page } from 'styles'

// Home page container: a centered variant of the shared Page layout.
export const HomeStyled = styled(Page)`
  text-align: center;
`

// Endless background-position sweep (0% -> 100% -> 0%) used to animate the
// gradient image behind the title text.
const gradiant = keyframes`
  0% {
    background-position: 0% 0%;
  }
  50% {
    background-position: 100% 100%;
  }
  100% {
    background-position: 0% 0%;
  }
`

// Large headline whose glyphs are "filled" with an animated gradient image:
// background-clip: text plus a transparent text fill let the background show
// through the letters, while the keyframes above slowly pan the oversized
// (300%) background for a shimmering effect.
export const HomeTitle = styled.div`
  width: 100%;
  text-align: center;
  margin: 100px auto 0px auto;
  font-size: 80px;
  font-weight: bold;
  background-image: url('/images/gradiant.png');
  -webkit-background-clip: text;
  background-clip: text;
  background-size: 300% 300%;
  -webkit-text-fill-color: transparent;
  animation: ${gradiant} 30s ease-in-out infinite;
`

// Two-column, centered button grid capped at 600px (80vw on narrow screens).
export const HomeButtons = styled.div`
  display: grid;
  grid-template-columns: 1fr 1fr;
  grid-gap: 20px;
  width: 600px;
  max-width: 80vw;
  margin: auto;
`
/** * @author Christoph Strobl * @author Mark Paluch * @since 2.0 */ class LettuceReactiveClusterListCommands extends LettuceReactiveListCommands implements ReactiveClusterListCommands { /** * Create new {@link LettuceReactiveClusterListCommands}. * * @param connection must not be {@literal null}. */ LettuceReactiveClusterListCommands(LettuceReactiveRedisConnection connection) { super(connection); } /* (non-Javadoc) * @see org.springframework.data.redis.connection.lettuce.LettuceReactiveListCommands#bPop(org.reactivestreams.Publisher) */ @Override public Flux<PopResponse> bPop(Publisher<BPopCommand> commands) { return getConnection().execute(cmd -> Flux.from(commands).concatMap(command -> { Assert.notNull(command.getKeys(), "Keys must not be null!"); Assert.notNull(command.getDirection(), "Direction must not be null!"); if (ClusterSlotHashUtil.isSameSlotForAllKeys(command.getKeys())) { return super.bPop(Mono.just(command)); } return Mono.error(new InvalidDataAccessApiUsageException("All keys must map to the same slot for BPOP command.")); })); } /* (non-Javadoc) * @see org.springframework.data.redis.connection.lettuce.LettuceReactiveListCommands#rPopLPush(org.reactivestreams.Publisher) */ @Override public Flux<ByteBufferResponse<RPopLPushCommand>> rPopLPush(Publisher<RPopLPushCommand> commands) { return getConnection().execute(cmd -> Flux.from(commands).concatMap(command -> { Assert.notNull(command.getKey(), "Key must not be null!"); Assert.notNull(command.getDestination(), "Destination key must not be null!"); if (ClusterSlotHashUtil.isSameSlotForAllKeys(command.getKey(), command.getDestination())) { return super.rPopLPush(Mono.just(command)); } Mono<ByteBuffer> result = cmd.rpop(command.getKey()) .flatMap(value -> cmd.lpush(command.getDestination(), value).map(x -> value)); return result.map(value -> new ByteBufferResponse<>(command, value)); })); } }
/**
 * Minimal example interface.
 *
 * <p>Fields declared in an interface are implicitly {@code public static final},
 * so {@code x} is a constant and cannot be reassigned.
 */
public interface A01 {

    /** Constant (implicitly public static final); its value cannot be changed. */
    int x = 100;

    /** Supplies this implementation's data; implementing classes define the behavior. */
    void getAData();
}
// TransposeDryRun print diff to replace import statement ( not overwriting ) func (t *Transposer) TransposeDryRun(matchPattern *regexp.Regexp, searchRoot string, ignorePaths []string, transposeFunc func(packageName string) string) error { inspectResults, err := t.Inspector.Inspect(matchPattern, searchRoot, ignorePaths) if err != nil { return errors.WithStack(err) } return errors.WithStack(t.Rewriter.Rewrite(inspectResults, true, transposeFunc)) }
#ifndef UNIT_VERILOG_TEST_PARSE_W_LISTENER_HPP #define UNIT_VERILOG_TEST_PARSE_W_LISTENER_HPP #include "./test_design.hpp" #include "verilog/types/design.hpp" #include "verilog/types/design_reader.hpp" #include "parse/util/static_string.hpp" #include "SV2012BaseListener.h" #include "SV2012Lexer.h" #include "SV2012Parser.h" #include "verilog/antlr_runtime.hpp" #include <catch2/catch.hpp> #include <range/v3/view/zip.hpp> #include <string> #include <string_view> using namespace Verilog::Test; using namespace Verilog; using namespace antlr4; using namespace antlr4::tree; namespace Verilog::Test { using namespace Verilog; template <typename TListener> void parse_w_listener(std::shared_ptr<DesignReader> test_out_p, std::string parse_input, std::string parse_source, bool debug_output = false) { INFO("Setup parse"); ANTLRInputStream input(parse_input); SV2012Lexer lexer(&input); lexer.removeErrorListeners(); CommonTokenStream tokens(&lexer); auto parser_sp = std::make_shared<SV2012Parser>(&tokens); parser_sp->removeErrorListeners(); INFO("Parse input into parse tree"); auto *tree = parser_sp->source_text(); if (debug_output) { Parse::Util::debug_print("DEBUG: parse tree start :\n"); Parse::Util::debug_print(tree->toStringTree(parser_sp.get())); Parse::Util::debug_print("DEBUG: parse tree end :\n"); } INFO("Initialize listener"); TListener listen(parser_sp, test_out_p, parse_source); INFO("Walk tree with listener"); ParseTreeWalker walker; walker.walk(&listen, tree); } } // namespace Verilog::Test #endif // UNIT_VERILOG_TEST_PARSE_W_LISTENER_HPP
// LatexFromTexts returns latex from texts. func LatexFromTexts(drawex core.Drawex, texts []*component.Text) *string { var latex string for _, item := range texts { latex = stringutil.Concat(latex, drawex.DrawText(item)) } return &latex }
My intention in writing about Bitcoin in these pages each week is to make the subject as boring as possible. Not the articles, of course; I hope that they will be lively and entertaining, with just the right balance of humor and gravitas and some serious discussion of weighty issues, leading to fabulously successful recommendations. Such is the way that those who write for a living dream. No, what I want to make boring is the subject matter itself. It seems to me that the day is long overdue when Bitcoin commentary moves beyond the sensational and into the mundane. It is a currency and an actively traded instrument, albeit a unique one, and as such the price action and the influences on that action are of the most interest to me. That said, though, my first attempt at taking a neutral, trader’s approach, or rather the reaction to it, showed the danger inherent in that approach. That reaction showed me that it is dangerous to make assumptions about the knowledge of others when writing on the subject and to be anything other than crystal clear about the things that make it unique. I will, in future, attempt to be clearer about the advantages and disadvantages of the market and the currency itself, even as I attempt to focus on the things that drive the price. If you have an interest in Bitcoin, though, and wish to trade it, you have to have some belief in the basic utility of it as a currency. It is possible to short Bitcoin if that is your view, as was pointed out in the lively discussion that followed last week’s piece referenced above, but in order to start out trading you will probably need an account that contains both a fiat currency of some kind and Bitcoins. That presupposes that you accept that while the relative value of Bitcoins to other currencies may fluctuate, the concept itself has legitimacy and that the Bitcoins you buy and sell have some intrinsic value. If you cannot accept this then what I write is not for you. 
Similarly, if you have a fundamental distrust of fiat currency issued by central banks and see Bitcoin as a store of value to guard against inflation, or as a potential savior of the world, then trading, as opposed to amassing, Bitcoin will be anathema, and, once again, my writing will not appeal. If, on the other hand, like me, you believe that the 75,000 businesses that Bitpay and Coinbase report as accepting Bitcoin payments indicates that it is around to stay, and that the algorithm that limits issuance, as detailed here, ensures some intrinsic value, then it is only the relative value of the currency that matters to you, and trading Bitcoin becomes more like trading any other currency. In that case, you will need an account with a broker that offers Bitcoin trading. I cannot make specific recommendations here, but this Forex News comparison should give you a good idea of the pros and cons of the leading brokers. It should be said that the volatility that has been seen in the Bitcoin market, particularly over the last couple of years, makes trading in the market inherently risky, so, from a trading perspective, only money that will not materially affect your lifestyle should be used. This is still a young market, with much controversy surrounding it, and, as of yet, little regulation of the major players. That is not a unique situation. The interbank currency market where I started my career in the early 1980s was also lightly regulated and the value of currencies backed only by the full faith and credit of the issuing nations was still not understood and questioned by some at that time. That market, however, grew to be the largest in the world by volume. With that disclaimer out of the way, though, I do believe that Bitcoin represents a good trading opportunity for those with a trader’s mindset and skill set, and I look forward to analyzing the market in as boring a way as possible over the coming weeks and months. Boring may be hard, though. 
I am already aware that the Bitcoin community is passionate and vocal and I encourage you to remain so. Sometimes I will no doubt take a bearish view and sometimes bullish. Feel free to disagree with me either way and voice your reasons in the comments section. There are, after all, always two sides to every trade and the Bitcoin market is no exception.
/*
 * Insert a constraint edge between the vertices with the given ids.
 *
 * If the two vertices are already joined by a triangulation edge, that edge
 * is simply marked as the constraint (its id set to e_id).  Otherwise, every
 * non-constraint edge crossed by the segment v_start -> v_end is removed,
 * the constraint edge is inserted in their place, and the two polygonal
 * holes left on either side are retriangulated.
 *
 * Throws ex_invalid_argument if the constraint duplicates an existing
 * constraint edge, crosses another constraint edge, or passes through a
 * third collinear vertex.
 */
void triangulation::insert_edge(
   unsigned long v_start_id,
   unsigned long v_end_id,
   unsigned long e_id)
{
   /* look up endpoint vertices by id */
   vrtx& v_start = (*_vertices)[v_start_id];
   vrtx& v_end = (*_vertices)[v_end_id];
   /*
    * Rotate around v_start's edge ring to find e_left: the first incident
    * edge (scanning the origin ring) such that v_end is not strictly right
    * of it.  The "break" guards stop a full lap around the ring.
    */
   sym_edge<vrtx,edge>* e_left = v_start.e;
   while (triangulation::right_of(v_end, *e_left)) {
      e_left = &(e_left->origin_prev());
      if (e_left == v_start.e) break;
   }
   while (!(triangulation::right_of(v_end, *e_left))) {
      e_left = &(e_left->origin_next());
      if (e_left == v_start.e) break;
   }
   /* symmetric scan in the other rotational direction to find e_right */
   sym_edge<vrtx,edge>* e_right = v_start.e;
   while (triangulation::left_of(v_end, *e_right)) {
      e_right = &(e_right->origin_next());
      if (e_right == v_start.e) break;
   }
   while (!(triangulation::left_of(v_end, *e_right))) {
      e_right = &(e_right->origin_prev());
      if (e_right == v_start.e) break;
   }
   /*
    * Pick e, the incident edge whose sector contains the direction toward
    * v_end (e_left/e_right bracket that direction); the degenerate branches
    * handle the case where the ring scans over/under-shot.
    */
   sym_edge<vrtx,edge>* e = NULL;
   if (triangulation::right_of(v_end, *e_left)) {
      e_right = &(e_left->origin_prev());
      e = e_right;
   } else if (triangulation::left_of(v_end, *e_right)) {
      e_left = &(e_right->origin_next());
      e = e_left;
   } else {
      e = e_left;
      if (&(e->destination()) != &v_end)
         e = &(e->origin_next());
   }
   if (&(e->destination()) == &v_end) {
      /*
       * An edge to v_end already exists - just flag it as a constraint.
       * NOTE(review): ids below _edges->size() appear to mark edges already
       * claimed as constraints (non-constraint edges presumably carry ids
       * >= _edges->size()) - confirm against the edge-id initialization
       * elsewhere in this class.
       */
      unsigned long n_e = _edges->size();
      if (e->data().id < n_e)
         throw ex_invalid_argument("duplicate constraint edges specified");
      e->data().id = e_id;
      e->sym().data().id = e_id;
   } else if (v_end.p.orientation(e->origin().p, e->destination().p) == 0) {
      /* v_end is collinear with an existing edge out of v_start */
      throw ex_invalid_argument(
         "constraint edge specified to pass through 3 collinear vertices"
      );
   } else {
      /*
       * General case: walk from v_start toward v_end, disconnecting every
       * crossing edge.  e_l / e_r track the boundary edges of the growing
       * cavity on the left and right sides of the segment.
       */
      list< sym_edge<vrtx,edge> > removed_edges;
      sym_edge<vrtx,edge>* e_l = e_left;
      sym_edge<vrtx,edge>* e_r = e_right;
      unsigned long n_e = _edges->size();
      while (true) {
         /* next edge crossing the segment; constraints must not be crossed */
         e = &(e_r->left_next());
         if (e->data().id < n_e)
            throw ex_invalid_argument("constraint edges cross");
         e->disconnect();
         removed_edges.add(*e);
         /* advance cavity boundary on whichever side v_next falls */
         sym_edge<vrtx,edge>* e_l_next = &(e_l->right_prev());
         sym_edge<vrtx,edge>* e_r_next = &(e_r->left_next());
         vrtx& v_next = e_l_next->destination();
         int orient =
            v_next.p.orientation(v_start.p, v_end.p);
         if (orient > 0) {
            /* v_next is left of the segment */
            e_l = e_l_next;
         } else if (orient < 0) {
            /* v_next is right of the segment */
            e_r = e_r_next;
         } else if (&v_next == &v_end) {
            /* reached the far endpoint - cavity complete */
            e_l = e_l_next;
            e_r = e_r_next;
            break;
         } else {
            /* collinear interior vertex on the constraint segment */
            throw ex_invalid_argument(
               "constraint edge specified to pass through 3 collinear vertices"
            );
         }
      }
      /*
       * Recycle one removed edge as the constraint edge itself, splice it
       * between the cavity boundaries, and record it on both endpoints.
       */
      sym_edge<vrtx,edge>& e_base = removed_edges.remove_head();
      e_base.reconnect(e_left->sym(), e_l->sym());
      e_base.data().id = e_id;
      e_base.sym().data().id = e_id;
      v_start.e = &e_base;
      v_end.e = &(e_base.sym());
      /* retriangulate the polygonal holes on both sides of the constraint */
      triangulation::triangulate_polygon(e_base, removed_edges);
      triangulation::triangulate_polygon(e_base.sym(), removed_edges);
   }
}
// SetupViper configures and returns the process-wide viper instance.
//
// It wires together three configuration sources, in increasing precedence:
// a "config" file in the current directory, environment variables (optionally
// namespaced by envPrefix, with "." in keys mapped to "_"), and command-line
// flags. Any configuration failure is fatal: the errors helper's FailIfErr
// terminates instead of returning an error.
//
// Note that this mutates global state (viper's singleton and the process
// flag sets), so it should be called exactly once at startup.
func SetupViper(envPrefix string) *viper.Viper {
	viper.SetConfigName("config")
	viper.AddConfigPath(".")
	if envPrefix != "" {
		viper.SetEnvPrefix(envPrefix)
	}
	// Allow env vars like PREFIX_PROXY_PORT to satisfy key "proxy.port".
	viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
	viper.AutomaticEnv()
	viper.SetDefault("proxy.port", 8080)
	viper.SetDefault("proxy.api-prefix", "/")
	// Register the flag on the stdlib set, then merge it into pflag so it is
	// parsed alongside any other pflag-registered flags.
	flag.String("endpoint", "", "The gRPC Backend service endpoints to proxy.")
	pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
	pflag.Parse()
	// NOTE(review): BindPFlags returns an error that is discarded here.
	viper.BindPFlags(pflag.CommandLine)
	err := viper.ReadInConfig()
	if err != nil {
		// Missing/unreadable config file is fatal.
		errors.New("Could not read config", err).FailIfErr()
	}
	endPoint := viper.GetString("endpoint")
	// Only sanitize the prefix when it was explicitly present in the config
	// file (the default "/" is left untouched).
	if viper.InConfig("proxy.api-prefix") {
		viper.Set("proxy.api-prefix", sanitizeApiPrefix(viper.GetString("proxy.api-prefix")))
	}
	if endPoint == "" {
		errors.New("", errors.NewErr("please provide a non-empty endpoint in your configuration")).FailIfErr()
	}
	// Verify the gRPC backend is reachable once before returning; fatal on failure.
	errors.New("failed to ping grpc endpoint", health.New(endPoint).Once().Do()).FailIfErr()
	return viper.GetViper()
}
/**
 * Reads a single byte from the stream and decodes it as a boolean.
 * A byte value of 0 maps to {@code false} and 1 to {@code true}; any
 * other value (including -1, which {@code read()} returns at end of
 * stream) is rejected as malformed.
 *
 * @param aStream the stream to read one byte from
 * @return the decoded boolean value
 * @throws IOException if the byte is neither 0 nor 1, or on read failure
 */
private static boolean readBoolean(InputStream aStream) throws IOException {
    final int encoded = aStream.read();
    switch (encoded) {
        case 0:
            return false;
        case 1:
            return true;
        default:
            throw new IOException("Malformed boolean.");
    }
}
/*
 * Copyright 2015 the original author or phly.
 * Without formal written consent, no other individual or group may use,
 * copy, modify or publish this software.
 */
package com.phly.common.base.tag;

import java.io.IOException;
import java.util.List;

import javax.servlet.jsp.JspException;
import javax.servlet.jsp.tagext.SimpleTagSupport;

import org.apache.commons.lang.StringUtils;

import com.phly.common.base.model.SysModuleVO;

/**
 * JSP tag that generates the back-office administration menu as an HTML
 * string.  Depending on the {@code type} attribute it renders either a
 * left-hand sidebar menu (type == "left") or a top dropdown menu.
 *
 * @author linyong
 * @since 2015-05-23 11:31:40
 */
public class DefaultMenuTag extends SimpleTagSupport {

	// Fallback title/URI for the dashboard ("home") entry when none is configured.
	private static final String defaultDashboardTitle = "首页";
	private static final String defaultDashboardUri = "index.html";

	// Whether to render the dashboard entry at the top of the left menu.
	private boolean isShowDashboard;
	// Optional override for the dashboard title (falls back to the default).
	private String dashboardTitle;
	// Optional override for the dashboard URI (falls back to the default).
	private String dashboardUri;
	// Menu style selector: "left" for the sidebar, anything else for the top menu.
	private String type;
	// Root modules to render; each may carry a list of child modules.
	private List<SysModuleVO> moduleList;

	/**
	 * Dispatches to the left or top menu renderer based on {@code type}.
	 * Returns an empty string for a null/empty module list.
	 */
	private String markMenu(List<SysModuleVO> moduleList) {
		if (moduleList != null && moduleList.size() > 0) {
			if(StringUtils.isNotEmpty(type) && StringUtils.equalsIgnoreCase(type, "left")){
				return leftMenu(moduleList,isShowDashboard);
			}else{
				return topMenu(moduleList);
			}
		}
		return "";
	}

	/**
	 * Generates the left-hand sidebar menu, recursing into child modules.
	 * Example of the markup produced:
	 *
	 * <li class="current">
	 *   <a href="index.html"> <i class="icon-dashboard"></i> Dashboard </a>
	 * </li>
	 * <li>
	 *   <a href="#">
	 *     <i class="icon-desktop"></i> UI Features
	 *     <span class="label label-info pull-right">7</span>
	 *     <i class="arrow icon-angle-left"></i>
	 *   </a>
	 *   <ul class="sub-menu">
	 *     <li> <a href="general.html"> <i class="icon-angle-right"></i>General </a> </li>
	 *     <li> <a href="buttons.html"> <i class="icon-angle-right"></i>Buttons </a> </li>
	 *     <li> <a href="tabs.html"> <i class="icon-angle-right"></i>Tabs </a> </li>
	 *     <li> <a href="accordions.html"> <i class="icon-angle-right"></i> Accordions </a> </li>
	 *   </ul>
	 * </li>
	 *
	 * @param moduleList modules to render at this nesting level
	 * @param isShowDashboard whether to prepend the dashboard entry
	 *        (only true at the top level; recursive calls pass false)
	 * @return the generated HTML
	 */
	private String leftMenu(List<SysModuleVO> moduleList,boolean isShowDashboard) {
		StringBuffer menu = new StringBuffer();
		// Emit the dashboard ("home") entry first, if enabled.
		if(isShowDashboard){
			menu.append(
				"<li class=\"current\">"+
				"<a href=\"javascript:void(0);\" onclick=\"toModule('"+getDashboardUri()+"')\" >" +
				"<i class=\"icon-home\"></i> "+getDashboardTitle()+
				"</a>"+
				"</li>"
			);
		}
		for (SysModuleVO sysModuleVO : moduleList) {
			List<SysModuleVO> childModule = sysModuleVO.getChildModuleList();
			if (childModule != null && childModule.size() > 0) {
				// Module with children: render a collapsible entry plus a
				// nested sub-menu (recursive call, dashboard suppressed).
				menu.append(
					"<li>"+
					"<a href=\"javascript:void(0);\" onclick=\"toModule('"+sysModuleVO.getModuleUrl()+"')\" >" +
					"<i class=\""+getModuleIcon(sysModuleVO)+"\"></i> "+sysModuleVO.getModuleName()+
					"<span class=\"label label-info pull-right\">"+
					(StringUtils.isNotEmpty(sysModuleVO.getInfoTotal())?sysModuleVO.getInfoTotal():"")+"</span>"+
					"<i class=\"arrow icon-angle-left\"></i>"+
					"</a>"+
					"<ul class=\"sub-menu\">"+
					leftMenu(childModule, false)+
					"</ul>"+
					"</li>"
				);
			} else {
				// Leaf module: plain entry without a sub-menu.
				menu.append(
					"<li>"+
					"<a href=\"javascript:void(0);\" onclick=\"toModule('"+sysModuleVO.getModuleUrl()+"')\" >" +
					"<i class=\""+getModuleIcon(sysModuleVO)+"\"></i> "+sysModuleVO.getModuleName()+
					"<span class=\"label label-info pull-right\">"+
					(StringUtils.isNotEmpty(sysModuleVO.getInfoTotal())?sysModuleVO.getInfoTotal():"")+"</span>"+
					"</a>"+
					"</li>"
				);
			}
		}
		return menu.toString();
	}

	/**
	 * Resolves the icon CSS class for a module: the module's own icon if
	 * set, otherwise a level-based default (table icon for top-level
	 * modules, angle-right for nested ones).
	 */
	private String getModuleIcon(SysModuleVO vo){
		if(StringUtils.isEmpty(vo.getIcon())){
			if(vo.getLevel() == 1){
				return "icon-table";
			}else{
				return "icon-angle-right";
			}
		}
		return vo.getIcon();
	}

	/**
	 * Generates the top dropdown menu.  Modules with children become
	 * Bootstrap dropdowns whose contents are produced by markMenu (which
	 * may recurse back into either renderer depending on {@code type}).
	 */
	private String topMenu(List<SysModuleVO> moduleList) {
		StringBuffer menu = new StringBuffer();
		for (SysModuleVO sysModuleVO : moduleList) {
			List<SysModuleVO> childModule = sysModuleVO.getChildModuleList();
			if (childModule != null && childModule.size() > 0) {
				menu.append("<li class=\"dropdown\">"
					+ "<a href=\"javascript:void(0);\" onclick=\"toModule('"+sysModuleVO.getModuleUrl()+"')\" class=\"dropdown-toggle\" data-toggle=\"dropdown\">"
					+ sysModuleVO.getModuleName()
					+ " <b class=\"caret\"></b></a>"
					+ "<ul class=\"dropdown-menu\">"
					+ markMenu(childModule) + "</ul></li>");
			} else {
				menu.append("<li><a href=\"javascript:void(0);\" onclick=\"toModule('"+sysModuleVO.getModuleUrl()+"')\" >" +
					sysModuleVO.getModuleName() + "</a></li>");
			}
		}
		return menu.toString();
	}

	/**
	 * Tag entry point: writes the generated menu HTML to the JSP output.
	 */
	@Override
	public void doTag() throws JspException, IOException {
		getJspContext().getOut().print(markMenu(this.moduleList));
	}

	public String getType() {
		return type;
	}

	public void setType(String type) {
		this.type = type;
	}

	public boolean isShowDashboard() {
		return isShowDashboard;
	}

	public void setShowDashboard(boolean isShowDashboard) {
		this.isShowDashboard = isShowDashboard;
	}

	public List<SysModuleVO> getModuleList() {
		return moduleList;
	}

	public void setModuleList(List<SysModuleVO> moduleList) {
		this.moduleList = moduleList;
	}

	// Returns the configured dashboard title, or the default when unset.
	public String getDashboardTitle(){
		if(StringUtils.isNotEmpty(this.dashboardTitle)){
			return this.dashboardTitle;
		}else{
			return defaultDashboardTitle;
		}
	}

	public void setDashboardTitle(String dashboardTitle) {
		this.dashboardTitle = dashboardTitle;
	}

	// Returns the configured dashboard URI, or the default when unset.
	public String getDashboardUri() {
		if(StringUtils.isNotEmpty(this.dashboardUri)){
			return this.dashboardUri;
		}else{
			return defaultDashboardUri;
		}
	}

	public void setDashboardUri(String dashboardUri) {
		this.dashboardUri = dashboardUri;
	}
}
/**
 * Verifies that hashCode() is stable: two calls on the very same
 * ModifyDnRequestImpl instance must yield the same value.
 */
@Test
public void testHashCodeSameObj() {
    ModifyDnRequestImpl req = new ModifyDnRequestImpl();
    req.setMessageId( 5 );

    int first = req.hashCode();
    int second = req.hashCode();
    assertTrue( first == second );
}
Correction appended The estranged boyfriend of a 22-year-old woman shot multiple times Saturday in Southeast Portland was found dead of an apparent self-inflicted gunshot wound, police say. The man's body was discovered at 7:45 a.m. behind a fence near Southeast 134th Drive and Holgate Boulevard, police said, a little more than an hour after the woman was shot outside a nearby home in the Gilbert-Powellhurst neighborhood. A witness identified the woman's 24-year-old estranged boyfriend as the shooter, said Sgt. Pete Simpson, a Portland police spokesman. Officers recovered a handgun near the man's body, Simpson said. Neither person was identified. The woman was taken to a Portland hospital with "traumatic injuries." An update on her condition was unavailable Saturday afternoon. -- Betsy Hammond Correction: The timing between when the man's body was found and when the injured woman was found was incorrect in an earlier version of this report due to an editing error.
/** * This task converts MathML files to images. * * @version $Revision$ */ public class MathMLConverter extends MatchingTask { private static final String CURRENT_DIR = "./"; private static final char EXTENSION_SEP = '.'; /** * */ private File mdestDir; private File mbaseDir; private File minFile; private File moutFile; private String moutType = "image/png"; private boolean mforce; private final MutableLayoutContext context; private final FileUtils fileUtils; /** * Creates a new MathMLConverter Task. */ public MathMLConverter() { this.context = new LayoutContextImpl(LayoutContextImpl .getDefaultLayoutContext()); this.fileUtils = FileUtils.getFileUtils(); } /** * Executes the task. * */ @Override public void execute() { DirectoryScanner scanner; String[] list; String[] dirs; if (this.mbaseDir == null) { this.mbaseDir = this.getProject().resolveFile( MathMLConverter.CURRENT_DIR); this.log("Base is not sets, sets to " + this.mbaseDir, Project.MSG_WARN); } // if we have an in file and out then process them if ((this.minFile != null) && (this.moutFile != null)) { this.log("Transforming file: " + this.minFile + " --> " + this.moutFile, Project.MSG_VERBOSE); try { Converter.getInstance().convert(this.minFile, this.moutFile, this.moutType, this.context); } catch (final IOException io) { throw new BuildException(io); } return; } /* * if we get here, in and out have not been specified, we are in batch * processing mode. */ // -- make sure Source directory exists... 
if (this.mdestDir == null) { throw new BuildException("m_destDir attributes must be set!"); } scanner = this.getDirectoryScanner(this.mbaseDir); this.log("Transforming into " + this.mdestDir, Project.MSG_INFO); // Process all the files marked for styling list = scanner.getIncludedFiles(); this.log("Included files: " + Arrays.toString(list), Project.MSG_VERBOSE); this.process(this.mbaseDir, Arrays.asList(list), this.mdestDir); // Process all the directories marked for styling dirs = scanner.getIncludedDirectories(); this.log("Included directories: " + Arrays.toString(dirs), Project.MSG_VERBOSE); for (final String dir : dirs) { list = this.fileUtils.resolveFile(this.mbaseDir, dir).list(); this.process(this.mbaseDir, Arrays.asList(list), this.mdestDir); } } /** * Sets support for anti alias (default is <i>true</i>). * * @param antiAlias * Flag for support anti alias. */ public void setAntiAlias(final boolean antiAlias) { this.setOption(Parameter.ANTIALIAS, antiAlias); } /** * Sets minimal size for turn on anti alias (default is <i>10.0</i>). * * @param antiAliasMinSize * Minimal size in float number. */ public void setAntiAliasMinSize(final float antiAliasMinSize) { this.setOption(Parameter.ANTIALIAS_MINSIZE, antiAliasMinSize); } /** * Sets background color. * * @param color * String representation of color. */ public void setBackgroundColor(final String color) { if (this.isNullOrEmpty(color)) { this.log("Attribute \"backgroundcolor\" is empty, not used", Project.MSG_WARN); } else { this.setOption(Parameter.MATHBACKGROUND, color); } } /** * Sets support for debug (default is <i>false</i>). * * @param debug * Flag for support debug. */ public void setDebug(final boolean debug) { this.setOption(Parameter.DEBUG, debug); } /** * Sets display style (default is <i>BLOCK</i>. * * @param display * String value of display style. 
* * @see net.sourceforge.jeuclid.context.Display */ public void setDisplay(final String display) { this.setOption(Parameter.DISPLAY, display); } /** * Sets list of supported font families for <i>Double-Struck</i>. * * @param fonts * List separated by comma. */ public void setFontsDoublestruck(final String fonts) { this.setOption(Parameter.FONTS_DOUBLESTRUCK, fonts); } /** * Sets list of supported font families for <i>Fraktur</i>. * * @param fonts * List separated by comma. */ public void setFontsFraktur(final String fonts) { this.setOption(Parameter.FONTS_FRAKTUR, fonts); } /** * Sets font size of text. * * @param fontSize * Font size as float value. */ public void setFontSize(final float fontSize) { this.setOption(Parameter.MATHSIZE, fontSize); } /** * Sets list of supported font families for <i>Monospaced</i>. * * @param fonts * List separated by comma. */ public void setFontsMonospaced(final String fonts) { this.setOption(Parameter.FONTS_MONOSPACED, fonts); } /** * Sets list of supported font families for <i>Sans-Serif</i>. * * @param fonts * List separated by comma. */ public void setFontsSansSerif(final String fonts) { this.setOption(Parameter.FONTS_SANSSERIF, fonts); } /** * Sets list of supported font families for <i>Script</i>. * * @param fonts * List separated by comma. */ public void setFontsScript(final String fonts) { this.setOption(Parameter.FONTS_SCRIPT, fonts); } /** * Sets list of supported font families for <i>Serif</i>. * * @param fonts * List separated by comma. */ public void setFontsSerif(final String fonts) { this.setOption(Parameter.FONTS_SERIF, fonts); } /** * Sets foreground color. * * @param color * String representation of color. */ public void setForegroundColor(final String color) { if (this.isNullOrEmpty(color)) { this .log( "Attribute \"foregroundcolor\" is empty, use default color", Project.MSG_WARN); } else { this.setOption(Parameter.MATHCOLOR, color); } } /** * Sets &lt;mfrac&gt; keep scriptlevel. 
* * @param keepScriptLevel * if true, element will NEVER increase children's scriptlevel * (in violation of the spec). */ public void setMfracKeepScriptLevel(final boolean keepScriptLevel) { this.setOption(Parameter.MFRAC_KEEP_SCRIPTLEVEL, keepScriptLevel); } /** * Sets scripts level (default is <i>0</i>). * * @param level * Script level. */ public void setScriptLevel(final int level) { this.setOption(Parameter.SCRIPTLEVEL, level); } /** * Sets minimal size of smallest font size (default is <i>8.0</i>). * * @param minSize * Size of font. */ public void setScriptMinSize(final float minSize) { this.setOption(Parameter.SCRIPTMINSIZE, minSize); } /** * Sets size of multiplier (default is <i>0.71</i>). * * @param multSize * Size of multiplier. */ public void setScriptSizeMult(final float multSize) { this.setOption(Parameter.SCRIPTSIZEMULTIPLIER, multSize); } /** * Set whether to check dependencies, or always generate. * * @param force * True, if the task should always generate the images. */ public void setForce(final boolean force) { this.logProperty("force", force); this.mforce = force; } /** * Set the base directory. * * @param dir * Base directory */ public void setBasedir(final File dir) { this.logProperty("basedir", dir); this.mbaseDir = dir; } /** * Set the destination directory into which the result files should be * copied to. * * @param dir * Destination directory */ public void setDestdir(final File dir) { this.logProperty("destdir", dir); this.mdestDir = dir; } /** * Sets an out file. * * @param outFile * Output file */ public void setOut(final File outFile) { this.logProperty("out", outFile); this.moutFile = outFile; } /** * Sets an input xml file to be converted. * * @param inFile * Input file */ public void setIn(final File inFile) { this.logProperty("in", inFile); this.minFile = inFile; } /** * Sets output file mimetype. * * @param mimetype * mimetype for output file. 
*/ public void setType(final String mimetype) { this.logProperty("type", mimetype); this.moutType = mimetype; } /** * Processes the given input XML file and stores the result in the given * resultFile. * * @param baseDir * Base directory * @param xmlFiles * Source file * @param destDir * Destination directory */ private void process(final File baseDir, final List<String> xmlFiles, final File destDir) { for (final String xmlFile : xmlFiles) { File outFile = null; File inFile = null; final String suffix = MathMLConverter.EXTENSION_SEP + ConverterRegistry.getInstance().getSuffixForMimeType( this.moutType); this.log("Found extension: " + suffix, Project.MSG_DEBUG); try { inFile = this.fileUtils.resolveFile(baseDir, xmlFile); final int dotPos = xmlFile .lastIndexOf(MathMLConverter.EXTENSION_SEP); if (dotPos > 0) { outFile = this.fileUtils.resolveFile(destDir, xmlFile .substring(0, dotPos) + suffix); } else { outFile = this.fileUtils.resolveFile(destDir, xmlFile + suffix); } this.log("Input file: " + inFile, Project.MSG_DEBUG); this.log("Output file: " + outFile, Project.MSG_DEBUG); if (this.mforce || !this.fileUtils.isUpToDate(inFile, outFile)) { this.fileUtils.createNewFile(outFile, true); Converter.getInstance().convert(inFile, outFile, this.moutType, this.context); } } catch (final IOException ex) { // If failed to process document, must delete target document, // or it will not attempt to process it the second time this.log("Failed to process " + inFile, Project.MSG_ERR); FileUtils.delete(outFile); throw new BuildException(ex); } } } /** * Convert string value of parameter and sets to current context. * * @param param * Type of parameter. * @param value * String value of parameter. */ private void setOption(final Parameter param, final String value) { this.setOption(param, param.fromString(value)); } /** * Sets parameter for current context. * * @param param * Type of parameter. * @param value * Object with value of parameter. 
*/ private void setOption(final Parameter param, final Object value) { this.logProperty(param.getOptionName(), value); this.context.setParameter(param, value); } /** * Tests if string is null or is empty. * * @param s * Tested string. * * @return <code>true</code> if string is null or empty else * <code>false</code>. */ private boolean isNullOrEmpty(final String s) { // TODO: use .isEmpty() when JEuclid moves to 1.6 return s == null || s.length() == 0; } /** * Logs property, which is sets. * * @param param * Parameter name. * @param value * Parameter value. */ private void logProperty(final String param, final Object value) { this.log("Sets property \"" + param + "\" with value: " + value, Project.MSG_DEBUG); } }
Rails provides a very convenient way of sending forms via AJAX using the Rails UJS helpers e.g. < %= form_for @articles, remote: true do |f| %> ... < % end %> Then in your controller you can process that form and respond with javascript: // app/views/articles/create.js.erb $ ( "<%= escape_javascript(render @article) %>" ). appendTo ( "#article" ); This makes for an impressive demo of the capabilities of Rails, but there are problems with it: Hooking into the lifecycle is awkward At some point you might want to do some client side validation or something else before sending the form. To do this you will need to hook into the lifecycle by doing something like: $ ( '#new_account_form' ). on ( 'ajax:before' , function () { // do some validation // return false if not valid }); So now you have some JavaScript in the front-end and some in the server. This is of course harder to maintain than having the JavaScript in only one place. They end up doing too much Once you have some JavaScript returned by the server manipulating part of the page, it is just too easy to start doing too much in this response, for example start modifying elements outside the scope of the original element. // app/views/articles/create.js.erb $ ( "<%= escape_javascript(render @article) %>" ). appendTo ( "#article" ); $ ( "#header .article-count" ). html ( <%= Article . size %> ); $ ( "#footer .info" ). html ( "Last updated by" ); In this example the returned JavaScript is doing way too much. Of course you can also do this kind of thing in the front-end, but at least the mess is in one place. What to do I am going to be blunt and say that Rails UJS when combined with server sent JavaScript encourages bad design. They are good for small trivial things, but soon they get out of control. Some alternatives are: Using Rails UJS but without the server sent JavaScript Rails remote is still useful, using it is easier than just plain DIY. 
You can hook into the lifecycle of remote calls and get html from the server, then insert that HTML yourself without having to send back JavaScript. e.g. $ ( '#new_account_form' ) . on ( 'ajax:before' , function () { // some validation }) . on ( 'ajax:success' , function ( data , status , xhr ) { // insert the data returned into the page $ ( '#articles' ). append ( data ); }); Your server will only return HTML. So what is the improvement? At least your JavaScript code is in one place making it easier to refactor later. Other alternatives Other good alternatives are not to use Rails UJS at all, just plain AJAX or using a JavaScript framework. These can encourage better structured applications, but that is too big of a topic to cover here. Conclusion Rails UJS + server sent JavaScript is great for trivial things, but it doesn't scale very well for medium to large applications. So I believe that completely avoiding JavaScript responses from the server is a good first step for making more robust and maintainable applications (there is a lot more to it than that, of course). Your server should only respond with either HTML or JSON, never JavaScript.
// app/src/main/java/com/merteroglu/trajectory/Model/ReducedResponse.java
package com.merteroglu.trajectory.Model;

import java.util.ArrayList;
import java.util.List;

/**
 * Result of a trajectory-reduction run: the surviving coordinates, the
 * reduction rate achieved, and how long the computation took.
 *
 * Created by Mert on 26.02.2018.
 */
public class ReducedResponse {

    // Coordinates remaining after the reduction.
    ArrayList<Coordinate> reducedCoordinates;
    // Reduction rate achieved (unit/scale not shown here — TODO confirm with producer).
    double reducedRate;
    // Time the reduction took (unit not shown here — TODO confirm with producer).
    long responseTime;

    /** Creates an empty response (no coordinates, zero rate and time). */
    public ReducedResponse() {
        reducedCoordinates = new ArrayList<>();
        reducedRate = 0;
        responseTime = 0;
    }

    public ReducedResponse(ArrayList<Coordinate> reducedCoordinates, double reducedRate, long responseTime) {
        this.reducedCoordinates = reducedCoordinates;
        this.reducedRate = reducedRate;
        this.responseTime = responseTime;
    }

    public ArrayList<Coordinate> getReducedCoordinates() {
        return reducedCoordinates;
    }

    public void setReducedCoordinates(ArrayList<Coordinate> reducedCoordinates) {
        this.reducedCoordinates = reducedCoordinates;
    }

    public double getReducedRate() {
        return reducedRate;
    }

    public void setReducedRate(double reducedRate) {
        this.reducedRate = reducedRate;
    }

    public long getResponseTime() {
        return responseTime;
    }

    public void setResponseTime(long responseTime) {
        this.responseTime = responseTime;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;

        ReducedResponse that = (ReducedResponse) o;

        if (Double.compare(that.reducedRate, reducedRate) != 0) return false;
        // responseTime is a long: compare directly instead of the original
        // Double.compare detour (which converted both longs to double).
        if (responseTime != that.responseTime) return false;
        return reducedCoordinates != null ? reducedCoordinates.equals(that.reducedCoordinates)
                : that.reducedCoordinates == null;
    }

    /**
     * The original class overrode equals() without hashCode(), violating the
     * java.lang.Object contract and breaking hash-based collections; this
     * implementation hashes exactly the fields that equals() compares.
     */
    @Override
    public int hashCode() {
        int result;
        long temp = Double.doubleToLongBits(reducedRate);
        result = (int) (temp ^ (temp >>> 32));
        result = 31 * result + (int) (responseTime ^ (responseTime >>> 32));
        result = 31 * result + (reducedCoordinates != null ? reducedCoordinates.hashCode() : 0);
        return result;
    }

    @Override
    public String toString() {
        // Build with StringBuilder instead of repeated String concatenation
        // in a loop; the emitted text is unchanged.
        StringBuilder coordinates = new StringBuilder();
        for (Coordinate c : reducedCoordinates) {
            coordinates.append(c.toString()).append("\n");
        }
        return "ReducedResponse{" +
                ", reducedRate=" + reducedRate +
                ", responseTime=" + responseTime +
                "reducedCoordinates=" + coordinates +
                '}';
    }
}
Laura Sanchez shouldn’t be dead. And, statistically, the Bloods gang members connected to the woman’s drive-by slaying shouldn’t have been caught and sent to prison. Her March 18, 2007, murder on the fringes of the 90011 ZIP code was one of hundreds of homicides in an area of South Los Angeles that stretches south from Washington Boulevard to Slauson Avenue and east from South Main Street to Long Beach Boulevard. And although the circumstances of her death are similar to other cases, a federal indictment of one of her killers opened a window on the drug rivalries that dominate life in a housing project near where she was killed. “It’s a war zone here that hasn’t been declared a war zone,” said Adela Barajas, Sanchez’s sister-in-law and a community activist seeking to improve her neighborhood — an area officially renamed South Los Angeles that she still refers to as South Central Los Angeles. • Full Coverage: Read more from the special project Getting Away With Murder The area is policed largely by officers assigned to the Los Angeles Police Department’s Newton Division, and officials have sought to rebrand it following the 1992 Rodney King riots. South Central is no longer the preferred designation, but the name change has done little to quell the decades-long turf rivalries over drug distribution networks that have taken dozens of lives. Shootin’ Newton: An 18-month investigation by the Los Angeles News Group found that nearly half of all homicides that occurred between Jan. 1, 2000, and Dec. 31, 2010 across Los Angeles County remain unsolved. The investigation also found that detectives from the Newton Division — known to some as “Shootin’ Newton” — have a higher success rate than their counterparts in 77th and Southwest divisions, which are also located in South Los Angeles. About 100,000 Angelenos call the 90011 ZIP code area home.
Their neighborhoods are dominated by rental housing units, Protestant storefront churches, corner markets, liquor stores, taco stands and fried chicken restaurants. • Video: A slain mother’s legacy At Jefferson High School, the largest public high school in the area, just 58 percent of students graduated after four years in 2014, less than the average for Los Angeles Unified School District schools, according to district records. About 40 percent of residents live below the poverty line, more than double the rate across L.A. County, according to U.S. Census data. About half of the people residing in 90011 were born outside of the U.S. Laura Sanchez, 34, a mother of four, was shot to death as she returned home from a rehearsal for her daughter Denise’s quinceanera. The shooters were aiming at Sanchez’s 17-year-old son, Joey, court documents show. When Sanchez told her son to duck, she was hit instead. The fatal bullet struck Sanchez in the back then pierced her left lung and heart. Two minutes later, she was dead. Eight years later her family still struggles with the loss. “It’s devastating. It’s a life-changing event,” said her sister-in-law Barajas. “She was the glue of our family events.” Laura Sanchez’s death in the 4500 block of South Long Beach Avenue was the result of a gang feud, court documents show. According to court documents, the Pueblo Bishop Bloods, who claim a neighborhood five blocks south, were in a dispute with Athens Park Surenos that resulted in the wounding of a Pueblo teen. Later that night, seeking revenge, Pueblo gang members headed north after they couldn’t find anyone from the Athens Park gang. Eventually they found Joey Sanchez riding in his mom’s van. Joey was not affiliated with a gang, the records said. Laura Sanchez told her son she thought they were being followed. She had been paranoid about neighborhood violence as her children grew older because her mother had been murdered in a drive-by shooting nine years before, Barajas said. 
Riding in a three-car caravan, the group of Pueblo members stopped on Long Beach Avenue in front of Sanchez’s home. One of the cars pulled up beside Sanchez’s Astrovan. Shots were fired, and a mother protecting her son was dead. “There’s a lot of fear. A lot of questioning,” Barajas said. “You’re sort of lost, you’re trying to make sense like, Why would they shoot a mother? Why would they shoot her?” Jerem While most detectives say gang-related deaths are among the hardest to solve, only one of LAPD’s homicide bureaus, Hollenbeck Division, provided data on gang-related deaths. Of the 326 homicide cases investigated by the bureau east of downtown L.A., half of the solved cases were gang-related and 70 percent of the unsolved cases were gang related. Jerem Angeles’ killing in Long Beach on the same day Sanchez died is one of those, and eight years later, his killer is still at large. Jessie Angeles does not know who killed his son, a 24-year-old of Filipino descent, but he believes gangs were involved. “I’m just hoping one day I will get a call from Long Beach police and hopefully get some closure,” Jessie Angeles said. Police said Jerem Angeles was shot in the back around 2:45 a.m. in the 2000 block of Lemon Avenue, a residential street. When police arrived on the scene, Jerem Angeles’ friends had taken him to the hospital where he was pronounced dead, police said. “Detectives initially investigating this case exhausted all leads, had no suspect information, and no additional or new information on the case has been received,” Long Beach police spokeswoman Nancy Pratt said in an email. Jessie Angeles said he doesn’t know much about his son’s death. He said his son’s fiancee was with his son when Jerem was shot. Jerem was killed on the night of the couple’s engagement party, but his fiancee did not cooperate with police during the investigation. He said his son was a hard worker, but was involved with gangs despite promises to turn his life around. 
“He gave me false hope,” Jessie Angeles said. Justice It took three trials for Sanchez’s family to get some answers. Jailhouse recordings, video surveillance and confidential informants led to first-degree murder charges for four Pueblo Bishop Bloods gang members five months after Sanchez was killed. Barajas said they were surprised to learn it was African-American gang members who killed Sanchez. “We lived here all our lives and never had any kind of problems with anyone who lives in the projects or any gang members or anything like that so it was really shocking to find out it was them,” Barajas said. “There was disbelief. We thought, ‘Are you sure?’ “There’s this myth of law enforcement making an arrest to say, ‘OK, we solved this case’.” LAPD detectives matched the vehicles from video surveillance taken at the murder scene to vehicles in the Pueblo del Rio Housing Project belonging to admitted gang members, court documents show. Joey Sanchez and other witnesses also identified the vehicles as those used in the shooting. One Pueblo gang member, who later denied he was interviewed by detectives, said several gang members left a gun at his home and said they had just killed a woman while they were trying to shoot a 38th Street gang member, according to court documents. Eventually juries convicted Jerry “KO” Sorrels, Daymon “D-Dog” Garrett, Roderick A. Jenkins, Jamal “PJ” Payne for killing Laura Sanchez. They are all serving a minimum of 25 years in state prison. In 2010 a federal Racketeer Influenced and Corrupt Organizations Act indictment for murder, robbery and drug trafficking charged a fifth man, Marquis “Baby Uzi” Edwards, with Sanchez’s murder. Edwards, also a Pueblo gang member, pleaded guilty and is serving 40 years in prison in a high-security federal penitentiary in Pennsylvania. U.S. District Court Judge S. 
James Otero sentenced Edwards to 10 years more than the sentence prosecutors recommended “to protect the community.” Otero said Edwards participated in the murder “to both maintain and enhance his membership in the gang,” according to court transcripts. Culture of pride Newton homicide detectives say they have an advantage over other homicide bureaus in South L.A. because the bureau has a history of detectives staying in the division for most of their careers, said Newton’s incoming detective supervisor Richard Arciniega. Divisions like 77th, Southeast and Southwest, which had a lower solve rate for homicides than Newton between 2000 and 2010, tend to have young detectives and a high turnover rate, he said. Another advantage is that the homicide unit is at the division station, Arciniega added. Southwest and Southeast homicide units are housed at the 77th Division station. Arciniega said he’s been a police officer for 32 years and a detective for 17 years and he’s spent nearly all of his career in Newton. He emulates previous supervisors who’ve spent their careers in Newton. “I was offered spots elsewhere, but this is my home,” he said. Newton Division Capt. Jorge Rodriguez said the detectives take pride in their work and want to make the community better. He said the Newton Division that covers 9 square miles and about 150,000 residents is the poorest of the LAPD divisions in South L.A. “It’s the opportunity to make the community better,” Rodriguez said. “It’s that gratification, that high you chase, when you can say the community is safer.” Arciniega said the key to solving gang-related homicides is the relationships officers establish with gang members. He testified at a death row sentencing hearing for a gang member and said he and the convicted killer grew up together. “He was a young gangster and I was a young gang cop,” Arciniega said he testified. And the man facing death row acknowledged that. 
Arciniega said he’s missed countless dinners or family events because he got a call that patrol officers brought in someone who had some information on a homicide case he was working. If he waited until the next morning, the opportunity would be gone, he said. Aftermath Survivor Joey Sanchez believes his mother should be alive and he should be dead. “Joey has a lot of guilt. He used to say, ‘Why not me? Why my mother?’” Barajas said. He graduated high school the spring his mother died. He attended college for a year and a half but dropped out because he couldn’t concentrate. He struggles with substance abuse that worsens around the anniversary of his mom’s death. Sanchez’s youngest son Brian, who was 5 years old when his mother died, will text his aunt in the middle of the night asking to go to the cemetery the next morning. Barajas said Brian Sanchez finds relief from the grief through running, which he picked up after a race for survivors of violence. The week Laura Sanchez died, the children wanted to do something to honor their mother. They rallied through the neighborhood, but wanted to continue to do more. Barajas reached out to then District 9 City Councilwoman Jan Perry who helped her bring in resources to revamp Fred Roberts Recreation Center, a neglected park near where Laura Sanchez was killed, with a new building, play equipment, basketball courts and programming. Barajas founded LAURA (Life After Uncivilized Ruthless Acts), a nonprofit foundation for homicide victims that also focuses on civic engagement and activities for youth. She takes about 20 youngsters camping each year and offers peer-to-peer counseling. “She’s giving young people a role model in herself and I think a road map for how to become a community activist,” Perry said. “Out of something really horrible came something positive.”
/**
 * Adds every element of {@code source} to {@code target}, delegating each
 * element to {@code addIgnoreNull} (which is expected to skip null elements).
 * A null {@code source} is treated as empty.
 *
 * @param <T>    the element type
 * @param target collection receiving the elements; must not be null
 * @param source collection providing the elements; may be null
 * @return true if the target collection changed
 * @throws NullPointerException if the target collection is null
 */
public static <T> boolean addAllIgnoreNull(final Collection<T> target, final Collection<T> source) {
    if (target == null) {
        throw new NullPointerException("The target collection must not be null");
    }
    if (source == null) {
        return false;
    }
    boolean modified = false;
    for (final T item : source) {
        if (addIgnoreNull(target, item)) {
            modified = true;
        }
    }
    return modified;
}
def _create_tr_ts(n_tr, n_ts):
    """Build normalized MNIST train/test sets restricted to digits 1, 5 and 9.

    n_tr / n_ts give the number of training / test samples. Pixel values are
    scaled from [0, 255] down to [0, 1]. Returns the (train, test) pair.
    """
    dataset = CDataLoaderMNIST().load('training', digits=(1, 5, 9))
    splitter = CTrainTestSplit(
        train_size=n_tr, test_size=n_ts, random_state=0)
    tr, ts = splitter.split(dataset)
    for part in (tr, ts):
        part.X /= 255
    return tr, ts
<reponame>Todorov99/server
package models

// Device describes a device together with the sensors attached to it.
// The struct is (de)serialized both as JSON and YAML; every field is
// optional on the wire (omitempty).
type Device struct {
	// ID is the identifier of the device.
	ID string `json:"id,omitempty" yaml:"id,omitempty"`
	// Name is the human-readable device name.
	Name string `json:"name,omitempty" yaml:"name,omitempty"`
	// Description is free-form text about the device.
	Description string `json:"description,omitempty" yaml:"description,omitempty"`
	// Sensors lists the sensors that belong to this device.
	Sensors []Sensor `json:"sensors,omitempty" yaml:"sensors,omitempty"`
}
<gh_stars>1-10
#pragma repy

# Regression test for repy networking: opens a listener, connects to it, then
# registers a SECOND waitforconn on the same ip/port and checks that a simple
# "Hello"/"bye" exchange still completes. <connport> is substituted by the
# test harness before execution.

# Connection handler: expects "Hello" from the peer, answers "bye", then
# shuts down both the per-connection handle and the main listener.
def foo(ip,port,sockobj, ch,mainch):
  data = sockobj.recv(4096)
  if data != "Hello":
    print 'Error: data did not match "Hello"'
    exitall()

  sockobj.send("bye")
  stopcomm(mainch)
  stopcomm(ch)

if callfunc == 'initialize':
  ip = getmyip()
  # First listener, then connect to ourselves.
  waitforconn(ip,<connport>,foo)
  sleep(.1)
  sockobj = openconn(ip,<connport>)

  # on many systems it will raise an exception here... it also may or may
  # not raise an exception on different runs.
  waith = waitforconn(ip,<connport>,foo)

  sockobj.send("Hello")
  data = sockobj.recv(4096)
  if data != "bye":
    print 'Error: data did not match "bye"'
    exitall(1)
  sockobj.close()

  # Armon: If we do not explicitly stop the second waitforconn, we will never exit
  # This is because the second waitforconn has a different handle, and so when foo calls stopcomm(),
  # That is not actually stopping the listener
  stopcomm(waith)
// Suggestions for an address. Contains lists of suggestions for administrative // area, locality, and dependent locality fields of an address. class AddressSuggestions { public: AddressSuggestions() {} AddressSuggestions(const AddressSuggestions&) = delete; AddressSuggestions& operator=(const AddressSuggestions&) = delete; ~AddressSuggestions() {} void AllRegionsMatchForField(AddressField address_field) { all_regions_match_input_.insert(address_field); } The |address_field| parameter should be either ADMIN_AREA, LOCALITY, or DEPENDENT_LOCALITY. bool AddRegions(AddressField address_field, const std::set<const RegionData*>& regions_match_key, const std::set<const RegionData*>& regions_match_name) { DCHECK(address_field >= ADMIN_AREA); DCHECK(address_field <= DEPENDENT_LOCALITY); AddressField parent_address_field = static_cast<AddressField>(address_field - 1); bool all_parents_match = parent_address_field == COUNTRY || all_regions_match_input_.find(parent_address_field) != all_regions_match_input_.end(); Cannot build |address_field| level suggestions if there are no matches in |parent_address_field| level regions. const RegionsMatchInput* parents = NULL; if (address_field > ADMIN_AREA && !all_parents_match) { parents = &regions_match_input_[parent_address_field]; if (parents->keys.empty() && parents->names.empty()) return false; } RegionsMatchInput* regions = NULL; if (address_field < DEPENDENT_LOCALITY) regions = &regions_match_input_[address_field]; std::vector<Suggestion>* suggestions = &suggestions_[address_field]; bool added_suggestions = false; Iterate over both |regions_match_key| and |regions_match_name| and build Suggestion objects based on the given RegionData objects. Advance either one iterator at a time (if they point to different data) or both iterators at once (if they point to the same data). 
for (std::set<const RegionData*>::const_iterator key_it = regions_match_key.begin(), name_it = regions_match_name.begin(); key_it != regions_match_key.end() || name_it != regions_match_name.end();) { const RegionData* key_region = key_it != regions_match_key.end() ? *key_it : NULL; const RegionData* name_region = name_it != regions_match_name.end() ? *name_it : NULL; Regions that do not have a parent that also matches input will not become suggestions. bool key_region_has_parent = all_parents_match || (parents && !parents->keys.empty() && key_region && parents->keys.find(&key_region->parent()) != parents->keys.end()); bool name_region_has_parent = all_parents_match || (parents && !parents->names.empty() && name_region && parents->names.find(&name_region->parent()) != parents->names.end()); if (name_region && (!key_region || name_region < key_region)) { if (name_region_has_parent) { suggestions->push_back(Suggestion(name_region, address_field, false)); added_suggestions = true; if (regions) regions->names.insert(name_region); } ++name_it; } else if (key_region && (!name_region || key_region < name_region)) { if (key_region_has_parent) { suggestions->push_back(Suggestion(key_region, address_field, true)); added_suggestions = true; if (regions) regions->keys.insert(key_region); } ++key_it; } else { if (key_region_has_parent) { suggestions->push_back(Suggestion(key_region, address_field, true)); added_suggestions = true; if (regions) { regions->keys.insert(key_region); regions->names.insert(name_region); } } ++key_it; ++name_it; } } return added_suggestions; } Swaps the suggestions for the smallest sub-region into |suggestions|. |this| is not usable after this call due to using the swap() operation. The |suggestions| parameter should not be NULL. 
void SwapSmallestSubRegionSuggestions(std::vector<Suggestion>* suggestions) { DCHECK(suggestions); for (int i = DEPENDENT_LOCALITY; i >= ADMIN_AREA; --i) { std::vector<Suggestion>* result = &suggestions_[static_cast<AddressField>(i)]; if (!result->empty()) { suggestions->swap(*result); return; } } } private: The sets of non-owned regions used for looking up regions that match user input by keys and names. struct RegionsMatchInput { std::set<const RegionData*> keys; std::set<const RegionData*> names; }; The regions that match user input at ADMIN_AREA and LOCALITY levels. std::map<AddressField, RegionsMatchInput> regions_match_input_; The set of fields for which all regions match user input. Used to avoid storing a long list in |regions_match_input_| and later looking it up there. std::set<AddressField> all_regions_match_input_; Suggestions at ADMIN_AREA, LOCALITY, and DEPENDENT_LOCALITY levels. std::map<AddressField, std::vector<Suggestion> > suggestions_; }
/**
 * Takes something from the target: delegates to {@code stealEffect} when the
 * target has at least one active potion effect, and falls back to
 * {@code stealHealth} when it has none or the effect steal reports failure.
 *
 * @param stealer Entity which will receive some bonuses.
 * @param target  Entity which will lose something.
 */
protected static void steal( LivingEntity stealer, LivingEntity target ) {
	Collection< EffectInstance > activeEffects = target.getActivePotionEffects();
	boolean effectWasStolen = !activeEffects.isEmpty() && stealEffect( stealer, target, activeEffects );
	if( !effectWasStolen )
		stealHealth( stealer, target );
}
""" ## Function similar to that in matlab ## Author: <NAME>, <NAME> # """ import numpy as np def subspace_min (x, l, u, x_cp, d, G): n = len(x) Z = np.diag(n) fixed = (x_cp <= l+1e-8) or (x_cp >= u-1e8) if all(fixed) is True: x = x_cp return x Z = Z[: , fixed is False] rgc = np.matmul(np.transpose(Z), (d + np.matmul(G, (x_cp-x)))) rB = np.matmul(np.matmul(np.transpose(Z), G), Z) d[fixed is False] = np.linalg.solve(rB, rgc) d[fixed is False] = -d[fixed is False] alpha = 1 temp1 = alpha for i in np.where(fixed is False): dk = d[i] if dk < 0: temp2 = l[i] - x_cp[i] if temp2 >= 0: temp1 = 0 else: if dk*alpha < temp2: temp1 = temp2/dk else: temp2 = u[i] - x_cp[i] if temp1 <= 0: temp1 = 0 else: if dk*alpha > temp2: temp1 = temp2/dk alpha = min(temp1, alpha) x = x_cp + Z*alpha*d[fixed is False] return x
<gh_stars>10-100
from datetime import datetime

from django.contrib.gis.geos import Point
from django.test import TestCase
import pytz
from robber import expect

from pinboard.serializers.mobile.relevant.document_serializer import DocumentSerializer
from data.factories import (
    OfficerFactory,
    AllegationFactory,
    AllegationCategoryFactory,
    OfficerAllegationFactory,
    AttachmentFileFactory,
)
from pinboard.factories import PinboardFactory


class DocumentSerializerTestCase(TestCase):
    """Tests for the mobile "relevant document" serializer used by pinboards."""

    def test_serialization(self):
        """A pinboard's relevant document serializes with its allegation,
        the pinned officer's percentile data, and the incident location."""
        # Arrange: one pinned officer with known percentile values...
        pinned_officer = OfficerFactory(
            id=1,
            rank='Police Officer',
            first_name='Jerome',
            last_name='Finnigan',
            allegation_count=10,
            trr_percentile='99.99',
            complaint_percentile='88.88',
            civilian_allegation_percentile='77.77',
            internal_allegation_percentile='66.66',
        )
        # ...an allegation tied to a category and a geo point...
        relevant_allegation = AllegationFactory(
            crid='1',
            incident_date=datetime(2002, 2, 21, tzinfo=pytz.utc),
            most_common_category=AllegationCategoryFactory(category='Operation/Personnel Violations'),
            point=Point([0.01, 0.02]),
        )
        # ...and a visible document attachment on that allegation.
        AttachmentFileFactory(
            id=1,
            file_type='document',
            title='relevant document 1',
            allegation=relevant_allegation,
            show=True,
            preview_image_url="https://assets.documentcloud.org/CRID-1-CR-p1-normal.gif",
            url='http://cr-1-document.com/',
        )
        pinboard = PinboardFactory(
            id='66ef1560',
            title='Test pinboard',
            description='Test description',
        )
        # Link the officer to both the pinboard and the allegation so the
        # document becomes "relevant" to the pinboard.
        pinboard.officers.set([pinned_officer])
        OfficerAllegationFactory(officer=pinned_officer, allegation=relevant_allegation)

        # Act + assert: exactly one relevant document, serialized as expected.
        expect(pinboard.relevant_documents.count()).to.eq(1)
        expect(DocumentSerializer(pinboard.relevant_documents.first()).data).to.eq({
            'id': 1,
            'preview_image_url': "https://assets.documentcloud.org/CRID-1-CR-p1-normal.gif",
            'url': 'http://cr-1-document.com/',
            'allegation': {
                'crid': '1',
                'category': 'Operation/Personnel Violations',
                'incident_date': '2002-02-21',
                'officers': [{
                    'id': 1,
                    'rank': 'Police Officer',
                    'full_name': '<NAME>',
                    'percentile_trr': '99.9900',
                    'percentile_allegation': '88.8800',
                    'percentile_allegation_civilian': '77.7700',
                    'percentile_allegation_internal': '66.6600',
                }],
                'point': {
                    'lon': 0.01,
                    'lat': 0.02,
                },
            }
        })
// Returns the k-th (1-indexed) permutation of |perm| in lexicographic order
// of positions. Precondition: 1 <= k <= n! for n = perm.size().
// O(n^2) overall due to vector::erase.
std::vector<int> kth_permutation(std::vector<int> perm, int k) {
    int n = (int) perm.size();
    int64_t factorial = 1LL;
    for (int64_t num = 2; num < n; ++num)
        factorial *= num;  // factorial == (n-1)!
    assert(k >= 1);
    k--;  // convert 1-indexed k to a 0-indexed rank
    std::vector<int> answer;
    answer.reserve(n);
    while (true) {
        // The leading element is determined by how many (size-1)! blocks
        // the remaining rank spans.
        answer.push_back(perm[k / factorial]);
        perm.erase(perm.begin() + (k / factorial));
        if (perm.empty())
            break;
        k %= factorial;
        factorial /= (int64_t) perm.size();
    }
    return answer;
}

// Convenience overload: the k-th permutation of {start, start+1, ..., start+n-1}.
std::vector<int> kth_permutation(int n, int k, int start = 0) {
    std::vector<int> perm(n);
    std::iota(perm.begin(), perm.end(), start);
    return kth_permutation(perm, k);
}

// The k-th permutation rendered over the first n lowercase letters.
std::string kth_perm_string(int n, int k) {
    assert(1 <= n && n <= 26);
    std::vector<int> perm = kth_permutation(n, k);
    std::string alpha;
    // fixed: the original loop condition was `i <= 'a' + n`, which appended
    // one letter too many (harmless for indexing, but wrong for n == 26).
    for (char c = 'a'; c < 'a' + n; ++c)
        alpha.push_back(c);
    std::string answer;
    for (int idx : perm)
        answer.push_back(alpha[idx]);
    return answer;
}
<gh_stars>10-100
/*
Z88DK Z80 Macro Assembler

Copyright (C) <NAME>, InterLogic 1993-99
Copyright (C) <NAME>, 2011-2019
License: The Artistic License 2.0, http://www.perlfoundation.org/artistic_license_2_0
Repository: https://github.com/z88dk/z88dk

Handle library file construction, reading and writing
*/

#pragma once

#include "strutil.h"

/* library file header string -- presumably the signature written at the start
   of every generated library; confirm against the implementation file */
extern char Z80libhdr[];

/* make library from list of files; convert each source to object file name */
extern void make_library(const char *lib_filename, argv_t *src_files);

// check if the given filename exists and is a library file of the correct version
extern bool check_library_file(const char *src_filename);
// ====================
// Monte Carlo tree search (MCTS) construction
// ====================

// Package imports
import * as tf from '@tensorflow/tfjs';
import { sum } from 'lodash';
import MNode from './MNode';
import Reversi from './Reversi';

// Parameters
const PV_EVALUATE_COUNT = 50; // simulations per inference (original AlphaZero: 1600)
const DN_INPUT_SHAPE = [6, 6, 2]; // board height, width, channels (own / enemy pieces)

// Boltzmann distribution: rescales the scores by exponent 1/temperature and
// normalizes them into a probability distribution.
function boltzman(xs: number[], temperature: number) {
  const _xs = xs.map((x) => x ** (1 / temperature));
  return _xs.map((x) => x / sum(_xs));
}

// Inference: runs the dual network on a game state and returns the policy
// over the legal actions together with the value estimate.
export async function predict(model: tf.LayersModel, state: Reversi) {
  // Reshape the input data for inference.
  const [a, b, c] = DN_INPUT_SHAPE;
  let x = tf.tensor([state.pieces, state.enemyPieces]);
  x = x.reshape([c, a, b]).transpose([1, 2, 0]).reshape([1, a, b, c]); // x.shape = [1, 6, 6, 2]

  // Inference
  const [y1, y2] = model.predict(x, { batchSize: 1 }) as tf.Tensor<tf.Rank>[];
  x.dispose();

  // Extract the policy, keeping only the entries for legal actions.
  const legalActions = state.legalActions();
  const [y1Array] = (await y1.array()) as number[][];
  let policies = y1Array.filter((_, i) => legalActions.includes(i));
  const [[value]] = (await y2.array()) as number[][];
  const sumPolicies = sum(policies);
  // Convert policies into a probability distribution that sums to 1.
  policies = sumPolicies ? policies.map((p) => p / sumPolicies) : policies;
  y1.dispose();
  y2.dispose();
  return { policies, value };
}

// Convert a list of nodes into a list of visit counts.
export function nodesToScores(nodes: MNode[]) {
  return nodes.map((node) => node.n);
}

// Compute the Monte Carlo tree search scores for the current position.
async function pvMctsScores(model: tf.LayersModel, reversi: Reversi, temperature: number) {
  // Define the MCTS root node for the current position.
  const rootNode = new MNode(reversi, 0);

  const _ = new Array<number>(PV_EVALUATE_COUNT).fill(0);
  // Run the evaluation multiple times.
  // eslint-disable-next-line no-restricted-syntax
  for await (const __ of _) {
    await rootNode.evaluate(model);
  }

  // Probability distribution over the legal moves.
  let scores: number[] = nodesToScores(rootNode.childNodes);
  if (temperature === 0) {
    // Deterministic play: 1 only for the most-visited action.
    const action = (await tf.argMax(scores).data())[0];
    scores = (await tf.zeros([scores.length]).array()) as number[];
    scores[action] = 1;
    return scores;
  }

  // Add variance based on the Boltzmann distribution.
  scores = boltzman(scores, temperature);
  return scores;
}

// Sample an entry of |legalActions| according to the probabilities |p|.
function randomChoice(legalActions: number[], p: number[]) {
  const n = Math.random();
  let s = 0;
  for (let i = 0; i < legalActions.length; i += 1) {
    s += p[i];
    if (s >= n) {
      return legalActions[i];
    }
  }
  return legalActions[legalActions.length - 1];
}

// Action selection using Monte Carlo tree search: returns an async function
// that picks a move for a given game state.
function pvMctsAction(model: tf.LayersModel, temperature = 0.0) {
  async function _pvMctsAction(reversi: Reversi) {
    const scores = await pvMctsScores(model, reversi, temperature);
    const choiced = randomChoice(reversi.legalActions(), scores);
    return choiced;
  }
  return _pvMctsAction;
}

export default pvMctsAction;
Factors affecting the financial structure of Slovak and Czech enterprises in the Manufacture of machinery and equipment industry

The financial structure of enterprises is influenced by many factors. The aim of this paper is to analyze, evaluate and assess the effect of selected internal corporate factors and external macro-environment factors on the financial structure of enterprises. All the studied enterprises were operating in the Manufacture of machinery and equipment industry. We have also analyzed whether there were any differences in the action of these factors between enterprises operating in various countries - Slovakia and the Czech Republic (i.e. what was the impact of the environment on the corporate financial structure). The analysis has been done for the years 2009 - 2014. In addition to the standard scientific methods, the methods of multiple regression and correlation analysis have been used as well. The research results pointed to the fact that although the influence of some macro-environment factors has been proved, it was the internal corporate factors that had the dominant position in shaping the financial structure of Slovak and Czech enterprises (concerning the Manufacture of machinery and equipment industry).© 2016 The Authors. Published by Elsevier B.V. Peer-review under responsibility of Academic World Research and Education Center. Keywords: Financial structure; internal corporate factors; external factors of the macro-environment.
Yesterday’s article on the excommunication of papal critic Professor José Galat, formerly the rector of La Gran Colombia University and founder of Spanish language TV station Teleamiga, has stirred up quite a firestorm in the comment box. And not without reason. Galat is not, however, just a papal critic. As reported by Maike Hickson, he takes his criticisms to the point of unfounded conclusions: Galat himself recently made statements on his own television show, where, citing the “Sankt Gallen Mafia,” of whom Belgian cardinal Godfried Danneels is among the most famous members, he claimed that Pope Francis was unlawfully elected. He also claimed that Pope Francis is distorting many aspects of the Catholic Church’s fundamental teaching. [emphasis added] Of the existence of the so-called “Sankt Gallen Mafia,” which is said to have colluded to elect Jorge Bergoglio pope, there is no real question. By the admission of some of their own members, the group existed. Quite a good deal is known about their operations. Other details have come to light about international pressure against Pope Benedict XVI to resign. Questions have also come up about possible canonical irregularities in the election of Pope Francis. Of the assertion that Francis is distorting the teaching of the Church, there can also be no question. From significant segments of Amoris Laetitia to certain assertions in Evangelii Gaudium to his many, many less-well-known statements that appear to run contrary to what the Church has perennially taught, this pope has done great damage to the faithful in their ability to comprehend and accept authentic Catholic teaching. Whatever one thinks of all these things, it seems to me that within reasonable parameters, there is room for a measure of skepticism. Skepticism about the abdication, skepticism about the election, skepticism about whether material heresy may have crossed the line into formal heresy, and so on. 
It seems that no honest Catholic today feels sure about very much except that we’re dealing with a crisis in the papacy of unprecedented proportions. But skepticism, difficulties, doubts, and questions are just that. They do not rise to the level of certitude. They do not give to any of us the right to make formal declarations of fact when we don’t even have all of the information needed to make a determination, let alone the authority to do so. These things, by their nature, give rise to uncertainties, not the other way around. And we should give these uncertainties to God in prayer, asking Him to guide us and to aid and restore His Church. In other words: it’s a big mess, but fixing it is above our pay grade. We each have our tasks. Let’s leave the big problems to the big players. By way of analogy, one of the things we (rightly) push back against in the midst of the ecclesiastical assault on Holy Matrimony is the idea that via the “internal forum” a couple can determine that their marriage was never valid, even if a tribunal has not been involved, or has reached a decision upholding the union. But how is this any different than those of us who run around telling ourselves and anyone who will listen that Francis isn’t just a lousy pope, he’s an antipope? Do we really think we can defend the Church’s juridical authority in the former case and totally ignore it in the latter? Further — and I think this is what really lies at the heart of the matter — do we have so little trust that God is guiding His Church that we think we have to jump in and do it for Him? Are we, finding ourselves in the midst of this storm, reacting like the apostles before us? Do we wish to prod Him from His (apparent) sleep, crying out, “Master, does it not concern thee that we perish?” Have we forgotten Our Lord’s reaction to such squeamishness? And rising up, he rebuked the wind, and said to the sea: Peace, be still. And the wind ceased: and there was made a great calm. 
And he said to them: Why are you fearful? have you not faith yet? And they feared exceedingly: and they said one to another: Who is this (thinkest thou) that both wind and sea obey him? (Mark 4:39-40) We have a Church that is both human and divine. It has been guaranteed the guidance of the Holy Spirit to keep the faithful from being bound to error, but we were never promised the impeccability of the men who would lead it. St. Paul warned the bishops of precisely the problem we face at this very moment: Take heed to yourselves, and to the whole flock, wherein the Holy Ghost hath placed you bishops, to rule the church of God, which he hath purchased with his own blood. I know that, after my departure, ravening wolves will enter in among you, not sparing the flock. And of your own selves shall arise men speaking perverse things, to draw away disciples after them. (Acts 20:28-31) And in the Scriptures, we were even given, under divine inspiration, an example of an errant pope: But when Cephas was come to Antioch, I withstood him to the face, because he was to be blamed. – Galatians 2:11 In his commentary on Galatians 2, St. Thomas Aquinas explained the nature of Peter’s error, and thus, Paul’s rebuke: Apropos of what is said in a certain Gloss, namely, that I withstood him as an adversary, the answer is that the Apostle opposed Peter in the exercise of authority, not in his authority of ruling. St. Thomas goes on: The occasion of the rebuke was not slight, but just and useful, namely, the danger to the Gospel teaching. Hence he says: Thus was Peter reprehensible, but I alone, when I saw that they, who were doing these things, walked not uprightly unto the truth of the gospel, because its truth was being undone, if the Gentiles were compelled to observe the legal justifications, as will be plain below. 
That, they were not walking uprightly is so, because in cases where danger is imminent, the truth must be preached openly and the opposite never condoned through fear of scandalizing others: “That which I tell you in the dark, speak ye in the light” (Mt 10:27); “The way of the just is right: the path of the just is right to walk in” (Is 26:7). The manner of the rebuke was fitting, i.e., public and plain. Hence he says, I said to Cephas, i.e., to Peter, before them all, because that dissimulation posed a danger to all: “Them that sin, reprove before all” (1 Tim 5:20). This is to be understood of public sins and not of private ones, in which the procedures of fraternal charity ought to be observed. Note well the distinction made here: a differentiation between Peter’s exercise of authority and his authority of ruling. The former is subject to rebuke, even on a matter that was “a danger to the Gospel teaching”; the latter, however, is not. Peter was still the pope, even though he was leading the faithful astray, and his authority from Christ was unquestioned by Paul. It would be much easier for us to deal with the multiple “dangers to Gospel teaching” presented by Francis if he were not a legitimate pope. But we ourselves have no authority of ruling, and no one may judge a pope. Whatever suspicions we may have, we have been told by the Church that Benedict XVI resigned and we have been told that Francis was elected by the conclave. There is no rival claimant to the Petrine Throne. The Universal Church has accepted Francis as pope. He is, whether we like it or not, the man we must accept as the Roman Pontiff. He holds the keys of St. Peter. If, like the marriage tribunal I used in the example above, he is determined at some point to have in some way nullified his office, then we may rest assured that we will come to know it after the fact. But we do not know it today. This is a cross. There is no question. 
It is a heavy one, and for some, it has scandalized them to the point of losing their faith. This is certainly a tragedy, and it is one that Pope Francis will have to answer for. No matter how much he causes us to grind our teeth, we should be praying for him, because to stand accountable before the Lord for using the highest office in the Church to confuse and scatter the flock is…well, about the most terrifying thing you can imagine. Speculation on these matters might feel cathartic, but it helps nothing. It does not remove him from office. It does not change what is being done. And for some who are already struggling with their faith, or why they’ve converted, running into endless debates on who is pope and who isn’t and why and why not just compounds the confusion they already feel about the chaos in the Church. It has the potential to lead people astray, or to cause them to give up completely. This is why we have the comment policy we do, and why we enforce it even when it sometimes seems a bit heavy handed. I’m not looking forward to standing before God and having to answer for why I let reckless and idle speculation run wild here. We’re careful in the stories we report to give you the information we have about the problems that exist, but not to draw conclusions that we have no right to come to. We ask for that same prudence to be extended to your discussion of these articles. The selective application of ecclesiastical penalties against Professor Galat when so many dissenters are empowered or promoted to positions of influence in the Church is surely an injustice. On the other hand, if Galat could have just refrained from arrogating to himself the authority to say with certainty what we cannot know with certainty, he might never have wound up in trouble in the first place. Whatever happens with Galat — and we should hope and pray that he receives justice, not the jackboot — it’s a lesson for all of us. One we’d do well not to forget.