content
stringlengths 10
4.9M
|
---|
/*
 * Computes the cross product of two 3D vectors: out = v1 x v2.
 * The output array must be different from v1 and v2 (no in-place operation)!
 */
void Vector_Cross_Product(float out[3], const float v1[3], const float v2[3])
{
    const float ax = v1[0], ay = v1[1], az = v1[2];
    const float bx = v2[0], by = v2[1], bz = v2[2];

    out[0] = (ay * bz) - (az * by);
    out[1] = (az * bx) - (ax * bz);
    out[2] = (ax * by) - (ay * bx);
}
<filename>tests/test_rtcrtptransceiver.py
from unittest import TestCase
from aiortc.rtcrtpparameters import RTCRtpCodecCapability
from aiortc.rtcrtptransceiver import RTCRtpTransceiver
class RTCRtpTransceiverTest(TestCase):
    def test_codec_preferences(self):
        def pcmu():
            # Build a fresh PCMU capability for each use, mirroring
            # independent construction at every call site.
            return RTCRtpCodecCapability(
                mimeType="audio/PCMU", clockRate=8000, channels=1
            )

        transceiver = RTCRtpTransceiver("audio", None, None)
        self.assertEqual(transceiver._preferred_codecs, [])

        # Setting empty preferences leaves them empty.
        transceiver.setCodecPreferences([])
        self.assertEqual(transceiver._preferred_codecs, [])

        # A single valid codec is stored as given.
        transceiver.setCodecPreferences([pcmu()])
        self.assertEqual(transceiver._preferred_codecs, [pcmu()])

        # Duplicates of the same codec are collapsed to a single entry.
        transceiver.setCodecPreferences([pcmu(), pcmu()])
        self.assertEqual(transceiver._preferred_codecs, [pcmu()])

        # A codec outside the capabilities is rejected with ValueError.
        with self.assertRaises(ValueError) as cm:
            transceiver.setCodecPreferences(
                [
                    RTCRtpCodecCapability(
                        mimeType="audio/bogus", clockRate=8000, channels=1
                    )
                ]
            )
        self.assertEqual(str(cm.exception), "Codec is not in capabilities")
|
package zmqchan
// TODO: Ensure Close() properly drains the TX channel
import (
"errors"
"fmt"
zmq "github.com/pebbe/zmq4"
"log"
"sync"
"sync/atomic"
)
var uniqueIndex uint64
// ZmqChanSocket wraps a ZMQ socket with Go channels. The zTx* and zControl*
// fields are inproc PAIR socket pipes connecting the channel-pumping
// goroutine (runChannels) with the socket-pumping goroutine (runSockets).
type ZmqChanSocket struct {
	Context     *zmq.Context
	zSock       *zmq.Socket // the main socket; owned by runSockets after construction
	zTxIn       *zmq.Socket // write end of the TX pipe (runChannels side)
	zTxOut      *zmq.Socket // read end of the TX pipe (runSockets side)
	zControlIn  *zmq.Socket // write end of the control pipe (runChannels side)
	zControlOut *zmq.Socket // read end of the control pipe (runSockets side)
	wg          sync.WaitGroup // tracks the two internal goroutines for Close()
	// Embeds the channel implementation; supplies TxChan, RxChan, control,
	// errors, initImpl and deinitImpl used throughout this file (declared elsewhere).
	ChanSocketImpl
}
// func barrier()
//
// Magic voodoo to provide a 'complete memory barrier' as seemingly required
// to pass zmq sockets between threads. A lock/unlock of a mutex acts as a
// full memory fence under the Go memory model.
func barrier() {
	var mutex sync.Mutex
	mutex.Lock()
	mutex.Unlock()
}
// func getUniqueId() uint64
//
// returns a process-wide unique ID; safe for concurrent use since it
// relies on an atomic increment of the package-level counter.
func getUniqueId() uint64 {
	return atomic.AddUint64(&uniqueIndex, 1)
}
// func (s *ZmqChanSocket) runChannels()
//
// Goroutine that pumps the Go-channel side of the socket pairs:
//  1. Reads the TX channel, and places the output in the zTxIn pipe pair
//  2. Reads the Control channel, and places the output in the zControlIn pipe pair
//
// Exits on any error or on receipt of a control message, closing its ends
// of both pipe pairs so runSockets can observe shutdown.
func (s *ZmqChanSocket) runChannels() {
	defer func() {
		// Close the write ends of the pipe pairs, then signal completion
		// to Close() via the wait group.
		s.zTxIn.Close()
		s.zControlIn.Close()
		log.Println("RUNC: quitting")
		s.wg.Done()
	}()
	for {
		select {
		case msg, ok := <-s.TxChan:
			if !ok {
				// it's closed - this should never happen
				log.Println("RUNC: read from Tx Chan ERROR")
				s.errors <- errors.New("ZMQ tx channel unexpectedly closed")
				// it's closed - this should not ever happen
				return
			} else {
				log.Println("RUNC: read from Tx Chan")
				// Forward the message to runSockets via the TX pipe pair.
				if _, err := s.zTxIn.SendMessage(msg); err != nil {
					s.errors <- err
					return
				}
				log.Println("RUNC: write to Tx Pair socket")
			}
		case control, ok := <-s.control:
			if !ok {
				log.Println("RUNC: read from Control Chan ERROR")
				s.errors <- errors.New("ZMQ control channel unexpectedly closed")
				// it's closed - this should not ever happen
				return
			} else {
				log.Printf("RUNC: read from Control Chan %v", control)
				// If it's come externally, send a control message; ignore errors
				if control {
					log.Println("RUNC: zControl sendmsg start")
					s.zControlIn.SendMessage("")
					log.Println("RUNC: zControl sendmsg done")
				}
				return
			}
		}
	}
}
// func (s *ZmqChanSocket) runSockets()
//
// Goroutine that pumps the ZMQ side:
//  1. Reads the main socket, and places the output into the rx channel
//  2. Reads the zTxOut pipe pair (fed by runChannels) and ...
//  3. Puts the output into the main socket
//  4. Reads the zControlOut pipe pair; a message there means Close() was
//     called, and triggers a best-effort drain of pending TX data before exit
func (s *ZmqChanSocket) runSockets() {
	defer func() {
		// Close this goroutine's ends of the pipe pairs plus the main
		// socket, then signal completion to Close().
		s.zTxOut.Close()
		s.zControlOut.Close()
		s.zSock.Close()
		s.wg.Done()
	}()
	// Message read from the TX pipe but not yet written to the main socket.
	var toXmit [][]byte = nil
	poller := zmq.NewPoller()
	idxSock := poller.Add(s.zSock, 0)
	idxTxOut := poller.Add(s.zTxOut, 0)
	idxControlOut := poller.Add(s.zControlOut, zmq.POLLIN)
	for {
		zSockflags := zmq.POLLIN
		var txsockflags zmq.State = 0
		// only if we have something to transmit are we interested in polling for output availability
		// else we just poll the input socket
		if toXmit == nil {
			txsockflags |= zmq.POLLIN
		} else {
			zSockflags |= zmq.POLLOUT
		}
		poller.Update(idxSock, zSockflags)
		poller.Update(idxTxOut, txsockflags)
		if sockets, err := poller.PollAll(-1); err != nil {
			// Poll failure: report it and tell runChannels to quit
			// (false means internally generated - no control echo needed).
			s.errors <- err
			s.control <- false
			return
		} else {
			if sockets[idxSock].Events&zmq.POLLIN != 0 {
				// we have received something on the main socket
				// we need to send it to the RX channel
				log.Println("RUNS: Read start from main socket")
				if parts, err := s.zSock.RecvMessageBytes(0); err != nil {
					log.Println("RUNS: Read ERROR from main socket")
					s.errors <- err
					s.control <- false
					return
				} else {
					s.RxChan <- parts
				}
				log.Println("RUNS: Read done from main socket")
			}
			if sockets[idxSock].Events&zmq.POLLOUT != 0 && toXmit != nil {
				// we are ready to send something on the main socket
				log.Println("RUNS: Write start to main socket")
				if _, err := s.zSock.SendMessage(toXmit); err != nil {
					log.Println("RUNS: Write ERROR to main socket")
					s.errors <- err
					s.control <- false
					return
				} else {
					toXmit = nil
				}
				log.Println("RUNS: Write done to main socket")
			}
			if sockets[idxTxOut].Events&zmq.POLLIN != 0 && toXmit == nil {
				// we have something on the input socket, put it in xmit
				var err error
				log.Println("RUNS: Read start from tx socket")
				toXmit, err = s.zTxOut.RecvMessageBytes(0)
				if err != nil {
					log.Println("RUNS: Read ERROR from tx socket")
					s.errors <- err
					s.control <- false
					return
				}
				log.Println("RUNS: Read done from tx socket")
			}
			if sockets[idxControlOut].Events&zmq.POLLIN != 0 {
				// something has arrived on the control channel
				// ignore errors
				log.Println("RUNS: Read start from control socket")
				_, _ = s.zControlOut.RecvMessageBytes(0)
				log.Println("RUNS: Read done from control socket")
				// no need to signal the other end as we know it is already exiting
				// what we need to do is ensure any transmitted stuff is sent.
				// this is more tricky than you might think. The data could be
				// in ToXmit, in the TX socket pair, or in the TX channel.
				// block in these cases for as long as the linger value
				// FIXME: Ideally we'd block in TOTAL for the linger time,
				// rather than on each send for the linger time.
				if linger, err := s.zSock.GetLinger(); err == nil {
					s.zSock.SetSndtimeo(linger)
				}
				if toXmit != nil {
					log.Println("RUNS: Write of toXmit start")
					if _, err := s.zSock.SendMessage(toXmit); err != nil {
						s.errors <- err
						return
					}
					log.Println("RUNS: Write of toXmit end")
				} else {
					// NOTE(review): toXmit is already nil on this branch;
					// this assignment is redundant but harmless.
					toXmit = nil
				}
				// Drain whatever is still queued in the TX pipe pair,
				// forwarding each message straight to the main socket.
				poller.Update(idxControlOut, 0)
				poller.Update(idxSock, 0)
				poller.Update(idxTxOut, zmq.POLLIN)
				for {
					if sockets, err := poller.PollAll(0); err != nil {
						s.errors <- err
						return
					} else if sockets[idxTxOut].Events&zmq.POLLIN != 0 && toXmit == nil {
						// we have something on the input socket, put it in xmit
						// NOTE(review): toXmit is not reset to nil after the
						// successful send below, so the 'toXmit == nil' guard
						// makes this loop exit after draining at most one
						// message - confirm whether that is intended.
						var err error
						log.Println("RUNS: Read drain start from tx socket")
						toXmit, err = s.zTxOut.RecvMessageBytes(0)
						if err != nil {
							log.Println("RUNS: Read drain ERROR from tx socket")
							s.errors <- err
							return
						}
						log.Println("RUNS: Read drain done from tx socket")
						log.Println("RUNS: Write of drained data start")
						if _, err := s.zSock.SendMessage(toXmit); err != nil {
							log.Println("RUNS: Write of drained data error")
							s.errors <- err
							return
						}
						log.Println("RUNS: Write of drained data done")
					} else {
						break
					}
				}
				// now read the TX channel until it is empty
				log.Println("RUNS: Emptying the TX channel start")
				done := false
				for !done {
					select {
					case msg, ok := <-s.TxChan:
						if ok {
							log.Println("RUNS: Flush write start")
							if _, err := s.zSock.SendMessage(msg); err != nil {
								log.Println("RUNS: Flush write error")
								s.errors <- err
								return
							}
							log.Println("RUNS: Flush write end")
						} else {
							log.Println("RUNS: Flush write no channel")
							s.errors <- errors.New("ZMQ tx channel unexpectedly closed")
							return
						}
					default:
						log.Println("RUNS: Flush write default")
						done = true
					}
				}
				log.Println("RUNS: Emptying the TX channel end")
				return
			}
		}
	}
}
// func (s *ZmqChanSocket) Close() error
//
// close a ZmqChanSocket. This will kill the internal goroutines, and close
// the main ZMQ Socket. It will also close the error channel, so a select() on
// it will return 'ok' as false. If an error is produced either during the close
// or has been produced prior to the close, it will be returned.
func (s *ZmqChanSocket) Close() error {
	log.Println("CLOS: writing to control")
	// 'true' tells runChannels the shutdown request came externally, so it
	// forwards the signal to runSockets via the control pipe pair.
	s.control <- true
	log.Println("CLOS: waiting for WG")
	// Wait for both internal goroutines to finish their cleanup.
	s.wg.Wait()
	var err error = nil
	// Non-blocking read: report the first queued error, if any.
	select {
	case err = <-s.errors:
	default:
	}
	// deinitImpl tears down the embedded channel implementation
	// (declared elsewhere - presumably closes the channels).
	s.deinitImpl()
	return err
}
// func (s *ZmqChanSocket) GetTxChan() chan<- [][]byte
//
// get the TxChannel as a write-only channel; messages written here are
// forwarded to the main ZMQ socket by the internal goroutines.
func (s *ZmqChanSocket) GetTxChan() chan<- [][]byte {
	return s.TxChan
}
// func (s *ZmqChanSocket) GetRxChan() <-chan [][]byte
//
// get the RxChannel as a read-only channel carrying messages received on
// the main ZMQ socket.
// (Fixed: the previous comment showed a write-only 'chan<-' signature.)
func (s *ZmqChanSocket) GetRxChan() <-chan [][]byte {
	return s.RxChan
}
// func newPair(c *zmq.Context) (*zmq.Socket, *zmq.Socket, error)
//
// Create a new inproc PAIR socket pair on the given context, bound/connected
// via a unique inproc address. On success both sockets are returned; on
// failure any partially created sockets are closed and nil sockets are
// returned alongside the error.
// (Fixed: the goto-based error path previously returned pointers to
// already-closed sockets; it now returns nil for both sockets on error.)
func newPair(c *zmq.Context) (*zmq.Socket, *zmq.Socket, error) {
	addr := fmt.Sprintf("inproc://_zmqchansocket_internal-%d", getUniqueId())
	a, err := c.NewSocket(zmq.PAIR)
	if err != nil {
		return nil, nil, err
	}
	if err = a.Bind(addr); err != nil {
		a.Close()
		return nil, nil, err
	}
	b, err := c.NewSocket(zmq.PAIR)
	if err != nil {
		a.Close()
		return nil, nil, err
	}
	if err = b.Connect(addr); err != nil {
		a.Close()
		b.Close()
		return nil, nil, err
	}
	return a, b, nil
}
// func NewZmqChanSocket(zSock *zmq.Socket, txbuf int, rxbuf int) (ChanSocket, error)
//
// Produce a new ZmqChanSocket. Pass a zmq.Socket, plus the buffering parameters for the channels.
// (Fixed: the previous comment showed an outdated signature taking a *zmq.Context
// and had a 'rxbuf in' typo; the context is now derived from the socket itself.)
//
// If this call succeeds (err == nil), then a ZmqChanSocket is returned, and control of your zmq.Socket is passed
// irrevocably to this routine. You should forget you ever had the socket. Do not attempt to use it in any way,
// as its manipulation is now the responsibility of goroutines launched by this routine. Closing the ZmqChanSocket
// will also close your zmq.Socket.
//
// If this routine errors, it is the caller's responsibility to close the zmq.Socket
//
func NewZmqChanSocket(zSock *zmq.Socket, txbuf int, rxbuf int) (ChanSocket, error) {
	s := &ZmqChanSocket{
		zSock: zSock,
	}
	// Reuse the context of the caller's socket for the internal pipe pairs.
	zmqContext, err := zSock.Context()
	if err != nil {
		return nil, err
	}
	// Control pipe pair: runChannels -> runSockets shutdown signalling.
	if s.zControlIn, s.zControlOut, err = newPair(zmqContext); err != nil {
		return nil, err
	}
	// TX pipe pair: runChannels -> runSockets data path.
	if s.zTxIn, s.zTxOut, err = newPair(zmqContext); err != nil {
		s.zControlIn.Close()
		s.zControlOut.Close()
		return nil, err
	}
	// as we should never read or send to these sockets unless they are ready
	// we set the timeout to 0 so a write or read in any other circumstance
	// returns an immediate error
	// (NOTE: errors from these setters are deliberately ignored)
	s.zSock.SetRcvtimeo(0)
	s.zSock.SetSndtimeo(0)
	s.zTxIn.SetRcvtimeo(0)
	s.zTxIn.SetSndtimeo(0)
	s.zTxOut.SetRcvtimeo(0)
	s.zTxOut.SetSndtimeo(0)
	s.zControlIn.SetRcvtimeo(0)
	s.zControlIn.SetSndtimeo(0)
	s.zControlOut.SetRcvtimeo(0)
	s.zControlOut.SetSndtimeo(0)
	// Initialize the embedded channel implementation (declared elsewhere).
	s.initImpl(txbuf, rxbuf)
	// Full memory barrier before handing the sockets to other goroutines.
	barrier()
	s.wg.Add(2)
	go s.runSockets()
	go s.runChannels()
	return s, nil
}
|
Image caption Matthew Tvrdon has been detained indefinitely
A van driver who killed a mother-of-three and injured 17 other people in a series of hit-and-runs around Cardiff has been detained indefinitely under the Mental Health Act.
Matthew Tvrdon, 32, drove on an eight-mile "journey of mayhem" in 30 minutes last October, killing Karina Menzies.
The judge called it horrific and said Tvrdon used his van as a weapon.
Tvrdon, who has paranoid schizophrenia, admitted manslaughter on the grounds of diminished responsibility.
He also admitted seven counts of attempted murder and other charges including three counts of grievous bodily harm with intent.
"Without your serious mental illness I am satisfied that you would not have behaved as you did" — Mr Justice Wyn Williams
Sentencing on Thursday, Mr Justice Wyn Williams said of 31-year-old Ms Menzies' death: "You ran over her quite deliberately while she and her two children were walking outside Ely fire station.
"There is compelling evidence that you ran over her not once but twice. She died from multiple injuries caused by your deliberate actions."
The judge went on to describe all the incidents of that October afternoon calling them "horrific and deliberate".
He said he was prepared to accept that Tvrdon's mental illness "provides the explanation" for his actions.
The judge also said Tvrdon was advised to reduce his medication in 2011 and he did not apportion blame to the defendant for that.
He later said he was prepared to accept the doctor's advice that Tvrdon's culpability for his actions of that day was low because of his illness.
"Without your serious mental illness I am satisfied that you would not have behaved as you did," the judge said.
But he added: "I will make an order that you should be detained at Ashworth maximum security hospital without restriction of time.
"You will be released from that institution only if a mental health tribunal considers it appropriate that you should be released.
"Given the nature and severity of your illness and the harm which you caused when in the grip of that illness you should expect that you will be detained in hospital for a very long period of time."
Tvrdon was told he would be released from Ashworth maximum security hospital in Merseyside only if a mental health tribunal was satisfied he should be released. He was also given a 25-year driving ban.
After the hearing, Ms Menzies' sister Samantha said: "Nothing will be good enough. At least he's away for a long time. I hope he never gets out. It's not really justice in my eyes."
Media playback is unsupported on your device Media caption Matthew Tvrdon's "journey of mayhem" through the west of Cardiff was captured by CCTV cameras
The court heard on the first day of sentencing he knocked people over like "skittles" as he targeted adults and children at five separate locations in the west of Cardiff.
Eye witnesses reported Tvrdon driving aggressively and erratically, heading towards oncoming traffic and swerving across lanes to mount kerbs in an effort to run over innocent people.
In the most serious incident he targeted Ms Menzies as she walked on the forecourt of the fire station with two of her children.
She pushed her children out of the way before the van hit her. Tvrdon then turned around and ran over Ms Menzies again before reversing over her.
A witness described how Ms Menzies was semi-conscious and her two young children were calling out for their mother.
Image caption Karina Menzies was killed while walking with her children near Ely fire station
The court was shown CCTV footage which showed each of the attacks in detail:
The first victims were at Crossways Road in the Ely area of the city. A woman, 29, and her two children - a boy, nine, and girl, eight, were injured.
In Cowbridge Road West, a man, 24, and a woman, 22, walking their two-year-old daughter in a buggy near Ely Reptile Centre were targeted.
Karina Menzies and her two children, eight and 23 months, were the next people Tvrdon drove at near Ely fire station in Cowbridge Road West.
At Grand Avenue in Ely, Renee Selio and her two daughters aged 10 and 12 were hit on a zebra crossing.
Tvrdon later attacked three people with a steering lock at the Asda petrol station at Leckwith retail park with the victims suffering minor injuries.
He got back into his van and drove at Gill White, 49, then her daughter Rebecca, 27, dragging them under the vehicle. They suffered multiple injuries.
Tvrdon was eventually stopped by police on the outskirts of Penarth, attempted to resist arrest and tried to assault a police officer with a crook lock, before being sprayed with CS gas.
The defendant, who tested negative for drink and drugs, told police he was under stress at work and had split up from his girlfriend.
Tvrdon had spent time under psychiatric care in 2003 and 2007 after he was diagnosed with paranoid schizophrenia but he had stopped taking his medication under medical advice in 2011.
He had "behaved strangely and bizarrely in the days before the offences", the court was told.
Meanwhile the Welsh government has said it is considering referring the case for an independent review. |
/**
 * Performs the initial read of the repository. The read is skipped while the
 * {@code isDownloadError} flag is set.
 *
 * NOTE(review): the original comment said the read is skipped "if data is
 * already available", but the visible guard checks the download-error flag
 * instead — confirm the intended condition against the rest of the class.
 */
public void initialRead(InitialReadListener listener) {
    if (!isDownloadError) {
        repository.initialRead(
            () -> {
                // Success: clear the error flag.
                // NOTE(review): the listener is not notified of success here —
                // confirm whether repository.initialRead invokes it elsewhere.
                isDownloadError = false;
            },
            error -> {
                // Failure: remember the error state and report a localized message.
                isDownloadError = true;
                Resources resources = getApplication().getResources();
                listener.onError(resources.getString(R.string.read_failed_detail));
            }
        );
    }
}
/**
 * Stores the known hosts file to use.
 * Note that the known hosts file is ignored if a {@link HostKeyRepository} is set with a non-{@code null} value.
 *
 * @param knownHosts The known hosts file to use.
 * @return This object.
 * @throws NullPointerException If the given file is {@code null}.
 * @see #withHostKeyRepository(HostKeyRepository)
 * @since 1.2
 */
public SFTPEnvironment withKnownHosts(File knownHosts) {
    // Fail fast on null before storing the configuration entry.
    Objects.requireNonNull(knownHosts);
    put(KNOWN_HOSTS, knownHosts);
    return this;
}
<reponame>pegasystems/somaria
import { BlockInput } from "./BlockInput";
import { BlockInputValue } from "./inputs/BlockInputValue";
import { BlockInputIndexedReference } from "./inputs/BlockInputIndexedReference";
import { BlockInputPublishedReference } from "./inputs/BlockInputPublishedReference";
import { BlockInputThemeAttributeReference } from "./inputs/BlockInputThemeAttributeReference";
import { BlockInputExternalReference } from "./inputs/BlockInputExternalReference";
import { RenderingContext } from "./RenderingContext";
export class BlockInputFactory {
public static fromData( input: BlockInputJSON, defaultValue: any, renderingContext: RenderingContext ): BlockInput<any> {
if( input === undefined ) {
input = {
accessType: BlockInput.Value
};
}
switch( input.accessType ) {
case BlockInput.Published:
return new BlockInputPublishedReference( renderingContext, input.blockId, input.reference, defaultValue );
case BlockInput.Indexed:
return new BlockInputIndexedReference( renderingContext, input.blockId, input.index, defaultValue );
case BlockInput.Theme:
return new BlockInputThemeAttributeReference( renderingContext, input.themeAttributeType, defaultValue );
case BlockInput.External:
return new BlockInputExternalReference( renderingContext, input.id, input.themeAttributeType, input.fallbackAccessType, input.value, defaultValue );
default:
case BlockInput.Value:
return new BlockInputValue( input.value, defaultValue );
}
}
} |
import qualified Data.ByteString.Lazy.Char8 as B
import Data.List

-- Reads "n w" on the first input line and n integers on the second,
-- then prints the result of 'solve'.
main = do
  inp <- fmap B.lines $ B.getContents
  let [n,w] = map readI $ B.words $ inp!!0
  let as = map readI $ B.words $ inp!!1
  print (solve w as)

-- Parses an Int from the front of a ByteString.
-- NOTE(review): partial — there is no case for 'Nothing', so malformed
-- input causes a pattern-match failure.
readI :: B.ByteString -> Int
readI s = case B.readInt s of Just (n,_) -> n

-- Counts the integer starting values x in [0, w] for which every prefix sum
-- x + b (b over the running sums of as, including the empty prefix 0) stays
-- within [0, w]: x <= w - u1 and x >= -u0, giving
-- max(0, w - u1 + 1 + min(0, u0)).
solve :: Int -> [Int] -> Int
solve w as =
  max c 0
  where
    c = w - u1 + 1 + (min 0 u0)
    bs = scanl (+) 0 as
    u1 = maximum bs
    u0 = minimum bs
/**
* Send Device Attributes (Primary DA).
*/
private void sendDeviceAttributes( int arg ) throws IOException
{
if ( arg != 0 )
{
log( "Unknown DA: " + arg );
}
else
{
if ( isVT52mode() )
{
writeResponse( ResponseType.ESC, "/Z" );
}
else
{
writeResponse( ResponseType.CSI, "?62;1;2;4;6;8;9;15c" );
}
}
} |
/**
 * Helper method that calls scanQsonValue() and, for non-value mappings,
 * scanProperties().
 *
 * @return this mapping, to allow fluent chaining
 */
public ClassMapping scan() {
    scanQsonValue();
    // Value-style mappings have no separate properties to scan.
    if (!isValue) scanProperties();
    return this;
}
// Returns true if this Genome already includes provided node
func (g *Genome) hasNode(node *network.NNode) bool {
if node == nil {
return false
}
if id, _ := g.getLastNodeId(); node.Id > id {
return false
}
for _, n := range g.Nodes {
if n.Id == node.Id {
return true
}
}
return false
} |
#include <bits/stdc++.h>
using namespace std;

// For each test case, reads n and finds x, y with x + y = n and x * y = n
// by solving the quadratic t^2 - n*t + n = 0; prints "N" when the
// discriminant n^2 - 4n is negative.
int main()
{
    ios::sync_with_stdio(0); cin.tie(0); cout.tie(0);
    int t;
    cin >> t;
    while (t--) {
        int n;
        cin >> n;
        double disc = (double)n * n - 4.0 * n;
        if (disc < 0) {
            cout << "N" << endl;
        } else {
            double root = sqrt(disc);
            double x = (n + root) / 2.0;
            double y = (n - root) / 2.0;
            cout << fixed << setprecision(9) << "Y " << x << " " << y << endl;
        }
    }
    return 0;
}
|
// Fenwick (binary indexed) tree over a zero-indexed array of T, supporting
// point updates and prefix/range sums in O(log n). When used as a frequency
// table it also supports order-statistic queries.
template <typename T>
class fenwick_tree {
  public:
  int n;               // number of elements
  std::vector<T> fenw; // internal tree storage

  // Builds an empty tree of size _n (all values zero).
  inline fenwick_tree(const int _n) : n(_n), fenw(_n) {}

  // Builds a tree initialized from the values of a; O(n log n).
  // (Fixed: the vector is now taken by const reference instead of by
  // value, avoiding a needless copy of the whole input.)
  template <typename A>
  inline fenwick_tree(const std::vector<A>& a) : fenwick_tree((int) a.size()) {
    for (int i = 0; i < n; i++) {
      add(i, a[i]);
    }
  }

  // Adds delta to element x.
  inline void add(int x, const T delta) {
    while (x < n) {
      fenw[x] += delta;
      x |= x + 1;
    }
  }

  // Returns the sum of elements in [0, x].
  inline T sum(int x) const {
    T res = 0;
    while (x >= 0) {
      res += fenw[x];
      x = (x & (x + 1)) - 1;
    }
    return res;
  }

  // Returns the sum of elements in [l, r].
  inline T sum(const int l, const int r) const {
    return sum(r) - sum(l - 1);
  }

  // Frequency-table mode: number of stored keys strictly less than val.
  inline int order_of_key(const T val) const {
    return (val == 0 ? 0 : (int) sum(val - 1));
  }

  // Frequency-table mode: smallest index whose prefix sum reaches pos + 1,
  // i.e. the pos-th (0-based) stored key; -1 if there is none.
  inline T find_by_order(const int pos) const {
    T res = -1;
    int low = 0, high = n - 1;
    while (low <= high) {
      int mid = (low + high) >> 1;
      if (sum(mid) >= pos + 1) {
        res = mid;
        high = mid - 1;
      } else {
        low = mid + 1;
      }
    }
    return res;
  }
};
|
import React, { useState } from "react";
import "./AddExam.css";
import { backend } from "./ConfigAssessor";
interface Props {
authorization: string;
}
const AddExam: React.FC<Props> = (props) => {
const [examId, setExamId] = useState(0);
// TO REMOVE
const handler = () => {
setExamId(-1);
};
return (
<main>
<h1>Add Exam</h1>
<p>
Add Exam {examId} {backend}
</p>
<p>
Please go to add question (link) and pick your exam number 1 titled
"exam title" (returned exam number and title
</p>
<button onClick={handler}>TO RMEOVE</button>
</main>
);
};
export default AddExam;
|
def send_event_data_batch(producer: EventHubProducerClient, data: str) -> None:
    """Create a batch pinned to partition "0", add one event carrying ``data``, and send it."""
    event_data_batch = producer.create_batch(partition_id="0")
    event_data_batch.add(EventData(data))
    producer.send_batch(event_data_batch)
<reponame>ContextMapper/context-mapper-archunit-extension
/*
* Copyright 2021 The Context Mapper Project Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.contextmapper.archunit.conjunctions;
import com.tngtech.archunit.lang.syntax.elements.GivenClassesConjunction;
import org.jmolecules.ddd.annotation.Module;
import org.jmolecules.ddd.annotation.*;
import org.jmolecules.event.annotation.DomainEvent;
import static com.tngtech.archunit.lang.syntax.ArchRuleDefinition.classes;
/**
 * ArchUnit class conjunctions selecting classes annotated with the
 * corresponding jMolecules DDD/event annotations. Each constant can be used
 * as the starting point of an ArchUnit rule.
 */
public class JMoleculesClassConjunctions {
    // Classes annotated as DDD aggregate roots.
    public static final GivenClassesConjunction aggregateClasses = classes().that().areAnnotatedWith(AggregateRoot.class);
    // Classes annotated as jMolecules modules.
    public static final GivenClassesConjunction moduleClasses = classes().that().areAnnotatedWith(Module.class);
    // Classes annotated as DDD entities.
    public static final GivenClassesConjunction entityClasses = classes().that().areAnnotatedWith(Entity.class);
    // Classes annotated as DDD value objects.
    public static final GivenClassesConjunction valueObjectClasses = classes().that().areAnnotatedWith(ValueObject.class);
    // Classes annotated as domain events.
    public static final GivenClassesConjunction domainEventClasses = classes().that().areAnnotatedWith(DomainEvent.class);
    // Classes annotated as DDD services.
    public static final GivenClassesConjunction serviceClasses = classes().that().areAnnotatedWith(Service.class);
    // Classes annotated as DDD repositories.
    public static final GivenClassesConjunction repositoryClasses = classes().that().areAnnotatedWith(Repository.class);
}
|
/** <!-- writeExample(ExceptionlessOutputStream,int[],double[],int[],double[],int) -->
 * Writes an example vector to the specified stream, with all features
 * being written in the order they appear in the vector.
 *
 * <p>Convenience overload that delegates to the seven-argument
 * {@code writeExample} with a {@code null} final argument.</p>
 *
 * @param out The output stream.
 * @param featureIndexes The lexicon indexes of the features.
 * @param featureValues The values or "strengths" of the features.
 * @param labelIndexes The lexicon indexes of the labels.
 * @param labelValues The values or "strengths" of the labels.
 * @param unpruned The number of features in the vector that aren't
 * pruned.
 **/
public static void writeExample(ExceptionlessOutputStream out,
                                int[] featureIndexes,
                                double[] featureValues, int[] labelIndexes,
                                double[] labelValues, int unpruned) {
    writeExample(out, featureIndexes, featureValues, labelIndexes,
                 labelValues, unpruned, null);
}
Efficient multitasking will soon come to mobile devices as a new solution was introduced by XDA-recognized developer astoncheah. The developer made an app as a solution for Android without the need of a custom ROM.
Multitasking on desktop is becoming quicker and more efficient these days, but it’s only now that a mobile version has been introduced as a potential hit on Android. There may have been many attempts in the past to bring better multitasking to an Android mobile device, but astoncheah’s program, called C-Floating, now allows the mobile user to open apps in floating windows. The apps have been designed to hover over an opened app to also allow interaction with multiple apps at a given time.
The C-Floating solution enables widgets to be added to the floating panes. Monitoring and statistical tools like battery stats and RAM can also be added to the said panes. The app allows a different style of Android Windows and Multitasking Experience–on a whole new level. Add a 3rd party widget, monitor system information effectively, perform quick actions, and float on top of any apps. Regular features include Floating Shortcuts, Floating Recent Apps, Floating Block Screen, and Floating Tables which indicate the battery info, CPU status, RAM info, and Network Data Traffic.
C Floating app is available for download from the Google Play Store. App is free but be prepared for possible in-app purchases for Floating Charts, Floating Widgets, and Floating Shortcuts.
Download C Floating from the Google Play Store.
VIA: XDA |
// Runtime configuration read from environment variables.
// Each value falls back to an empty string when the variable is unset.
export const WEBHOOK = process.env.WEBHOOK ?? '' // webhook endpoint
export const SECRET = process.env.SECRET ?? '' // shared secret
export const DB_URI = process.env.DB_URI ?? '' // database connection URI
export const V2RAY_URL = process.env.V2RAY_URL ?? '' // v2ray subscription URL
export const SSR_URL = process.env.SSR_URL ?? '' // SSR subscription URL
|
<reponame>sitronlabs/SitronLabs_GDEH0213B72_Arduino_Library
/* Self header */
#include "gdeh0213b72.h"
/* Arduino libraries */
#include <Arduino.h>
#include <Wire.h>
/* Config */
#ifndef CONFIG_GDEH0213B72_TIMEOUT
#define CONFIG_GDEH0213B72_TIMEOUT 5000 /* busy-wait timeout; presumably milliseconds — confirm against m_busy_wait() */
#endif
#if CONFIG_GDEH0213B72_DEBUG_ENABLED
#define CONFIG_GDEH0213B72_DEBUG_FUNCTION(x) Serial.println(x)
#else
/* Debugging disabled: debug statements compile to nothing */
#define CONFIG_GDEH0213B72_DEBUG_FUNCTION(x)
#endif
#define ALLSCREEN_GRAGHBYTES 4000 /* full-screen graphics buffer size in bytes (macro name typo kept for compatibility) */
/* Macros */
/* Sends a command byte followed by each of the variadic data bytes */
#define m_send_command_and_send_data(command, ...) { m_send_command(command); const uint8_t _data[] = {__VA_ARGS__}; for(size_t i = 0; i < sizeof(_data) / sizeof(uint8_t); i++) m_send_data(_data[i]); }
/**
 * Initializes the display driver: saves and configures the gpios, performs
 * hardware and software resets, then sends the panel configuration and LUT.
 * @param pin_busy (8 for example)
 * @param pin_res (9 for example)
 * @param pin_dc (10 for example)
 * @param pin_cs (11 for example)
 * @param pin_sck (12 for example)
 * @param pin_sdi (13 for example)
 * @return 0 in case of success, or a negative error code otherwise.
 */
int gdeh0213b72::setup(const uint8_t pin_busy, const uint8_t pin_res, const uint8_t pin_dc, const uint8_t pin_cs, const uint8_t pin_sck, const uint8_t pin_sdi) {
    int res;

    /* Save gpios */
    m_pin_busy = pin_busy;
    m_pin_res = pin_res;
    m_pin_dc = pin_dc;
    m_pin_cs = pin_cs;
    m_pin_clock = pin_sck;
    m_pin_data = pin_sdi;

    /* Configure gpios */
    pinMode(m_pin_busy, INPUT);
    pinMode(m_pin_res, OUTPUT);
    pinMode(m_pin_dc, OUTPUT);
    pinMode(m_pin_cs, OUTPUT);
    pinMode(m_pin_clock, OUTPUT);
    pinMode(m_pin_data, OUTPUT);

    /* Perform hardware reset (reset line pulsed low for 10 ms) */
    digitalWrite(m_pin_res, LOW);
    delay(10);
    digitalWrite(m_pin_res, HIGH);
    delay(10);
    res = m_busy_wait();
    if (res < 0) {
        CONFIG_GDEH0213B72_DEBUG_FUNCTION(" [e] Timed out performing hw reset!");
        return res;
    }

    /* Perform software reset (command 0x12) */
    m_send_command(0x12);
    res = m_busy_wait();
    if (res < 0) {
        CONFIG_GDEH0213B72_DEBUG_FUNCTION(" [e] Timed out performing sw reset!");
        return res;
    }

    /* Send configuration */
    m_send_command_and_send_data(0x74, 0x54); //set analog block control
    m_send_command_and_send_data(0x7E, 0x3B); //set digital block control
    m_send_command_and_send_data(0x01, 0xF9, 0x00, 0x00); //Driver output control
    m_send_command_and_send_data(0x11, 0x03); //data entry mode
    m_send_command_and_send_data(0x44, 0x00, (uint8_t )((m_width + 8 - 1) / 8 - 1)); //set Ram-X address start/end position //0x0C-->(15+1)*8=128
    m_send_command_and_send_data(0x45, 0x00, 0x00, (uint8_t )((m_height - 1) % 256), (uint8_t )((m_height - 1) / 256)); //set Ram-Y address start/end position //0xF9-->(249+1)=250
    m_send_command_and_send_data(0x3C, 0x03); //BorderWavefrom
    m_send_command_and_send_data(0x2C, 0x55); //VCOM Voltage
    m_send_command_and_send_data(0x03, k_lut_entire[70]); // presumably gate voltage from the LUT table — confirm with datasheet
    m_send_command_and_send_data(0x04, k_lut_entire[71], k_lut_entire[72], k_lut_entire[73]); // presumably source voltages from the LUT table
    m_send_command_and_send_data(0x3A, k_lut_entire[74]); //Dummy Line
    m_send_command_and_send_data(0x3B, k_lut_entire[75]); //Gate time
    m_lut_use((unsigned char*) k_lut_entire); //LUT
    m_send_command_and_send_data(0x4E, 0x00); // set RAM x address count to 0;
    m_send_command_and_send_data(0x4F, 0xF9, 0x00); // set RAM y address count to 0X127;
    res = m_busy_wait();
    if (res < 0) {
        CONFIG_GDEH0213B72_DEBUG_FUNCTION(" [e] Timed out configuring display!");
        return res;
    }

    /* Return success */
    return 0;
}
/**
 * Tries to detect the display by reading its status register and checking
 * the chip id bits.
 * @return true if the display was detected, or false otherwise.
 */
bool gdeh0213b72::detect(void) {

    /* Read status register */
    uint8_t reg_status;
    /* Fixed: the source contained the mojibake '®_status' (a mangled
     * '&reg_status'), which does not compile; pass the variable's address. */
    m_send_command_and_read_data(0x2F, &reg_status, 1);

    /* Ensure chip id is as expected */
    if ((reg_status & 0x03) != 0x01) {
        return false;
    }

    /* Return success */
    return true;
}
/**
 * Puts the display into deep sleep mode (command 0x10 with data 0x01).
 * @return 0 in case of success.
 * @note To Exit Deep Sleep mode, User required to send HWRESET to the driver
 */
int gdeh0213b72::hibernate(void) {

    /* Send deep sleep mode command */
    m_send_command_and_send_data(0x10, 0x01);

    /* Wait a bit
     * @note I don't know why? */
    delay(100);

    /* Return success */
    return 0;
}
/**
 * Waits for at least the given number of nanoseconds by spinning on nops.
 * @param[in] ns The minimum number of nanoseconds to wait for.
 * @note This has a lot of room for improvement.
 * @note NOTE(review): the loop bound (ns * 1000) / (F_CPU / 1000) does not
 *       look dimensionally consistent with cycles = ns * F_CPU / 1e9; on
 *       typical clock rates it appears to wait longer than requested, and
 *       the uint8_t counter limits the spin to 255 iterations — confirm on
 *       the target hardware.
 */
void inline __attribute__((always_inline)) gdeh0213b72::m_delay_ns(const uint32_t ns) {
    for (uint8_t cycles = 2; cycles < (ns * 1000) / (F_CPU / 1000); cycles++) {
        asm("nop");
    }
}
/**
 * Sends a command byte, bit-banged MSB-first over the 4-wire SPI pins with
 * the D/C line held low.
 * @param[in] command The command byte to send.
 * @see Datasheet section 7.2 "MCU Serial Peripheral Interface (4-wire SPI)".
 * @see Datasheet section 13 "Serial Peripheral Interface Timing".
 */
void gdeh0213b72::m_send_command(const uint8_t command) {

    /* Assert chip select */
    digitalWrite(m_pin_cs, 0);
    m_delay_ns(20); // tCSSU 20ns

    /* Send command (D/C low = command), MSB first, data latched on clock rise */
    digitalWrite(m_pin_dc, 0);
    for (uint8_t i = 0; i < 8; i++) {
        digitalWrite(m_pin_clock, 0);
        digitalWrite(m_pin_data, (command & (1 << (7 - i))));
        m_delay_ns(20); // tSCLLOW 20ns
        digitalWrite(m_pin_clock, 1);
        m_delay_ns(20); // tSCLHIGH 20ns
    }

    /* Release chip select */
    digitalWrite(m_pin_cs, 1);
    m_delay_ns(250); // tCSHIGH 250ns
}
/**
 * Sends a data byte, bit-banged MSB-first over the 4-wire SPI pins with
 * the D/C line held high.
 * @param[in] data The data byte to send.
 * @see Datasheet section 7.2 "MCU Serial Peripheral Interface (4-wire SPI)".
 * @see Datasheet section 13 "Serial Peripheral Interface Timing".
 */
void gdeh0213b72::m_send_data(const uint8_t data) {

    /* Assert chip select */
    digitalWrite(m_pin_cs, 0);
    m_delay_ns(20); // tCSSU 20ns

    /* Send data (D/C high = data), MSB first, data latched on clock rise */
    digitalWrite(m_pin_dc, 1);
    for (uint8_t i = 0; i < 8; i++) {
        digitalWrite(m_pin_clock, 0);
        digitalWrite(m_pin_data, (data & (1 << (7 - i))));
        m_delay_ns(20); // tSCLLOW 20ns
        digitalWrite(m_pin_clock, 1);
        m_delay_ns(20); // tSCLHIGH 20ns
    }

    /* Release chip select */
    digitalWrite(m_pin_cs, 1);
    m_delay_ns(250); // tCSHIGH 250ns
}
/**
 * In one transaction, sends a command and reads data from the spi shared data line.
 * @param[in] command The command byte to send.
 * @param[out] data A pointer to a buffer that will be updated with the bytes read.
 * @param[in] length The number of bytes to read into @p data.
 * @see Datasheet section 7.2 "MCU Serial Peripheral Interface (4-wire SPI)".
 * @see Datasheet section 13 "Serial Peripheral Interface Timing".
 * @note The shared data pin is switched to INPUT_PULLUP for the read phase
 *       and restored to OUTPUT before chip select is released.
 */
void gdeh0213b72::m_send_command_and_read_data(const uint8_t command, uint8_t * const data, const size_t length) {
    /* Assert chip select */
    digitalWrite(m_pin_cs, 0);
    m_delay_ns(20); // tCSSU 20ns
    /* Send command (D/C low), MSB first */
    pinMode(m_pin_data, OUTPUT);
    digitalWrite(m_pin_dc, 0);
    for (uint8_t i = 0; i < 8; i++) {
        digitalWrite(m_pin_clock, 0);
        digitalWrite(m_pin_data, (command & (1 << (7 - i))));
        m_delay_ns(20); // tSCLLOW 20ns
        digitalWrite(m_pin_clock, 1);
        m_delay_ns(20); // tSCLHIGH 20ns
    }
    /* Read data: flip the shared data line to input, D/C high */
    pinMode(m_pin_data, INPUT_PULLUP);
    digitalWrite(m_pin_dc, 1);
    for (size_t j = 0; j < length; j++) {
        data[j] = 0;
        for (uint8_t i = 0; i < 8; i++) {
            digitalWrite(m_pin_clock, 0);
            m_delay_ns(180); // tSCLLOW 180ns
            digitalWrite(m_pin_clock, 1);
            /* Sample on the rising edge, shifting MSB first */
            data[j] <<= 1;
            data[j] |= digitalRead(m_pin_data);
            m_delay_ns(180); // tSCLHIGH 180ns
        }
    }
    pinMode(m_pin_data, OUTPUT);
    /* Release chip select */
    digitalWrite(m_pin_cs, 1);
    m_delay_ns(250); // tCSHIGH 250ns
}
/**
 * Waits for the display to stop asserting the busy line.
 * @return 0 when the busy line is no longer asserted, or -ETIMEDOUT if the
 *         line stays asserted for longer than CONFIG_GDEH0213B72_TIMEOUT ms.
 */
int gdeh0213b72::m_busy_wait(void) {
    for (uint32_t start = millis();;) {
        /* Timeout check first, so a stuck panel cannot hang us forever */
        if (millis() - start > CONFIG_GDEH0213B72_TIMEOUT) {
            return -ETIMEDOUT;
        }
        if (digitalRead(m_pin_busy) == 0) {
            return 0;
        }
    }
}
/**
*
* @param[in] wave_data A pointer to a valid lut, stored in program memory.
*/
void gdeh0213b72::m_lut_use(const uint8_t *wave_data) {
/* Send the write lut register command */
m_send_command(0x32);
for (size_t count = 0; count < 70; count++) {
m_send_data(pgm_read_byte(&wave_data[count]));
}
}
/**
 * Holds the waveform configuration needed to perform a entire display redraw.
 * Layout: 5 LUTs x 7 voltage-select bytes, then 7 timing groups (TP A~D + RP),
 * then 6 trailing bytes.
 */
const PROGMEM uint8_t gdeh0213b72::k_lut_entire[] = {
        0x80, 0x60, 0x40, 0x00, 0x00, 0x00, 0x00, // LUT0: BB:     VS 0 ~7
        0x10, 0x60, 0x20, 0x00, 0x00, 0x00, 0x00, // LUT1: BW:     VS 0 ~7
        0x80, 0x60, 0x40, 0x00, 0x00, 0x00, 0x00, // LUT2: WB:     VS 0 ~7
        0x10, 0x60, 0x20, 0x00, 0x00, 0x00, 0x00, // LUT3: WW:     VS 0 ~7
        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // LUT4: VCOM:   VS 0 ~7
        0x03, 0x03, 0x00, 0x00, 0x02, // TP0 A~D RP0
        0x09, 0x09, 0x00, 0x00, 0x02, // TP1 A~D RP1
        0x03, 0x03, 0x00, 0x00, 0x02, // TP2 A~D RP2
        0x00, 0x00, 0x00, 0x00, 0x00, // TP3 A~D RP3
        0x00, 0x00, 0x00, 0x00, 0x00, // TP4 A~D RP4
        0x00, 0x00, 0x00, 0x00, 0x00, // TP5 A~D RP5
        0x00, 0x00, 0x00, 0x00, 0x00, // TP6 A~D RP6
        0x15, 0x41, 0xA8, 0x32, 0x30, 0x0A, };
/**
 * Constructs the "fast" driver variant, backed by a local 122x250
 * one-bit-per-pixel canvas that is flushed to the panel in bulk.
 */
gdeh0213b72_fast::gdeh0213b72_fast(void) :
        GFXcanvas1(122, 250) {
}
/**
 * Enables or disables hardware inversion of the displayed image.
 * @param[in] i true to invert, false for normal output.
 * @note Modifications will only be visible on the display after a call to one of the draw functions.
 */
void gdeh0213b72_fast::invertDisplay(bool i) {
    /* Display update control 1 (0x21): 0x08 inverts ram, 0x00 is normal */
    m_send_command_and_send_data(0x21, i ? 0x08 : 0x00);
}
/**
 * Requests the display to display the contents of the entire ram.
 * Uploads the whole local canvas, then triggers a full panel update.
 * @return 0 in case of success, or a negative error code otherwise.
 */
int gdeh0213b72_fast::refresh_entire(void) {
    int res;
    /* Update display ram with local contents: reset the ram address
     * counters (0x4E/0x4F) then stream the canvas through 0x24 */
    m_send_command_and_send_data(0x4E, 0);
    m_send_command_and_send_data(0x4F, 0, 0);
    m_send_command(0x24);
    uint8_t *buffer_start = getBuffer();
    /* 4000 = 250 rows x 16 bytes/row (ceil(122 / 8)), the full canvas */
    for (uint16_t i = 0; i < 4000; i++) {
        m_send_data(buffer_start[i]);
    }
    /* Send display update command */
    m_send_command_and_send_data(0x22, 0xC7);
    /* Send master activation command */
    m_send_command(0x20);
    /* Wait for end of update signaled by busy pin */
    res = m_busy_wait();
    if (res < 0) {
        CONFIG_GDEH0213B72_DEBUG_FUNCTION(" [e] Failed to perform update!");
        return res;
    }
    /* Return success */
    return 0;
}
/**
 * Constructs the "slow" driver variant: no local canvas, every pixel
 * access goes straight to the panel's ram over SPI.
 */
gdeh0213b72_slow::gdeh0213b72_slow(void) :
        Adafruit_GFX(122, 250) {
}
/**
 * Enables or disables hardware inversion of the displayed image.
 * @param[in] i true to invert, false for normal output.
 * @note Modifications will only be visible on the display after a call to one of the draw functions.
 */
void gdeh0213b72_slow::invertDisplay(bool i) {
    /* Display update control 1 (0x21): 0x08 inverts ram, 0x00 is normal */
    m_send_command_and_send_data(0x21, i ? 0x08 : 0x00);
}
/**
 * Draws one pixel, honouring the current rotation setting.
 * @param[in] x The horizontal coordinate in the current rotation.
 * @param[in] y The vertical coordinate in the current rotation.
 * @param[in] color 0 for black, anything else for white.
 * @note Each call performs a full read-modify-write SPI transaction
 *       against the panel ram, so this path is slow by design.
 */
void gdeh0213b72_slow::drawPixel(int16_t x, int16_t y, uint16_t color) {
    /* Map rotated coordinates onto native ram coordinates */
    uint16_t ram_y, ram_x;
    switch (rotation) {
        case 0: { // Default rotation, portrait with flex at the bottom
            ram_x = x;
            ram_y = y;
            break;
        }
        case 1: { // Rotated 90° clockwise
            ram_x = y;
            ram_y = 249 - x;
            break;
        }
        case 2: { // Rotated 180° clockwise
            ram_x = 121 - x;
            ram_y = 249 - y; // BUGFIX: was `249 - x`, which mirrored the wrong axis
            break;
        }
        case 3: { // Rotated 270° clockwise
            ram_x = 121 - y;
            ram_y = x;
            break;
        }
        default: {
            CONFIG_GDEH0213B72_DEBUG_FUNCTION(" [e] Invalid rotation!");
            return;
        }
    }
    /* Ensure parameters are valid */
    if (x < 0 || y < 0 || ram_x >= m_width || ram_y >= m_height) {
        CONFIG_GDEH0213B72_DEBUG_FUNCTION(" [e] Invalid coordinates!");
        return;
    }
    /* Retrieve byte containing pixel value from ram
     * @note First one is a dummy byte */
    uint8_t byte[2];
    m_send_command_and_send_data(0x4E, (uint8_t )(ram_x / 8));
    m_send_command_and_send_data(0x4F, (uint8_t )(ram_y % 256), (uint8_t )(ram_y / 256));
    m_send_command_and_read_data(0x27, byte, 2);
    /* Modify byte (bit 7 is the leftmost pixel of the 8-pixel group) */
    if (color) byte[0] = byte[1] | (1 << (7 - (ram_x % 8)));
    else byte[0] = byte[1] & ~(1 << (7 - (ram_x % 8)));
    /* Write modified byte to ram, skipping the transaction when unchanged */
    if (byte[0] != byte[1]) {
        m_send_command_and_send_data(0x4E, (uint8_t )(ram_x / 8));
        m_send_command_and_send_data(0x4F, (uint8_t )(ram_y % 256), (uint8_t )(ram_y / 256));
        m_send_command_and_send_data(0x24, byte[0]);
    }
}
/**
 * Fills the entire screen with the given color.
 * @param[in] color 0 for black, anything else for white.
 * @note Modifications happen in ram, and will only be visible on the display after a call to one of the draw functions.
 */
void gdeh0213b72_slow::fillScreen(uint16_t color) {
    const uint8_t pattern = color ? 0xFF : 0x00;
    const size_t total = m_height * ((m_width + 8 - 1) / 8);
    /* Enter ram write mode (0x24), then stream one byte per 8 horizontal pixels */
    m_send_command(0x24);
    for (size_t i = 0; i < total; i++) {
        m_send_data(pattern);
    }
}
/**
 * Requests the display to show the contents of the entire ram.
 * @return 0 in case of success, or a negative error code otherwise.
 */
int gdeh0213b72_slow::refresh_entire(void) {
    /* Display update sequence: configure (0x22, 0xC7) then master activation (0x20) */
    m_send_command_and_send_data(0x22, 0xC7);
    m_send_command(0x20);
    /* The busy pin stays asserted until the panel has finished refreshing */
    const int res = m_busy_wait();
    if (res < 0) {
        CONFIG_GDEH0213B72_DEBUG_FUNCTION(" [e] Failed to perform update!");
        return res;
    }
    return 0;
}
|
<reponame>xiang12835/GoF23
/* Factory Method pattern
   Apple factory
 */
public class AppleFactory {
    /**
     * Creates a new {@code Apple}, typed as the {@code Fruit} product interface.
     */
    public Fruit create(){
        return new Apple();
    }
}
<gh_stars>0
package com.wix.reactnativenotifications.core.notification;
import android.app.Application;
import android.content.BroadcastReceiver;
import android.content.Context;
import android.content.Intent;
import android.util.Log;
import static com.wix.reactnativenotifications.Defs.LOGTAG;
public class PushNotificationPublisher extends BroadcastReceiver {
final static String NOTIFICATION_ID = "notificationId";
@Override
public void onReceive(Context context, Intent intent) {
Log.d(LOGTAG, "Received scheduled notification intent");
int notificationId = intent.getIntExtra(NOTIFICATION_ID, 0);
long currentTime = System.currentTimeMillis();
Application applicationContext = (Application) context.getApplicationContext();
final IPushNotification pushNotification = PushNotification.get(applicationContext, intent.getExtras());
Log.i(LOGTAG, "PushNotificationPublisher: Prepare To Publish: " + notificationId + ", Now Time: " + currentTime);
pushNotification.onPostScheduledRequest(notificationId);
}
} |
package model
// TeamMembershipsResponse models the response payload for a team-memberships
// request. It is currently empty; fields are presumably still to be added --
// confirm the intended schema against the API this package models.
type TeamMembershipsResponse struct {
}
|
<reponame>miksumin/ESP-OT<filename>ESP-OT-Lite/OT-core.h
/*
OpenTherm core
Version: 1.0
Author: <NAME>
Date: March 30, 2021
*/
#include <OpenTherm.h> // https://github.com/ihormelnyk/opentherm_library
OpenTherm ot(OT_INPIN, OT_OUTPIN); // OpenTherm bus driver on the configured in/out pins

char ot_status[32];           // Human-readable OpenTherm link status (e.g. "OnLine", "Request timeout")
bool dhw_present = false;     // DHW present flag, decoded from the boiler config byte
//
float boiler_temp = 0.0;      // Boiler (CH) temperature, °C
float water_temp = 0.0;       // Hot water temperature, °C
//
byte boiler_setpoint = 60;    // Boiler setpoint
byte boiler_sp_min = 40;      // Boiler setpoint min (overwritten by boiler-reported bounds)
byte boiler_sp_max = 80;      // Boiler setpoint max (overwritten by boiler-reported bounds)
//
byte water_setpoint = 50;     // Hot water setpoint
byte water_sp_min = 35;       // Hot water setpoint min (overwritten by boiler-reported bounds)
byte water_sp_max = 70;       // Hot water setpoint max (overwritten by boiler-reported bounds)
//
bool boiler_mode = false;     // Central Heating mode requested by us
bool water_mode = false;      // Hot Water mode requested by us (if present)
//
uint16_t fault_code = 0;      // Fault code (in case of fault status)
int8_t fault_status = -1;     // Fault status (-1 = unknown/offline, 0/1 = last reported)
int8_t boiler_status = -1;    // Central Heating status (-1 = unknown)
int8_t water_status = -1;     // Hot Water status (-1 = unknown, if present)
int8_t flame_status = -1;     // Flame status (-1 = unknown)
byte ot_modLevel = 0;         // Modulation level, %

bool set_boiler_temp = false;   // pending write of boiler_setpoint (Tset)
bool set_water_temp = false;    // pending write of water_setpoint (TdhwSet)
bool request_settings = false;  // request boiler capability registers next cycle
int8_t requestCounter = -1;     // current slot in the poll cycle (-1 = idle between cycles)
unsigned long ot_lastUpdated = 0; // millis() timestamp of the last completed cycle
//
/**
 * Applies a named setpoint command, clamping the value into the allowed range
 * and flagging the corresponding register for an immediate write.
 * @param cmd   "boiler_sp" (central heating) or "water_sp" (hot water).
 * @param value Requested setpoint.
 */
void ot_command(String cmd, byte value) {
    if (cmd == "boiler_sp") {
        byte sp = value;
        if (sp < boiler_sp_min) sp = boiler_sp_min;
        if (sp > boiler_sp_max) sp = boiler_sp_max;
        boiler_setpoint = sp;
        DebugPrintln("OT new CH setpoint: "+String(boiler_setpoint));
        set_boiler_temp = true; // force Tset
    }
    if (cmd == "water_sp") {
        byte sp = value;
        if (sp < water_sp_min) sp = water_sp_min;
        if (sp > water_sp_max) sp = water_sp_max;
        water_setpoint = sp;
        DebugPrintln("OT new DHW setpoint: "+String(water_setpoint));
        set_water_temp = true; // force TdhwSet
    }
}
//
/**
 * Pin-change interrupt trampoline (placed in IRAM for ESP platforms);
 * delegates straight to the OpenTherm library's handler.
 */
void ICACHE_RAM_ATTR handleInterrupt() {
    ot.handleInterrupt();
}
//
/**
 * Decodes a Status (ID 0) response and logs every flag transition.
 * @param response Raw OpenTherm Status response frame.
 * @note On the first successful status after the link was down ("N/A" or
 *       "Request timeout") a banner is printed and the lines below are
 *       indented via strConfig; afterwards strConfig stays empty, so only
 *       changed flags are logged without the banner.
 * @note The int8_t status globals start at -1, which never equals a bool,
 *       so each flag is always logged once after (re)connection.
 */
void handleStatus(unsigned long response) {
    //
    bool bValue;
    String strConfig;
    if ((String(ot_status) == "Request timeout") || (String(ot_status) == "N/A")) {
        DebugPrintln("*** Boiler status: ***");
        strConfig = "	";
    }
    //
    bValue = ot.isFault(response);
    if (fault_status != bValue) {
        fault_status = bValue;
        DebugPrintln(strConfig+"Boiler Fault status: "+String(fault_status? "ON":"OFF"));
    }
    bValue = ot.isCentralHeatingActive(response);
    if (boiler_status != bValue) {
        boiler_status = bValue;
        DebugPrintln(strConfig+"Boiler CH status: "+String(boiler_status? "ON":"OFF"));
    }
    bValue = ot.isHotWaterActive(response);
    if (water_status != bValue) {
        water_status = bValue;
        DebugPrintln(strConfig+"Boiler DHW status: "+String(water_status? "ON":"OFF"));
    }
    bValue = ot.isFlameOn(response);
    if (flame_status != bValue) {
        flame_status = bValue;
        DebugPrintln(strConfig+"Boiler Flame status: "+String(flame_status? "ON":"OFF"));
    }
}
//
/**
 * Decodes the Slave Configuration byte (OpenTherm ID 3) and logs each flag.
 * @param boiler_config The configuration flag byte reported by the boiler.
 */
void handleConfig(byte boiler_config) {
    /* Bit 0: DHW present, bit 1: control type, bit 2: cooling config,
     * bit 3: DHW config, bit 4: pump control, bit 5: CH2 present */
    dhw_present = boiler_config & 0x01;
    byte control_type = boiler_config & 0x02;
    byte cooling_config = boiler_config & 0x04;
    byte dhw_config = boiler_config & 0x08;
    byte pump_control = boiler_config & 0x10;
    byte CH2_present = boiler_config & 0x20;
    //
    DebugPrintln("*** Boiler config: ***");
    DebugPrintln("	DHW present: "+String(dhw_present? "":"not")+" present");
    DebugPrintln("	Control type: "+String(control_type? "ON/OFF":"Modulating"));
    DebugPrintln("	Cooling config: "+String(cooling_config? "":"not")+" supported");
    DebugPrintln("	DHW config: "+String(dhw_config? "storage tank":"instantaneous or not-specified"));
    DebugPrintln("	Pump control: "+String(pump_control? "":"not")+" allowed");
    /* BUGFIX: this line previously tested dhw_present instead of CH2_present */
    DebugPrintln("	CH2 present: "+String(CH2_present? "":"not")+" present");
}
//
/**
 * Dispatches a valid OpenTherm response to the matching state update,
 * logging value changes for temperatures and modulation level.
 * @param response Raw OpenTherm response frame (data id + 16-bit payload).
 */
void handleResponse(unsigned long response) {
    //
    byte nValue;
    float fValue;
    byte boiler_config;
    //
    byte DataID = ot.getDataID(response);
    switch (DataID) {
        case OpenThermMessageID::Status:
            handleStatus(response);
            break;
        case OpenThermMessageID::RelModLevel:
            /* High byte of the payload is the integer modulation percentage */
            nValue = (response >> 8) & 0xFF;
            if (ot_modLevel != nValue) {
                ot_modLevel = nValue;
                DebugPrintln("Modulation level: "+String(ot_modLevel)+"%");
            }
            break;
        case OpenThermMessageID::SConfigSMemberIDcode:
            /* High byte carries the slave configuration flags */
            boiler_config = (response >> 8) & 0xFF;
            handleConfig(boiler_config);
            break;
        case OpenThermMessageID::ASFflags:
            fault_code = response & 0xFFFF;
            break;
        case OpenThermMessageID::Tboiler:
            fValue = ot.getFloat(response);
            if (boiler_temp != fValue) {
                boiler_temp = fValue;
                DebugPrintln("Boiler CH temperature: "+String(boiler_temp,1)+"°C");
            }
            break;
        case OpenThermMessageID::Tdhw:
            fValue = ot.getFloat(response);
            if (water_temp != fValue) {
                water_temp = fValue;
                DebugPrintln("Boiler DHW temperature: "+String(water_temp,1)+"°C");
            }
            break;
        case OpenThermMessageID::TSet:
            boiler_setpoint = ot.getFloat(response);
            break;
        case OpenThermMessageID::TdhwSet:
            water_setpoint = ot.getFloat(response);
            break;
        case OpenThermMessageID::MaxTSetUBMaxTSetLB:
            /* Lower bound in the low byte, upper bound in the high byte */
            boiler_sp_min = response & 0xFF;
            boiler_sp_max = (response >> 8) & 0xFF;
            break;
        case OpenThermMessageID::TdhwSetUBTdhwSetLB:
            water_sp_min = response & 0xFF;
            water_sp_max = (response >> 8) & 0xFF;
            break;
        default: break;
    }
}
//
/**
 * Library callback invoked for every completed OpenTherm request.
 * Tracks the link status string, retries timed-out requests up to three
 * times, and forces a settings re-read after the link comes back online.
 * @param response       Raw response frame (meaningful for SUCCESS/INVALID).
 * @param responseStatus Outcome reported by the OpenTherm library.
 */
void responseCallback(unsigned long response, OpenThermResponseStatus responseStatus) {
    //
    static char last_status[32];
    static byte ot_try_count = 0;
    //
    if (responseStatus == OpenThermResponseStatus::NONE)
        strcpy(last_status, "OpenTherm is not initialized");
    else if (responseStatus != OpenThermResponseStatus::TIMEOUT) {
        // we got SUCCESS or INVALID response status
        if (ot.getMessageType(response) != OpenThermMessageType::UNKNOWN_DATA_ID) {
            if (ot.isValidResponse(response)) {
                // SUCCESS response
                handleResponse(response); // handle response
                //
                if (fault_status == 1) {
                    // slot 1 is the ASFflags read, so fault_code is fresh here
                    if (requestCounter == 1)
                        sprintf(last_status, "Fault code: 0x%04X", fault_code);
                }
                else {
                    strcpy(last_status, "OnLine");
                }
                //
                ot_try_count = 0;
            }
            else
                // INVALID response status
                // BUGFIX: response is unsigned long; use the 'l' length
                // modifier so the format matches on 16-bit-int targets
                sprintf(last_status, "Response invalid: 0x%08lX", response);
        }
        else
            sprintf(last_status, "Unknown DataID for request #%d", requestCounter);
    }
    else {
        // TIMEOUT response status
        if (ot_try_count < 3) {
            requestCounter--; // let's repeat last request for 3 times
            ot_try_count++;
        }
        else {
            // status changed to Timeout; mark all boiler state unknown
            strcpy(last_status, "Request timeout"); // was stpcpy; strcpy for consistency
            requestCounter = -1;
            fault_status = -1;
            boiler_status = -1;
            water_status = -1;
            flame_status = -1;
        }
    }
    //
    if ((String(ot_status) != String(last_status)) && (String(last_status) != "")) {
        if ((String(ot_status) == "Request timeout") && (String(last_status) == "OnLine")) {
            // status changed from Timeout to OnLine
            ot_command("boiler_sp", boiler_setpoint); // force write Tset
            ot_command("water_sp", water_setpoint); // force write TdhwSet
            request_settings = true; // request boiler settings
        }
        //
        strcpy(ot_status, last_status);
        DebugPrintln("OpenTherm status: "+String(ot_status));
    }
}
//
/**
 * Initializes the OpenTherm driver: installs the interrupt handler and the
 * asynchronous response callback, and resets the status string to "N/A".
 */
void ot_init() {
    ot.begin(handleInterrupt, responseCallback);
    DebugPrintln("OpenTherm initialized");
    strcpy(ot_status, "N/A");
}
//
/**
 * Cooperative OpenTherm poll loop; call from the sketch's main loop().
 * Steps a 10-slot request cycle (status, fault flags, setpoint writes,
 * temperature reads, modulation level and -- when request_settings is set --
 * boiler capability registers), issuing at most one asynchronous request
 * per pass while the bus is idle, with a 1 s pause between cycles.
 * @note NOTE(review): cases mix OpenThermRequestType and OpenThermMessageType
 *       for READ_DATA; presumably these are aliases in the library -- confirm.
 */
void ot_loop() {
    //
    ot.process(); // OpenTherm process response status
    //
    if (ot.isReady()) { // check if OpenTherm is ready
        //
        // wait 1 sec between survey cycles
        if ((requestCounter < 0) && (millis() - ot_lastUpdated < 1000)) {
            return;
        }
        //
        unsigned long request = 0;
        bool result = 0;
        //
        requestCounter++;
        //
        switch (requestCounter) {
            case 0: // Status request
                request = ot.buildSetBoilerStatusRequest(boiler_mode, water_mode, false, false, false);
                result = ot.sendRequestAync(request);
                break;
            case 1: // Get Boiler Fault Flags (only while a fault is reported)
                if (fault_status == 1) {
                    request = ot.buildRequest(OpenThermRequestType::READ_DATA, OpenThermMessageID::ASFflags, 0);
                    result = ot.sendRequestAync(request);
                }
                break;
            case 2: // Set Boiler Temp request (only when a new setpoint is pending)
                if (set_boiler_temp) {
                    request = ot.buildSetBoilerTemperatureRequest((float)boiler_setpoint);
                    result = ot.sendRequestAync(request);
                    set_boiler_temp = false;
                }
                break;
            case 3: // Get Boiler Temp request
                request = ot.buildRequest(OpenThermMessageType::READ_DATA, OpenThermMessageID::Tboiler, 0);
                result = ot.sendRequestAync(request);
                break;
            case 4: // Set DHW Temp request (only when DHW exists and a write is pending)
                if ((dhw_present) && (set_water_temp)) {
                    request = ot.buildSetDHWTemperatureRequest((float)water_setpoint);
                    result = ot.sendRequestAync(request);
                    set_water_temp = false;
                }
                break;
            case 5: // Get DHW Temp request
                if (dhw_present) {
                    request = ot.buildRequest(OpenThermMessageType::READ_DATA, OpenThermMessageID::Tdhw, 0);
                    result = ot.sendRequestAync(request);
                }
                break;
            case 6: // Get Relative Modulation Level (only while the flame is on)
                if (flame_status == 1) {
                    request = ot.buildRequest(OpenThermRequestType::READ_DATA, OpenThermMessageID::RelModLevel, 0);
                    result = ot.sendRequestAync(request);
                }
                break;
            case 7: // Get Slave Config & MemberID code
                if (request_settings) {
                    request = ot.buildRequest(OpenThermRequestType::READ_DATA, OpenThermMessageID::SConfigSMemberIDcode, 0);
                    result = ot.sendRequestAync(request);
                }
                break;
            case 8: // Max CH water setpoint bounds
                if (request_settings) {
                    request = ot.buildRequest(OpenThermRequestType::READ_DATA, OpenThermMessageID::MaxTSetUBMaxTSetLB, 0);
                    result = ot.sendRequestAync(request);
                }
                break;
            case 9: // Max DHW setpoint bounds
                if (request_settings) {
                    request = ot.buildRequest(OpenThermRequestType::READ_DATA, OpenThermMessageID::TdhwSetUBTdhwSetLB, 0);
                    result = ot.sendRequestAync(request);
                }
                break;
            default: // cycle complete: go idle and timestamp for the 1 s pause
                requestCounter = -1;
                request_settings = false;
                ot_lastUpdated = millis(); // cycle finished time
                break;
        }
        //
        if ((request > 0) && (!result)) {
            DebugPrintln("OpenTherm error send request #"+String(requestCounter));
        }
        //requestCounter++;
    }
}
|
<filename>src/connectivity/network/tun/network-tun/buffer.h
// Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef SRC_CONNECTIVITY_NETWORK_TUN_NETWORK_TUN_BUFFER_H_
#define SRC_CONNECTIVITY_NETWORK_TUN_NETWORK_TUN_BUFFER_H_
#include <fuchsia/net/tun/cpp/fidl.h>
#include <lib/fzl/vmo-mapper.h>
#include <array>
#include <ddktl/protocol/network/device.h>
#include <fbl/span.h>
namespace network {
namespace tun {
class Buffer;
// A data structure that stores keyed VMOs and allocates buffers.
//
// `VmoStore` stores up to `MAX_VMOS` VMOs keyed by an identifier bound to the range [0,
// `MAX_VMOS`). `VmoStore` can be used to allocate buffers backed by the VMOs it contains.
//
// This class is used to fulfill the VMO registration mechanism used by
// `ddk.protocol.network.device`.
class VmoStore {
 public:
  ~VmoStore() = default;
  // Reads `len` bytes at `offset` from the VMO identified by `id` into `data`, which must be a
  // `uint8_t` iterator.
  // Returns an error if the specified region is invalid or `id` is not registered.
  template <class T>
  zx_status_t Read(uint8_t id, size_t offset, size_t len, T data) {
    fbl::Span<uint8_t> vmo_data;
    zx_status_t status = GetMappedVmo(id, &vmo_data);
    if (status != ZX_OK) {
      return status;
    }
    // NOTE(review): `offset + len` could wrap for adversarial values; assumes
    // callers pass validated sizes -- confirm upstream validation.
    if (offset + len > vmo_data.size()) {
      return ZX_ERR_OUT_OF_RANGE;
    }
    std::copy_n(vmo_data.begin() + offset, len, data);
    return ZX_OK;
  }
  // Writes `len` bytes at `offset` into the VMO identified by `id` from `data`, which must be an
  // `uint8_t` iterator.
  // Returns an error if the specified region is invalid or `id` is not registered.
  template <class T>
  zx_status_t Write(uint8_t id, size_t offset, size_t len, T data) {
    fbl::Span<uint8_t> vmo_data;
    zx_status_t status = GetMappedVmo(id, &vmo_data);
    if (status != ZX_OK) {
      return status;
    }
    if (offset + len > vmo_data.size()) {
      return ZX_ERR_OUT_OF_RANGE;
    }
    std::copy_n(data, len, vmo_data.begin() + offset);
    return ZX_OK;
  }
  // Registers and maps `vmo` identified by `id`.
  // `id` comes from a `NetworkDeviceInterface` and is part of the NetworkDevice contract.
  // Returns an error if the identifier is invalid or already in use, or the mapping fails.
  zx_status_t RegisterVmo(uint8_t id, zx::vmo vmo);
  // Unregister a previously registered VMO with `id`, unmapping it from memory and releasing the
  // VMO handle.
  // Returns an error if the identifier is invalid or does not map to a registered VMO.
  zx_status_t UnregisterVmo(uint8_t id);
  // Copies `len` bytes from `src_store`'s VMO with `src_id` at `src_offset` to `dst_store`'s VMO
  // with `dst_id` at `dst_offset`.
  //
  // Equivalent to:
  //   T data;
  //   src_store.Read(src_id, src_offset, len, back_inserter(data));
  //   dst_store.Write(dst_id, dst_offset, len, data.begin());
  static zx_status_t Copy(VmoStore* src_store, uint8_t src_id, size_t src_offset,
                          VmoStore* dst_store, uint8_t dst_id, size_t dst_offset, size_t len);
  // Buffer factories: wrap a device tx request / rx space descriptor in a
  // `Buffer` backed by this store's mapped VMOs.
  Buffer MakeTxBuffer(const tx_buffer_t* tx, bool get_meta);
  Buffer MakeRxSpaceBuffer(const rx_space_buffer_t* space);

 private:
  // A registered VMO handle together with its live memory mapping.
  struct StoredVmo {
    zx::vmo vmo;
    fzl::VmoMapper mapper;
  };
  // Looks up `id` and returns a span over its mapped bytes.
  zx_status_t GetMappedVmo(uint8_t id, fbl::Span<uint8_t>* out_span);
  std::array<fit::optional<StoredVmo>, MAX_VMOS> vmos_;
};
// A device buffer.
// Device buffers can be created from VMO stores. They're used to store references to buffers
// retrieved from a NetworkDeviceInterface, which point to data regions within a VMO.
// `Buffer` can represent either a tx (application-filled data) buffer or an rx (empty space for
// inbound data) buffer.
class Buffer {
 public:
  // Reads this buffer's data into `vec`.
  // Used to serve `fuchsia.net.tun/Device.ReadFrame`.
  // Returns an error if this buffer's definition does not map to valid data (see `VmoStore::Write`
  // for specific error codes).
  zx_status_t Read(std::vector<uint8_t>* vec);
  // Writes `data` into this buffer.
  // If this `data` does not fit in this buffer, `ZX_ERR_OUT_OF_RANGE` is returned.
  // Returns an error if this buffer's definition does not map to valid data (see `VmoStore::Write`
  // for specific error codes).
  // Used to serve `fuchsia.net.tun/Device.WriteFrame`.
  zx_status_t Write(const std::vector<uint8_t>& data);
  // Copies data from `other` into this buffer, returning the number of bytes written in `total`.
  zx_status_t CopyFrom(Buffer* other, size_t* total);
  // NOTE(review): calls `value()` on the optional, which aborts if the frame
  // type was never set; presumably only valid on tx buffers -- confirm.
  inline fuchsia::hardware::network::FrameType frame_type() const { return frame_type_.value(); }
  inline uint32_t id() const { return id_; }
  // Transfers ownership of the (possibly null) frame metadata to the caller.
  inline std::unique_ptr<fuchsia::net::tun::FrameMetadata> TakeMetadata() {
    return std::move(meta_);
  }

 protected:
  friend VmoStore;
  // Creates a device buffer from a tx request buffer.
  Buffer(const tx_buffer_t* tx, bool get_meta, VmoStore* vmo_store);
  // Creates a device buffer from an rx space buffer.
  Buffer(const rx_space_buffer_t* space, VmoStore* vmo_store);

 private:
  const uint32_t id_{};
  // Pointer to parent VMO store, not owned.
  VmoStore* const vmo_store_;
  const uint8_t vmo_id_;
  // Scatter-gather regions within the VMO; only the first `parts_count_` are valid.
  std::array<buffer_region_t, MAX_VIRTUAL_PARTS> parts_{};
  const size_t parts_count_{};
  std::unique_ptr<fuchsia::net::tun::FrameMetadata> meta_;
  const fit::optional<fuchsia::hardware::network::FrameType> frame_type_;
};
} // namespace tun
} // namespace network
#endif // SRC_CONNECTIVITY_NETWORK_TUN_NETWORK_TUN_BUFFER_H_
|
To quote George Will: Well.
Someone in the White House counsel’s office leaked this to Mike Cernovich, then to Eli Lake. Given Rice’s Benghazi track record, I’m trying to think of anyone in the previous White House whose fingerprints on “unmasking” members of Team Trump would be more likely to inflame the politics around it aside from Barack Obama himself.
White House lawyers last month discovered that the former national security adviser Susan Rice requested the identities of U.S. persons in raw intelligence reports on dozens of occasions that connect to the Donald Trump transition and campaign, according to U.S. officials familiar with the matter… The intelligence reports were summaries of monitored conversations — primarily between foreign officials discussing the Trump transition, but also in some cases direct contact between members of the Trump team and monitored foreign officials. One U.S. official familiar with the reports said they contained valuable political information on the Trump transition such as whom the Trump team was meeting, the views of Trump associates on foreign policy matters and plans for the incoming administration… Rice herself has not spoken directly on the issue of unmasking. Last month when she was asked on the “PBS NewsHour” about reports that Trump transition officials, including Trump himself, were swept up in incidental intelligence collection, Rice said: “I know nothing about this,” adding, “I was surprised to see reports from Chairman Nunes on that account today.”
That’s weird. Since when is Susan Rice known to lie?
Good point from Lake elsewhere in his story in noting that this helps explain why Nunes had to make that mysterious trip to the White House grounds to view intelligence about surveillance of Trump transition staffers. If he was looking at requests from Susan Rice to “unmask” American citizens in foreign intel reports, it stands to reason that they’d be on NSC computers there. Assuming it’s all true, you already know what Rice’s defense will be: (1) This was incidental surveillance, not targeted surveillance of the sort Trump alleged in his tweets about Obama wiretapping him. The targets were foreign intel officials, not Trump staffers. The surveillance itself was thus legal. (2) The “unmasking” was also legal. The law allows for American citizens to be identified in transcripts of calls when knowing their identity is important to understanding the foreign intelligence of the call. Rice will claim that given the hubbub about Russian meddling in the campaign and FBI suspicions about Trump staffers contacting Russian officials last year, she had no choice but to “unmask” Trump transition officials to know who was talking to whom about what. Rice’s credibility is pure garbage on the right post-Benghazi, though; there’s not a Republican in America who won’t assume that the “unmasking” here was nothing more than partisan politics, designed to keep tabs on what the next administration was doing. Trump will claim vindication in his suspicions of dirty pool by the Obama White House and he’ll have lots of support.
Watch the clip below for the latest from Fox News, which reported several days ago that someone “very well known, very high up, very senior, in the intelligence world” ordered the “unmasking” of Trump staffers. Notably, Adam Housley didn’t name Rice as the culprit this morning; he acknowledged that her name was floating around in other people’s reports but he hadn’t independently confirmed that Rice was responsible. Also notable: Housley claims that the “unmasking” began up to a year before the inauguration, which would start the timeline months before Trump became the Republican nominee, and continued “for a significant amount of time.” Assuming that’s true, why were the feds “unmasking” Trump officials so early in the election process? And if it’s also true, as Nunes has claimed, that the intelligence reports in which Trumpers were “unmasked” had nothing to do with Russia or its meddling in the campaign, why was it so important to know the identities of the Trump staffers in those reports? What exactly was the foreign intelligence value? Were Hillary staffers, many of whom undoubtedly also communicated with foreign officials for various reasons during the campaign, similarly unmasked in a serial way?
One other possibility, of course, is that Cernovich’s and Lake’s source is blowing smoke, naming Rice as the culprit knowing that that’ll rile up the right when in fact some other Obama official was to blame. What was it again that Trump told us about not trusting anonymous sources? Exit question via Erick Erickson: According to Lake, it was NSC deputy Ezra Cohen-Watnick who first uncovered Rice’s role in “unmasking” the Trump transition officials. Meanwhile, reports emerged a few weeks ago that the CIA wanted Cohen-Watnick removed from the NSC. Is that a coincidence or not? |
Anti-Trust Policy and National Growth: Some Evidence from Italy
Antitrust problems affecting markets for intermediate goods or services raise the input costs of firms operating in the downstream sectors, which often face tough international competition. Such firms lose market share, thus worsening the economic performance of the country. We try to document the importance of this link between competition problems and growth by analysing Italian sectoral data. We find that sectors which depend more heavily on inputs and services produced in sectors suffering from competition problems perform worse in terms of net exports, export growth and output growth.
<reponame>PhilippeDeSousa/EpitechBundle<filename>Tek3/MAT/304pacman/pacman.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import os
from enum import Enum
class CaseType(Enum):
    """Sentinel cell values stored in the integer map grid.

    Negative values cannot collide with the non-negative wave numbers
    (``cycle % 10``) that the search stamps into visited cells.
    """
    Empty = -1
    Pacman = -2
    Wall = -3
    Ghost = -4
class PacClass:
    """Mutable game state: the grid, the display glyphs and the BFS frontier."""

    def __init__(self, _map, _wall, _space):
        self.Map = _map            # 2D list of ints (CaseType values or wave digits)
        self.Wall = _wall          # glyph printed for wall cells
        self.Space = _space        # glyph printed for empty cells
        self.Tail = list(tuple())  # BFS frontier of (y, x) tuples; evaluates to []
        self.lenX = int            # placeholder (the `int` type); main() overwrites with the width
        self.lenY = int            # placeholder (the `int` type); main() overwrites with the height
def displayMap(Pacman):
    """Prints the grid, translating sentinel cells to their display glyphs.

    Wave-numbered cells (plain ints) are printed as-is.
    """
    glyphs = {
        CaseType.Empty.value: Pacman.Space,
        CaseType.Wall.value: Pacman.Wall,
        CaseType.Pacman.value: 'P',
        CaseType.Ghost.value: 'F',
    }
    for y in range(Pacman.lenY):
        row = Pacman.Map[y]
        for x in range(Pacman.lenX):
            cell = row[x]
            print(glyphs.get(cell, cell), end="")
        print()
def checkParams():
    """Validates argv: a map path plus two single-character display glyphs.

    Exits with status 84 (after printing a message) on any invalid input,
    matching the error convention used throughout this program.
    """
    if len(sys.argv) != 4:
        print("Error: 4 arguments required")
        sys.exit(84)
    elif len(sys.argv[2]) > 1 or len(sys.argv[3]) > 1:
        print("Error: invalid char")
        sys.exit(84)
    path = sys.argv[1]
    try:
        # Open only to verify readability; use a context manager so the
        # handle is closed instead of leaked (the original never closed it).
        with open(path):
            pass
    except IOError:
        print("Error: no such file or directory")
        sys.exit(84)
    if os.stat(path).st_size == 0:
        print("Error: empty file.")
        sys.exit(84)
def checkMap(_map):
    """Validates the parsed map: allowed characters only, exactly one 'P'
    and exactly one 'F'. Exits with status 84 on any violation.

    Only the first len(_map[0]) columns of each row are inspected, matching
    the traversal used elsewhere in this program.
    """
    height, width = len(_map), len(_map[0])
    pac = 0
    gh = 0
    for y in range(height):
        for x in range(width):
            cell = _map[y][x]
            if cell == 'F':
                gh += 1
            elif cell == 'P':
                pac += 1
            elif cell not in ('0', '1'):
                print("Error: Character unothorized in map")
                sys.exit(84)
    if pac != 1 or gh != 1:
        print("Error: Map format is wrong")
        sys.exit(84)
def setMap():
    """Reads the map file named by argv[1], validates it and returns the
    grid as a 2D list of CaseType integer values."""
    file = open(sys.argv[1], 'r')
    lines = file.read().strip().split('\n')
    checkMap(lines)
    # checkMap() guarantees every inspected character is one of these four.
    translate = {
        '1': CaseType.Wall.value,
        'F': CaseType.Ghost.value,
        'P': CaseType.Pacman.value,
        '0': CaseType.Empty.value,
    }
    height, width = len(lines), len(lines[0])
    return [[translate[lines[y][x]] for x in range(width)]
            for y in range(height)]
def findGhost(Pacman):
    """Returns the (x, y) position of the ghost, or (0, 0) if absent."""
    for y in range(Pacman.lenY):
        row = Pacman.Map[y]
        for x in range(Pacman.lenX):
            if row[x] == CaseType.Ghost.value:
                return x, y
    return 0, 0
def checkPos(Pacman, posy, posx, cycle):
    """Visits one cell during the BFS wave numbered ``cycle``.

    Ghost cells are ignored; reaching Pacman prints the map and exits the
    process with status 0; empty cells are stamped with ``cycle % 10`` and
    queued on the frontier. Walls and already-stamped cells fall through.

    NOTE(review): negative indices wrap around via Python indexing at map
    edges; presumably maps are wall-bordered -- confirm with input files.
    """
    if Pacman.Map[posy][posx] == CaseType.Ghost.value:
        return
    elif Pacman.Map[posy][posx] == CaseType.Pacman.value:
        displayMap(Pacman)
        sys.exit(0)
    elif Pacman.Map[posy][posx] == CaseType.Empty.value:
        tmp = (posy, posx)
        Pacman.Tail.append(tmp)
        Pacman.Map[posy][posx] = cycle % 10
def checkAround(Pacman, posy, posx, cycle):
    """Probes the four orthogonal neighbours in N, E, S, W order."""
    for dy, dx in ((-1, 0), (0, 1), (1, 0), (0, -1)):
        checkPos(Pacman, posy + dy, posx + dx, cycle)
def algoLoop(Pacman, Cycle):
    """Run one BFS wave from the frontier; returns the next cycle number."""
    if Cycle == 0:
        # First wave starts at the ghost's position.
        posx, posy = findGhost(Pacman)
        checkAround(Pacman, posy, posx, Cycle + 1)
    else:
        # Pop exactly the cells queued by the previous wave; checkPos
        # appends the next wave's cells to the back of the queue.
        for _ in range(len(Pacman.Tail)):
            posy, posx = Pacman.Tail.pop(0)
            checkAround(Pacman, posy, posx, Cycle + 1)
    return Cycle + 1
def main():
    """Entry point: validate input, load the map, then run BFS waves forever.

    The loop is terminated from inside checkPos() (sys.exit) when the wave
    reaches the pacman.
    """
    checkParams()
    grid = setMap()
    Pacman = PacClass(grid, sys.argv[2], sys.argv[3])
    Pacman.lenY = len(grid)
    Pacman.lenX = len(grid[0])
    cycle = 0
    while True:
        cycle = algoLoop(Pacman, cycle)
        displayMap(Pacman)
# Run the simulation only when executed as a script (not when imported).
if __name__ == "__main__":
    main()
#include<iostream>
#include<vector>
using namespace std;
// Undirected graph over a fixed number of vertices, stored as
// per-vertex adjacency lists.
class Graph
{
public:
    vector<int> *adj;     // adjacency lists, one per vertex (heap array)
    int vertex;           // number of vertices
    vector<bool> visited; // per-vertex mark used by DFS

    // Allocates an adjacency list for each of the noOfV vertices.
    Graph(int noOfV)
    {
        vertex = noOfV;
        adj = new vector<int>[noOfV];
    }
    // BUG FIX: the original never released the array allocated in the
    // constructor. NOTE: with a destructor and no copy-control defined,
    // instances must not be copied (none of the visible callers copy).
    ~Graph()
    {
        delete[] adj;
    }
    // Adds an undirected edge between u and v.
    void addEdge(int u,int v)
    {
        adj[u].push_back(v);
        adj[v].push_back(u);
    }
    // Prints each vertex followed by its adjacency list.
    void printGraph()
    {
        for(int i=0;i<vertex;i++)
        {
            cout<<i<<" : ";
            for(int C:adj[i])
            {
                cout<<C<<" -> ";
            }
            cout<<"\n";
        }
    }
    // Prints each connected component on its own line, then the total count.
    void connectedComponentes()
    {
        visited = vector<bool>(vertex,false);
        int cnt=0;
        for(int i=0;i<vertex;i++)
        {
            if(!visited[i])
            {
                cnt++;
                DFS(i);
                cout<<"\n";
            }
        }
        cout<<"Total Connected Components = "<<cnt;
    }
    // Depth-first traversal from u, printing each newly visited vertex.
    void DFS(int u)
    {
        visited[u] = true;
        cout<<u<<" ";
        for(int i:adj[u])
        {
            if(!visited[i])
            {
                DFS(i);
            }
        }
    }
};
// Driver: build a 5-vertex graph with two components {0,1,2} and {3,4},
// then report the components.
int main()
{
    Graph g(5);
    const int edges[][2] = {{0, 1}, {1, 2}, {3, 4}};
    for (const auto &e : edges)
    {
        g.addEdge(e[0], e[1]);
    }
    g.connectedComponentes();
    return 0;
}
/**
 * Utility function to read from a 8-bit register.
 *
 * Writes the register address to the BMP180, then reads one byte back.
 *
 * address: register address on the BMP180. Only byte 0 of the 3-byte
 *          scratch buffer is used; the extra bytes look oversized for a
 *          single-byte read.
 * Returns the byte read from the device.
 *
 * NOTE(review): assumes twi_master_transfer() takes (slave address, buffer,
 * length, issue-stop) and that ORing bit 0 into the address selects the I2C
 * read direction -- confirm against the TWI driver.
 */
uint8_t read_8(char address) {
    uint8_t buffer[3];
    buffer[0] = address;
    twi_master_transfer(BMP180_ADDRESS, buffer, 1, false);
    twi_master_transfer(BMP180_ADDRESS | 1, buffer, 1, true);
    return buffer[0];
}
<reponame>trungdung22/solana-dice
/**
 * Transactions sent to the dice program contain commands that are
 * defined in this file:
 * - keys will vary by the specified Command
 * - userdata is a Command enum (as a 32 bit value) followed by a CommandData union.
 */
#pragma once
#include "program_types.h"
typedef enum {
    /*
     * Initialize the dashboard account
     *
     * key[0] - dashboard account
     */
    Command_InitDashboard = 0,
    /*
     * Place a bet (CommandData: bet)
     *
     * key[0] - dashboard account
     * key[1] - game account
     * key[2] - player account
     */
    Command_Bet,
    /*
     * Set the casino's seed hash (CommandData: casino_seed_hash)
     *
     * key[0] - dashboard account
     * key[1] - game account
     * key[2] - casino account
     */
    Command_SetSeedHash,
    /*
     * Set the player's seed (CommandData: seed)
     *
     * key[0] - dashboard account
     * key[1] - game account
     * key[2] - player account
     */
    Command_SetSeed,
    /*
     * Reveal the casino seed and settle the game (CommandData: seed,
     * presumably -- confirm against the program implementation; the
     * original comment here was a copy/paste leftover saying
     * "Initialize a game account").
     *
     * key[0] - dashboard account
     * key[1] - game account
     * key[2] - casino account
     * key[3] - player account
     */
    Command_Reveal,
    /*
     * Player withdrawal (the original comment, "Used by Player X to
     * advertise their game", is a tic-tac-toe leftover -- confirm the
     * exact semantics).
     *
     * key[0] - dashboard account
     * key[1] - game account
     * key[2] - player account
     *
     * CommandData: none
     */
    Command_Withdraw,
    /*
     * Casino withdrawal (CommandData: casino_withdraw_amount --
     * see CommandData below; the original said "none", which conflicts
     * with the union field -- confirm).
     *
     * key[0] - dashboard account
     * key[1] - casino account
     */
    Command_Casino_Withdraw,
    /*
     * Force the enum to be 64 bits
     */
    Command_MakeEnum64Bits = 0xffffffffffffffff,
} Command;
/* Per-command payload; which member is meaningful depends on the Command. */
typedef union {
    /* Command_Bet */
    struct {
        uint64_t bet_lamports; /* wager amount, in lamports */
        uint8_t roll_under;    /* presumably the player wins when the roll is
                                  under this value -- confirm */
    } bet;
    /* Command_SetSeed (and presumably Command_Reveal) */
    uint8_t seed[SEED_SIZE];
    /* Command_SetSeedHash: hash of the casino's secret seed */
    uint8_t casino_seed_hash[HASH_SIZE];
    /* Command_Casino_Withdraw: amount to withdraw, in lamports */
    uint64_t casino_withdraw_amount;
    SolPubkey key;
} CommandData;
|
import minifyCSS from "minifycss";
import config from "../project.config";
// Expands color and icon placeholders in the stylesheet; minifies the
// result only for production builds.
export default function transformCSS(code: string): string {
  const transformed = replaceIcons(replaceColors(code));
  if (!config.prod) {
    return transformed;
  }
  return minifyCSS(transformed);
}
// Replaces every whole-word occurrence of a configured color name with its
// configured value.
function replaceColors(code: string): string {
  const { colors } = config;
  const names = Object.keys(colors).join("|");
  const pattern = new RegExp(`\\b(${names})\\b`, "g");
  return code.replace(pattern, (_, name: keyof typeof colors) => colors[name]);
}
// Rewrites url(iconName) references into quoted urls pointing at the docs
// icons directory.
function replaceIcons(code: string): string {
  const { icons } = config.docs;
  const names = Object.keys(icons).join("|");
  const pattern = new RegExp(`url\\((${names})\\)`, "g");
  const prefix = `${config.docs.root}/${config.docs.iconsDir}`;
  return code.replace(
    pattern,
    (_, name: keyof typeof icons) => `url("${prefix}/${icons[name]}")`
  );
}
|
package gql
import (
"context"
"fmt"
"sync"
"github.com/grailbio/base/log"
"github.com/grailbio/gql/hash"
"github.com/grailbio/gql/marshal"
"github.com/grailbio/gql/symbol"
)
// joinMaxTables is the max # of tables that can appear in a single join
// expression.
const joinMaxTables = 4
// joinSubTable is an internal representation of a leaf table during join. When
// a table is self-joined, e.g., join({t0:xxx, t1:xxx}, ...), "xxx" will appear
// in two joinSubTables, {0, 2, symbol.Intern("t0"), xxx}, and {1, 2,
// symbol.Intern("t1"), xxx}.
type joinSubTable struct {
	index int       // order of appearance (0,1,2..) in the 1st arg of join() expression.
	total int       // total # of tables being joined.
	name  symbol.ID // the name specified in the 1st arg of the join expression
	table Table     // the leaf table.
}
// joinSubTableList is an ordered set of leaf tables, stored in the order
// listed in the 1st arg of join().
//
// NOTE(review): add() never updates n, so n is only meaningful for lists whose
// n was set at construction time (e.g. in parseJoinExpr) -- confirm that
// len() is not relied upon for lists built via add()/merge() alone.
type joinSubTableList struct {
	tables [joinMaxTables]*joinSubTable // Some entries may be nil.
	n      int                          // # of non-nil entries in tables[].
}
// list returns the non-nil subtables, in join-expression index order.
func (tl *joinSubTableList) list() []*joinSubTable {
	result := make([]*joinSubTable, 0, tl.n)
	for _, sub := range tl.tables {
		if sub == nil {
			continue
		}
		result = append(result, sub)
	}
	return result
}
// len returns the stored count n.
//
// NOTE(review): add() never increments n, so for lists built only via
// add()/merge() (e.g. the result of subTables()) this reads 0 -- confirm
// callers only use len() on lists constructed with an explicit n.
func (tl *joinSubTableList) len() int { return tl.n }
// add stores a table under its join-expression index. It panics if that slot
// is already occupied.
func (tl *joinSubTableList) add(t *joinSubTable) {
	if tl.tables[t.index] != nil {
		panic(*t)
	}
	tl.tables[t.index] = t
}
// merge adds every table in the other list. If any table already exists, it
// panics.
func (tl *joinSubTableList) merge(other *joinSubTableList) {
	for _, t := range other.tables {
		if t != nil {
			tl.add(t)
		}
	}
}
// getByIndex returns the i'th table in the list (nil if absent).
//
// REQUIRES: 0 <= i < # of tables listed in the join expression.
func (tl *joinSubTableList) getByIndex(i int) *joinSubTable {
	return tl.tables[i]
}
// getByName returns the table with the given name. Name is the struct field
// tag attached to the table in the 1st arg of the join expression. For
// example, for "join({t0: table0, t1: table1}, ...)",
// getByName(symbol.Intern("t0")) returns table0. It panics if no table with
// the given name is found.
func (tl *joinSubTableList) getByName(tableName symbol.ID) *joinSubTable {
	for i, t := range tl.tables {
		// BUG FIX: entries in tables[] may be nil (see joinSubTableList);
		// the original dereferenced them unconditionally and could crash.
		if t == nil {
			continue
		}
		if t.name == tableName {
			if t.index != i {
				panic(t)
			}
			return t
		}
	}
	log.Panicf("Table %v not found", tableName.Str())
	return nil
}
// joinColumn represents a column named in a join "where" expression.
type joinColumn struct {
	table   *joinSubTable // leaf table.
	col     symbol.ID     // column in table.
	keyExpr *Func         // closure to extract the key from a row.
}
// equals reports whether the two columns refer to the same column of the same
// leaf table (the key closure is not compared).
func (jc joinColumn) equals(other joinColumn) bool {
	return jc.table == other.table && jc.col == other.col
}
// String implements the stringer interface.
func (jc joinColumn) String() string {
	return fmt.Sprintf("%s#%d.%s", jc.table.name.Str(), jc.table.index, jc.col.Str())
}
// joinConstraint represents one "where" clause.
type joinConstraint struct {
	// op is the constraint between the two tables. Currently it is always one of
	// the '==' variants; eqeqSymbolID for '==', eqeqqSymbolID for '?==', etc.
	op symbol.ID
	// left & right hand sides of the constraint.
	tables [2]joinColumn
	// boolean expression that tells whether the joined row satisfies the constraint.
	filterExpr *Func
}
// String implements the stringer interface.
func (jc joinConstraint) String() string {
	return fmt.Sprintf("{L:%v, R:%v, filter: %v}", jc.tables[0], jc.tables[1], jc.filterExpr)
}
// joinNode is a node in the dataflow graph created by join. It corresponds to
// a leaf table, or the result of joining two or more leaf nodes. A row
// produced by a joinNode is always a struct of form {table1: row1, table2:
// row2, ...}. For example, join({t0: X, t1: Y, t2: Z}, ...) creates a
// two-level join tree:
//
//        joinnodeB
//        /      \
//       /        \
//   joinnodeA     t2
//    /    \
//   t0     t1
//
// joinnodeA emits rows of form {t0: row in X, t1: row in Y}. joinnodeB emits
// rows of form {t0: row in X, t1: row in Y, t2: row in Z}. Each row emitted
// by a joinNode is a nested struct. To read column C in table X, you must
// refer to it as t0.C.
type joinNode interface {
	Table
	// isSorted checks if this node yields rows sorted by column c.col of
	// c.table.
	isSorted(c joinColumn) bool
	// subTables returns the list of tables (in the original join expression)
	// that contribute to rows yielded by this node.
	subTables() *joinSubTableList
}
// joinLeafNode is a simple node that reads rows from a Table and wraps them
// into a struct of form {tableName: row}.
type joinLeafNode struct {
	table *joinSubTable
	attrs TableAttrs
}
// newJoinLeafNode wraps a single leaf table in a joinNode.
func newJoinLeafNode(table *joinSubTable) *joinLeafNode {
	return &joinLeafNode{table: table, attrs: TableAttrs{Name: "join:" + table.name.Str()}}
}
// Attrs implements Table.
func (t *joinLeafNode) Attrs(ctx context.Context) TableAttrs { return t.attrs }
// Hash implements Table; it mixes a fixed per-type seed with the source
// table's hash.
func (t *joinLeafNode) Hash() hash.Hash {
	h := hash.Hash{
		0x84, 0x3b, 0xf6, 0x5d, 0xba, 0x56, 0x82, 0x7e,
		0x8c, 0x6a, 0x02, 0x10, 0xac, 0xa9, 0xaa, 0xcd,
		0x46, 0xbe, 0xc0, 0x34, 0x55, 0x08, 0x61, 0xea,
		0x65, 0xe0, 0x64, 0xbd, 0x97, 0x0c, 0xed, 0xec}
	return h.Merge(t.table.table.Hash())
}
// Len implements Table; wrapping rows does not change the row count.
func (t *joinLeafNode) Len(ctx context.Context, mode CountMode) int {
	return t.table.table.Len(ctx, mode)
}
// Marshal implements Table, but it shall never be called.
// Marshaling of a join table is done always at the root level.
func (t *joinLeafNode) Marshal(ctx MarshalContext, enc *marshal.Encoder) { panic("Not implemented") }
// Prefetch implements Table.
func (t *joinLeafNode) Prefetch(ctx context.Context) {}
// isSorted implements joinNode; a raw leaf makes no sortedness promise.
func (t *joinLeafNode) isSorted(c joinColumn) bool { return false }
// subTables implements joinNode; a leaf contributes exactly one table.
func (t *joinLeafNode) subTables() *joinSubTableList {
	s := &joinSubTableList{}
	s.add(t.table)
	return s
}
// Scanner implements Table. The entire output is yielded to shard 0; all
// other shards get an empty scanner.
func (t *joinLeafNode) Scanner(ctx context.Context, start, limit, total int) TableScanner {
	if start > 0 {
		return &NullTableScanner{}
	}
	return &joinLeafScanner{
		sc:        t.table.table.Scanner(ctx, 0, 1, 1),
		tableName: t.table.name,
	}
}
// joinLeafScanner implements the TableScanner for joinLeafNode.
type joinLeafScanner struct {
	sc        TableScanner
	tableName symbol.ID // field name under which each source row is wrapped.
	curValue  Value     // current row. a struct of form {tableName: row}.
}
// Scan implements TableScanner. Each source row is wrapped in a one-field
// struct keyed by the table's name.
func (t *joinLeafScanner) Scan() bool {
	if !t.sc.Scan() {
		return false
	}
	s := &simpleStruct2Impl{
		nFields: 1,
	}
	InitStruct(s)
	s.names[0] = t.tableName
	s.values[0] = t.sc.Value()
	t.curValue = NewStruct(s)
	return true
}
// Value implements TableScanner.
func (t *joinLeafScanner) Value() Value { return t.curValue }
// joinSortingNode is a joinNode that sorts rows of another joinNode by one
// join-key column.
type joinSortingNode struct {
	table   joinNode   // Source table
	sortCol joinColumn // The column to sort
	sorted  Table
	attrs   TableAttrs
}
// newJoinSortingNode wraps "table" so that it yields rows sorted by sortCol.
// If the source already reports itself sorted by that column, it is returned
// unchanged.
func newJoinSortingNode(ctx context.Context, table joinNode, sortCol joinColumn) joinNode {
	log.Printf("joinsort: table %+v, col %s", table.Attrs(ctx), sortCol.col.Str())
	if t := table.subTables().getByIndex(sortCol.table.index); t == nil {
		log.Panicf("join: subtable %+v not found", sortCol)
	}
	if table.isSorted(sortCol) {
		return table
	}
	n := &joinSortingNode{
		table:   table,
		sortCol: sortCol,
		attrs:   TableAttrs{Name: fmt.Sprintf("join:sort(%s/%s)", table.Attrs(ctx).Name, sortCol.col.Str())},
	}
	// NOTE(review): presumably the -1 limit makes MinNTable sort the whole
	// input by keyExpr rather than keeping a bounded prefix -- confirm.
	n.sorted = NewMinNTable(ctx, astUnknown /*TODO:fix*/, TableAttrs{Name: "join"}, n.table, sortCol.keyExpr, -1, 0)
	return n
}
// Attrs implements Table.
func (t *joinSortingNode) Attrs(ctx context.Context) TableAttrs { return t.attrs }
// Hash implements Table. It mixes a fixed per-type seed with the source
// table's hash and the sort-key closure's hash, so two sorting nodes differ
// whenever either input differs.
func (t *joinSortingNode) Hash() hash.Hash {
	h := hash.Hash{
		0xaa, 0xe4, 0xb1, 0xa1, 0xfe, 0x70, 0x9f, 0x32,
		0xef, 0x9e, 0x59, 0xea, 0x15, 0x04, 0xde, 0x04,
		0xfc, 0x98, 0xd1, 0x31, 0x57, 0xda, 0x29, 0x0c,
		0x1a, 0xd1, 0xee, 0xab, 0xec, 0x8b, 0xd8, 0x3b}
	h = h.Merge(t.table.Hash())
	// BUG FIX: the result of this Merge was previously discarded (Merge
	// returns a new hash), so the sort key never contributed to the hash.
	h = h.Merge(t.sortCol.keyExpr.Hash())
	return h
}
// Len implements Table; sorting preserves the row count of the source.
func (t *joinSortingNode) Len(ctx context.Context, mode CountMode) int {
	return t.table.Len(ctx, mode)
}
// Marshal implements Table, but it shall never be called.
// Marshaling of a join table is done always at the root level.
func (t *joinSortingNode) Marshal(ctx MarshalContext, enc *marshal.Encoder) { panic("Not implemented") }
// Prefetch implements Table.
func (t *joinSortingNode) Prefetch(ctx context.Context) {}
// isSorted implements joinNode; this node is sorted exactly by its sort column.
func (t *joinSortingNode) isSorted(c joinColumn) bool { return t.sortCol.equals(c) }
// subTables implements joinNode.
func (t *joinSortingNode) subTables() *joinSubTableList { return t.table.subTables() }
// Scanner implements Table. The whole sorted output is yielded to shard 0.
func (t *joinSortingNode) Scanner(ctx context.Context, start, limit, total int) TableScanner {
	if start > 0 {
		return &NullTableScanner{}
	}
	return t.sorted.Scanner(ctx, 0, 1, 1)
}
// newJoinKeyAST builds the AST for "tableName.sortCol" -- the expression that
// extracts a join key from a wrapped {tableName: row} struct.
func newJoinKeyAST(tableName symbol.ID, sortCol symbol.ID) *ASTStructFieldRef {
	return &ASTStructFieldRef{
		Parent: &ASTColumnRef{Col: tableName},
		Field:  sortCol}
}
// newJoinKeyClosure wraps the key AST in a single-argument closure suitable
// for evaluating against each row.
func newJoinKeyClosure(ast *ASTStructFieldRef) *Func {
	// Finalize sortKeyAST. Otherwise, FreeVars and other methods are unhappy.
	//
	// TODO(saito) Don't fake bindings. The below code doesn't allow accessing
	// global variables.
	frame := aiBindings{Frames: []aiFrame{
		aiGlobalConsts,
		aiFrame{symbol.AnonRow: AIAnyType}},
	}
	types := newASTTypes() // TODO(saito) this is screwy. Why are we throwing away the typeinfo?
	types.add(ast, &frame)
	return NewUserDefinedFunc(ast, &bindings{},
		[]FormalArg{{Name: symbol.AnonRow, Positional: true, Required: true}}, ast)
}
// joinSortingMergeNode joins two tables via mergesort.
type joinSortingMergeNode struct {
	parent *joinTable
	attrs  TableAttrs
	// child are the two tables to join.
	child [2]joinNode
	// constraint defines the natural-join condition. constraint.tables[x] defines
	// the value to extract from child[x] (x={0,1}). The two values extracted from
	// child[*] are compared using constraint.filterExpr.
	constraint joinConstraint
}
// newJoinSortingMergeNode wraps both children in sorting nodes keyed by the
// constraint's columns, then merges the two sorted streams.
func newJoinSortingMergeNode(ctx context.Context, parent *joinTable, child0, child1 joinNode, constraint joinConstraint) joinNode {
	child := [2]joinNode{
		newJoinSortingNode(ctx, child0, constraint.tables[0]),
		newJoinSortingNode(ctx, child1, constraint.tables[1]),
	}
	return &joinSortingMergeNode{
		parent:     parent,
		attrs:      TableAttrs{Name: fmt.Sprintf("join:sortmerge(lhs:=%s,rhs:=%s,cond=%+v)", child[0].Attrs(ctx).Name, child[1].Attrs(ctx).Name, constraint)},
		child:      child,
		constraint: constraint,
	}
}
// Attrs implements Table.
func (t *joinSortingMergeNode) Attrs(ctx context.Context) TableAttrs { return t.attrs }
// Hash implements Table. It mixes a fixed per-type seed with the parent
// join's hash, both children's hashes, and the filter expression's hash.
func (t *joinSortingMergeNode) Hash() hash.Hash {
	h := hash.Hash{
		0x02, 0xc8, 0x1b, 0xf9, 0x22, 0x43, 0xfd, 0x8a,
		0xc3, 0x58, 0xac, 0xf8, 0x70, 0xd1, 0xc5, 0xaf,
		0x50, 0xee, 0x2a, 0x68, 0x9f, 0xc7, 0x3e, 0x33,
		0xe4, 0x55, 0x78, 0x61, 0x8f, 0x73, 0xa4, 0xb7}
	// BUG FIX: the result of this Merge was previously discarded (Merge
	// returns a new hash), so the parent hash never contributed.
	h = h.Merge(t.parent.hash)
	h = h.Merge(t.child[0].Hash())
	h = h.Merge(t.child[1].Hash())
	h = h.Merge(t.constraint.filterExpr.Hash())
	return h
}
// Len implements Table. For Approx mode it returns the smaller of the two
// children's approximate lengths; Exact counting is unimplemented.
func (t *joinSortingMergeNode) Len(ctx context.Context, mode CountMode) int {
	if mode == Exact {
		panic("not implemented")
	}
	l0 := t.child[0].Len(ctx, mode)
	// BUG FIX: the original read child[0] twice, so the second child's
	// length was never consulted.
	if l1 := t.child[1].Len(ctx, mode); l1 < l0 {
		return l1
	}
	return l0
}
// Marshal implements Table.
func (t *joinSortingMergeNode) Marshal(ctx MarshalContext, enc *marshal.Encoder) {
	panic("Not implemented")
}
// Prefetch implements Table.
func (t *joinSortingMergeNode) Prefetch(ctx context.Context) {}
// isSorted implements joinNode. The merged output is sorted by either side of
// an inner-join ('==') constraint; for other operators NA keys can break the
// order.
func (t *joinSortingMergeNode) isSorted(c joinColumn) bool {
	if t.constraint.op != eqeqSymbolID {
		// for non-inner joins, some of the keys may be NA, and they screw the
		// sort order.
		return false
	}
	for _, table := range t.constraint.tables {
		if table.equals(c) {
			return true
		}
	}
	// TODO(saito): For "A.a = B.a && B.a == C.b && A.a = D.a", it creates a tree
	//
	// Merge3(Merge2(Merge1(A,B, {A.a=B.a}) ,C,{B.a=C.b}),{A.a=D.a})
	//
	// Now, when joinSortingNode asks Merge2 if it is sorted by A.a, It will check
	// if A.a is equal to B.a or C.b and return false.
	return false
}
// subTables implements joinNode: the union of both children's leaf tables.
func (t *joinSortingMergeNode) subTables() *joinSubTableList {
	subTables := &joinSubTableList{}
	for _, child := range t.child {
		subTables.merge(child.subTables())
	}
	return subTables
}
// Scanner implements Table. It primes both children by reading the first row
// and grouping it with all following rows sharing the same join key. All
// output goes to shard 0.
func (t *joinSortingMergeNode) Scanner(ctx context.Context, start, limit, total int) TableScanner {
	if start > 0 {
		return &NullTableScanner{}
	}
	sc := &joinSortingMergeScanner{
		ctx:        ctx,
		parent:     t.parent,
		subTables:  t.subTables(),
		filterExpr: t.constraint.filterExpr,
		label:      t.Attrs(ctx).Name,
	}
	for i := 0; i < 2; i++ {
		c := &sc.child[i]
		c.sc = t.child[i].Scanner(ctx, 0, 1, 1)
		c.keyExpr = t.constraint.tables[i].keyExpr
		if !c.sc.Scan() {
			c.eof = true
			continue
		}
		c.nextRow = c.sc.Value()
		sc.readNextRows(c)
	}
	return sc
}
// joinSortingMergeChild scans one table while doing merge-join.
type joinSortingMergeChild struct {
	sc      TableScanner // reader of the source table.
	keyExpr *Func        // for extracting the join key from a row in "sc".
	eof     bool
	key     Value   // current join key.
	values  []Value // set of rows with the same "key"
	nextRow Value   // stores one row read ahead.
}
// joinSortingMergeScanner implements TableScanner for joinSortingMergeNode.
type joinSortingMergeScanner struct {
	ctx    context.Context
	parent *joinTable
	label  string // For debugging only
	// tables contributing the values to the rows produced by this scanner.
	// subTables.len() may be > 2, since it counts the number of leaf tables.
	subTables *joinSubTableList
	// Join condition. E.g., "==" for inner join.
	filterExpr *Func
	// Child tables.
	child [2]joinSortingMergeChild
	// For enumerating a cartesian product when there are multiple rows with the
	// same joinkey.
	rowCP *joinCartesianProduct
}
// readNextRows reads the set of rows that share the same join key for the
// given child. It updates c.key, c.values, c.eof. It assumes that the
// underlying child scanner yields rows sorted by the join key (it panics on
// an unsorted key).
func (t *joinSortingMergeScanner) readNextRows(c *joinSortingMergeChild) {
	if c.eof {
		panic(c)
	}
	if !c.nextRow.Valid() {
		c.eof = true
		return
	}
	c.values = nil
	curRow := c.nextRow
	c.values = append(c.values, curRow)
	c.key = c.keyExpr.Eval(t.ctx, curRow)
	for c.sc.Scan() {
		// Find all values with same key as c.key and combine them into c.values.
		c.nextRow = c.sc.Value()
		curKey := c.keyExpr.Eval(t.ctx, c.nextRow)
		cmp := Compare(t.parent.ast, curKey, c.key)
		if cmp < 0 {
			log.Panicf("%s: Unsorted keys: %v < %v", t.label, curKey, c.key)
		}
		if cmp != 0 {
			// c.nextRow holds the first row of the next key group.
			return
		}
		c.values = append(c.values, c.nextRow)
	}
	c.nextRow = Value{} // Mark EOF
}
// readNext advances the merge: it finds the smallest current key across the
// two children and returns the cartesian product of the rows matching it.
// Returns nil when both children are exhausted.
func (t *joinSortingMergeScanner) readNext() *joinCartesianProduct {
	for {
		// Find the smallest key.
		minIdx := -1
		var minKey Value
		for i := 0; i < 2; i++ {
			c := &t.child[i]
			if c.eof {
				continue
			}
			if c.key.Type() == InvalidType {
				log.Panicf("expr %v, val %v", c.keyExpr, PrintValueList(c.values))
			}
			if minIdx < 0 || Compare(t.parent.ast, c.key, minKey) < 0 {
				minIdx = i
				minKey = c.key
			}
		}
		if minIdx == -1 { // exhausted all subtables.
			return nil
		}
		// Merge the child rows @ minKey.
		valsPerSubTable := [2][]Value{}
		for i := 0; i < 2; i++ {
			c := &t.child[i]
			if c.eof {
				continue
			}
			// Note: the "c" declared in this if-header (an int comparison
			// result) shadows the outer child pointer within the if-body.
			if c := Compare(t.parent.ast, c.key, minKey); c != 0 {
				if c <= 0 {
					log.Panicf("CompareValues: %v %v", t, minKey)
				}
				continue
			}
			valsPerSubTable[i] = c.values
			if len(c.values) <= 0 {
				log.Panic(c)
			}
			// Advance this child to its next key group.
			t.readNextRows(c)
		}
		return newJoinCartesianProduct(t.ctx, t.parent, t.subTables, t.filterExpr, valsPerSubTable, t.label)
	}
}
// Scan implements TableScanner. It drains the current cartesian product
// before pulling the next group of key-matched rows; products whose every
// combination is filtered out are skipped.
func (t *joinSortingMergeScanner) Scan() bool {
	for {
		if t.rowCP != nil && t.rowCP.scan() {
			return true
		}
		t.rowCP = t.readNext()
		if t.rowCP == nil {
			return false
		}
		if t.rowCP.scan() {
			return true
		}
	}
}
// Value implements TableScanner.
func (t *joinSortingMergeScanner) Value() Value {
	return t.rowCP.value()
}
// joinCrossMergeNode merges tables using brute-force cartesian join. It's used
// only when the two tables have no usable natural-join condition.
type joinCrossMergeNode struct {
	parent *joinTable
	attrs  TableAttrs
	child  [2]joinNode
	// TODO(saito) Don't read all the rows in memory if child1 is very large.
	once       sync.Once // Are child1Rows filled?
	child1Rows []Value   // In-memory copy of child[1] contents.
}
// newJoinCrossMergeNode builds a cross-join node over the two children;
// child[1] is materialized lazily on first use (see init).
func newJoinCrossMergeNode(ctx context.Context, parent *joinTable, child0, child1 joinNode) *joinCrossMergeNode {
	n := &joinCrossMergeNode{
		parent: parent,
		attrs:  TableAttrs{Name: fmt.Sprintf("join:cross(%s,%s)", child0.Attrs(ctx).Name, child1.Attrs(ctx).Name)},
		child:  [2]joinNode{child0, child1},
	}
	return n
}
// init materializes all of child[1] in memory, at most once; child[0] is
// streamed during scanning.
func (t *joinCrossMergeNode) init(ctx context.Context) {
	t.once.Do(func() {
		sc := t.child[1].Scanner(ctx, 0, 1, 1)
		for sc.Scan() {
			t.child1Rows = append(t.child1Rows, sc.Value())
		}
	})
}
// Attrs implements Table.
func (t *joinCrossMergeNode) Attrs(ctx context.Context) TableAttrs { return t.attrs }
// Hash implements Table; a fixed per-type seed mixed with the parent and both
// children.
func (t *joinCrossMergeNode) Hash() hash.Hash {
	h := hash.Hash{
		0xd9, 0xd3, 0xe0, 0x1f, 0x56, 0x68, 0x8e, 0xd8,
		0xa4, 0xbe, 0x75, 0x44, 0x8a, 0x34, 0xde, 0xa9,
		0xec, 0xc6, 0xcf, 0x7b, 0x5b, 0xcc, 0x85, 0x79,
		0x44, 0x65, 0x95, 0xe9, 0xa1, 0xa8, 0xa7, 0xc8}
	h = h.Merge(t.parent.hash)
	h = h.Merge(t.child[0].Hash())
	h = h.Merge(t.child[1].Hash())
	return h
}
// Len implements Table; Approx mode returns |child0| * |child1| (which forces
// child[1] to be materialized).
func (t *joinCrossMergeNode) Len(ctx context.Context, mode CountMode) int {
	if mode == Exact {
		panic("not implemented")
	}
	t.init(ctx)
	return t.child[0].Len(ctx, mode) * len(t.child1Rows) // TODO(saito) fix
}
// Marshal implements Table.
func (t *joinCrossMergeNode) Marshal(ctx MarshalContext, enc *marshal.Encoder) {
	panic("Not implemented")
}
// Prefetch implements Table.
func (t *joinCrossMergeNode) Prefetch(ctx context.Context) {}
// isSorted implements joinNode.
func (t *joinCrossMergeNode) isSorted(c joinColumn) bool {
	// Cross-merge iterates the child[0] in order.
	return t.child[0].isSorted(c)
}
// subTables implements joinNode: the union of both children's leaf tables.
func (t *joinCrossMergeNode) subTables() *joinSubTableList {
	subTables := &joinSubTableList{}
	for _, child := range t.child {
		subTables.merge(child.subTables())
	}
	return subTables
}
// Scanner implements Table. All output goes to shard 0; materializes child[1]
// before scanning begins.
func (t *joinCrossMergeNode) Scanner(ctx context.Context, start, limit, total int) TableScanner {
	if start > 0 {
		return &NullTableScanner{}
	}
	t.init(ctx)
	return &joinCrossMergeScanner{
		ctx:        ctx,
		parent:     t.parent,
		subTables:  t.subTables(),
		sc0:        t.child[0].Scanner(ctx, 0, 1, 1),
		child1Rows: t.child1Rows,
		label:      t.Attrs(ctx).Name,
	}
}
// joinCrossMergeScanner implements TableScanner for joinCrossMergeNode.
type joinCrossMergeScanner struct {
	ctx    context.Context
	parent *joinTable
	label  string // For debugging only
	// tables contributing the values to the rows produced by this scanner.
	// subTables.len() may be > 2, since it counts the number of leaf tables.
	subTables *joinSubTableList
	// Scanner for child[0].
	sc0 TableScanner
	// Contents of child[1].
	child1Rows []Value
	// For enumerating a cartesian product when there are multiple rows with the
	// same joinkey.
	rowCP *joinCartesianProduct
}
// Scan implements TableScanner (the original comment said "Scanner implements
// Table", which was wrong). For each row of child[0] it enumerates the
// cartesian product with the materialized child[1] rows.
func (t *joinCrossMergeScanner) Scan() bool {
	for {
		if t.rowCP != nil && t.rowCP.scan() {
			return true
		}
		if !t.sc0.Scan() {
			return false
		}
		t.rowCP = newJoinCartesianProduct(t.ctx, t.parent, t.subTables, nil /*todo*/, [2][]Value{[]Value{t.sc0.Value()}, t.child1Rows}, t.label)
		if t.rowCP.scan() {
			return true
		}
	}
}
// Value implements TableScanner.
func (t *joinCrossMergeScanner) Value() Value {
	return t.rowCP.value()
}
// joinTable is a Table implementation for join().
type joinTable struct {
	hash              hash.Hash
	ast               ASTNode // location in the source code. Only for error reporting.
	subTables         *joinSubTableList
	joinExpr          *Func    // join ("where") condition; rows failing it are dropped.
	mapExpr           *Func    // maybe null.
	root              joinNode // tree of joinNodes.
	approxLen         int
	once              sync.Once
	materializedTable Table // fully materialized btsv table.
	exactLenOnce      sync.Once
	exactLen          int
}
// Len implements Table. Approx answers come from approxLen; Exact is computed
// once by a full scan and then cached.
func (t *joinTable) Len(ctx context.Context, mode CountMode) int {
	if mode == Approx {
		return t.approxLen
	}
	t.exactLenOnce.Do(func() {
		t.exactLen = DefaultTableLen(ctx, t)
	})
	return t.exactLen
}
// Marshal implements Table; it marshals the materialized copy, building it if
// necessary.
func (t *joinTable) Marshal(ctx MarshalContext, enc *marshal.Encoder) {
	t.init(ctx.ctx)
	t.materializedTable.Marshal(ctx, enc)
}
// init creates a materialized btsv table by running the join once and writing
// every produced row; guarded so it runs at most once.
func (t *joinTable) init(ctx context.Context) {
	t.once.Do(func() {
		t.materializedTable = materializeTable(ctx, t,
			func(w *BTSVShardWriter) {
				sc := t.scanner(ctx)
				for sc.Scan() {
					w.Append(sc.Value())
				}
			})
	})
}
// joinTableScanner implements TableScanner for joinTable.
type joinTableScanner struct {
	ctx      context.Context
	parent   *joinTable
	sc       TableScanner
	joinExpr *Func
	mapExpr  *Func
	value    Value
}
// scanner streams rows straight from the join tree (bypassing the
// materialized copy); used by init() to build the btsv table.
func (t *joinTable) scanner(ctx context.Context) TableScanner {
	return &joinTableScanner{
		ctx:      ctx,
		parent:   t,
		sc:       t.root.Scanner(ctx, 0, 1, 1),
		joinExpr: t.joinExpr,
		mapExpr:  t.mapExpr,
	}
}
// explodeRow creates a N-element array of values (N = # of tables joined) from
// a struct with N fields, yielded by a joinNode. Tables absent from the row
// (e.g. outer-join misses) become Null.
//
// TODO(saito) This is hacky. There should be a way to evaluate the value
// without exploding.
func (t *joinTableScanner) explodeRow(v Value) []Value {
	s := v.Struct(t.parent.ast)
	values := make([]Value, t.parent.subTables.len())
	for i := 0; i < s.Len(); i++ {
		f := s.Field(i)
		fi := t.parent.subTables.getByName(f.Name).index
		values[fi] = f.Value
	}
	for i := range values {
		if !values[i].Valid() {
			values[i] = Null
		}
	}
	return values
}
// Value implements TableScanner.
func (t *joinTableScanner) Value() Value {
	return t.value
}
// Scan implements TableScanner. For each row from the join tree: apply the
// join condition (if any), then either evaluate the user's map expression or
// flatten the per-table structs into a single row whose columns are prefixed
// with their table name.
func (t *joinTableScanner) Scan() bool {
	for {
		if !t.sc.Scan() {
			return false
		}
		exploded := t.explodeRow(t.sc.Value())
		if t.joinExpr != nil && !t.joinExpr.Eval(t.ctx, exploded...).Bool(t.parent.ast) {
			continue
		}
		// BUG FIX: the value construction below used to be nested inside
		// "if t.joinExpr != nil", so a nil joinExpr yielded rows carrying a
		// stale t.value. It now runs for every accepted row; behavior is
		// unchanged when joinExpr is non-nil.
		if t.mapExpr != nil {
			t.value = t.mapExpr.Eval(t.ctx, exploded...)
			return true
		}
		rowVals := []StructField{}
		for ti, val := range exploded {
			switch val.Type() {
			case NullType:
				// Outer-join miss: this table contributes no columns.
			case StructType:
				sv := val.Struct(t.parent.ast)
				nFields := sv.Len()
				for i := 0; i < nFields; i++ {
					v := sv.Field(i)
					colName := symbol.Intern(t.parent.subTables.getByIndex(ti).name.Str() + "_" + v.Name.Str())
					rowVals = append(rowVals, StructField{Name: colName, Value: v.Value})
				}
			default:
				rowVals = append(rowVals, StructField{Name: t.parent.subTables.getByIndex(ti).name, Value: val})
			}
		}
		t.value = NewStruct(NewSimpleStruct(rowVals...))
		return true
	}
}
// Scanner implements Table. It scans the materialized btsv copy, so sharded
// scans (start/limit/total) work like any other materialized table.
func (t *joinTable) Scanner(ctx context.Context, start, limit, total int) TableScanner {
	t.init(ctx)
	return t.materializedTable.Scanner(ctx, start, limit, total)
}
// Prefetch implements Table.
func (t *joinTable) Prefetch(ctx context.Context) {}
// Hash implements Table.
func (t *joinTable) Hash() hash.Hash { return t.hash }
// Attrs implements Table.
func (t *joinTable) Attrs(ctx context.Context) TableAttrs {
	return TableAttrs{Name: "join"}
}
// joinCartesianProduct enumerates every combination of one row from each of
// the two value sets, merging each pair into a single nested-struct row.
type joinCartesianProduct struct {
	ctx        context.Context
	parent     *joinTable
	subTables  *joinSubTableList
	filterExpr *Func
	// Set of values from each subtable.
	values    [2][]Value
	row       Value // Current row
	totalRows int
	index     int
	label     string
}
// newJoinCartesianProduct prepares an iterator over the product of the two
// value sets. totalRows is the product of the non-empty set sizes; index
// starts at -1 so the first scan() visits combination 0.
func newJoinCartesianProduct(ctx context.Context, parent *joinTable, subTables *joinSubTableList, filterExpr *Func, values [2][]Value, label string) *joinCartesianProduct {
	if parent == nil {
		panic("nil parent")
	}
	totalRows := 1
	for _, v := range values {
		if len(v) > 0 {
			totalRows *= len(v)
		}
	}
	return &joinCartesianProduct{
		ctx:        ctx,
		parent:     parent,
		subTables:  subTables,
		filterExpr: filterExpr,
		values:     values,
		totalRows:  totalRows,
		index:      -1,
		label:      label,
	}
}
// scan advances to the next combination whose merged row passes the filter
// (if any). Returns false when all combinations are exhausted. The index is
// decomposed in mixed radix: index % len(values[0]) picks the row from side 0,
// index / len(values[0]) picks the row from side 1.
func (cp *joinCartesianProduct) scan() bool {
	for {
		cp.index++
		if cp.index >= cp.totalRows {
			return false
		}
		v := cp.index
		rowComponents := [2]Value{}
		nNulls := 0
		for ti := 0; ti < 2; ti++ {
			nValsInSubTable := len(cp.values[ti])
			if nValsInSubTable == 0 {
				nNulls++
				rowComponents[ti] = Null
				continue
			}
			val := cp.values[ti][v%nValsInSubTable]
			rowComponents[ti] = val
			v /= nValsInSubTable
			if val.Null() != NotNull {
				nNulls++
			}
		}
		if nNulls == len(cp.values) {
			// This can happen when no equality constraints exist and all the
			// subtables are doing batch scans.
			continue
		}
		cp.row = cp.mergeValues(rowComponents[:])
		if cp.filterExpr != nil {
			if !cp.filterExpr.Eval(cp.ctx, cp.row).Bool(cp.parent.ast) {
				continue
			}
		}
		// BUG FIX (perf): the original recomputed mergeValues here with the
		// identical inputs; the result already stored in cp.row is reused.
		return true
	}
}
// mergeValues flattens the per-subtable structs in "values" into one struct,
// slotting each field by its subtable's join index, filling known-but-absent
// subtables with Null, and dropping unused slots.
func (cp *joinCartesianProduct) mergeValues(values []Value) Value {
	fields := [joinMaxTables]StructField{}
	for _, v := range values {
		s := v.Struct(cp.parent.ast)
		for fi := 0; fi < s.Len(); fi++ {
			field := s.Field(fi)
			d := &fields[cp.parent.subTables.getByName(field.Name).index]
			if d.Name != symbol.Invalid {
				log.Panicf("joincp: Duplicate subtable found in %s", PrintValueList(values))
			}
			*d = field
		}
	}
	// Compact: fill gaps for subtables known to this product with Null, then
	// shift populated slots to the front.
	j := 0
	for i := range fields {
		if fields[i].Name == symbol.Invalid {
			if t := cp.subTables.getByIndex(i); t != nil {
				fields[i].Name = t.name
				fields[i].Value = Null
			}
		}
		if fields[i].Name != symbol.Invalid {
			fields[j] = fields[i]
			j++
		}
	}
	return NewStruct(NewSimpleStruct(fields[:j]...))
}
// value returns the row produced by the last successful scan().
func (cp *joinCartesianProduct) value() Value {
	return cp.row
}
// findEqJoinConstraints constructs joinConstraints given the join "where"
// condition. Note that the extracted constraints may be a subset of what
// "expr" specifies. So the toplevel scanner must post-filter the yielded rows
// using the expr.
func findEqJoinConstraints(expr ASTNode, tables *joinSubTableList) (constraints []joinConstraint) {
	// findTable maps a variable reference (e.g. "t0") back to its subtable.
	//
	// NOTE(review): getByName panics when the name is not a joined table, so
	// a var ref to anything else aborts rather than being skipped -- confirm
	// this is intended.
	findTable := func(expr ASTNode) *joinSubTable {
		varRefExpr, ok := expr.(*ASTVarRef)
		if !ok {
			return nil
		}
		tableName := varRefExpr.Var
		return tables.getByName(tableName)
	}
	// "lhs && rhs": each side may contribute constraints independently.
	if andand, ok := expr.(*ASTLogicalOp); ok && andand.AndAnd {
		constraints = append(constraints, findEqJoinConstraints(andand.LHS, tables)...)
		constraints = append(constraints, findEqJoinConstraints(andand.RHS, tables)...)
		return
	}
	funcallExpr, ok := expr.(*ASTFuncall)
	if !ok {
		return
	}
	if op := isEqualEqual(funcallExpr.Function); op != symbol.Invalid {
		c := joinConstraint{op: op}
		if len(funcallExpr.Raw) != 2 { // ==, ==? etc are always binary.
			log.Panic(funcallExpr)
		}
		var keyAST [2]*ASTStructFieldRef
		for i := range funcallExpr.Raw {
			// Both sides must have the form table.column to qualify.
			v, ok := funcallExpr.Raw[i].Expr.(*ASTStructFieldRef)
			if !ok {
				return
			}
			subTable := findTable(v.Parent)
			if subTable == nil {
				return
			}
			c.tables[i].table = subTable
			c.tables[i].col = v.Field
			keyAST[i] = newJoinKeyAST(subTable.name, v.Field)
			c.tables[i].keyExpr = newJoinKeyClosure(keyAST[i])
		}
		// filterExpr re-evaluates "key0 <op> key1" against a merged row.
		filterAST := NewASTFuncall(
			funcallExpr.Function,
			[]ASTParamVal{
				ASTParamVal{Name: funcallExpr.Raw[0].Name, Expr: keyAST[0]},
				ASTParamVal{Name: funcallExpr.Raw[1].Name, Expr: keyAST[1]}})
		// TODO(saito) Don't fake bindings. The below code doesn't allow accessing
		// global variables.
		frame := aiBindings{Frames: []aiFrame{
			aiGlobalConsts,
			aiFrame{symbol.AnonRow: AIAnyType}}}
		types := newASTTypes() // TODO(saito) this is screwy. Why are we throwing away the typeinfo?
		types.add(filterAST, &frame)
		c.filterExpr = NewUserDefinedFunc(filterAST, &bindings{},
			[]FormalArg{{Name: symbol.AnonRow, Positional: true, Required: true}},
			filterAST)
		constraints = append(constraints, c)
	}
	return
}
// parseJoinExpr builds the join-execution tree for the "join" builtin.
//
// tableList is the struct listing the source tables (mnemonic -> table), and
// joinExpr is the body of the join-condition lambda. It returns the set of
// leaf tables and the root node of the join tree. Pairs of tables related by
// an extracted equality constraint are combined with sorting-merge nodes;
// anything left over is combined with brute-force cross-merge nodes.
func (t *joinTable) parseJoinExpr(ctx context.Context, ast ASTNode, tableList Struct, joinExpr ASTNode) (*joinSubTableList, joinNode) {
	nTable := tableList.Len()
	tables := &joinSubTableList{n: nTable}
	nodes := make([]joinNode, nTable)
	// Create a joinLeafNode for each leaf table.
	for ti := 0; ti < nTable; ti++ {
		f := tableList.Field(ti)
		st := &joinSubTable{
			index: ti,
			total: nTable,
			name:  f.Name,
			table: f.Value.Table(t.ast),
		}
		tables.add(st)
		nodes[ti] = newJoinLeafNode(st)
	}
	// Extract "tblA.col==tblB.col"-style equality constraints from the join
	// expression.
	constraints := findEqJoinConstraints(joinExpr, tables)
	// removeConstraint deletes constraints[i] in place, preserving order.
	removeConstraint := func(i int) {
		copy(constraints[i:], constraints[i+1:])
		constraints = constraints[:len(constraints)-1]
	}
	// Create a join tree from the eqjoin constraints.
	var node joinNode // the current root node.
DoneConstraint:
	for len(constraints) > 0 {
		if node != nil {
			nodeSubtables := node.subTables()
			// Prefer a constraint that connects the current tree to a table
			// not yet merged into it; nodes[i] is nil once table i is merged.
			for ci, c := range constraints {
				if nodeSubtables.getByIndex(c.tables[0].table.index) != nil &&
					nodes[c.tables[1].table.index] != nil {
					// c.tables[0] appears in the node, and c.tables[1] is a new table.
					node = newJoinSortingMergeNode(ctx, t, node, nodes[c.tables[1].table.index], c)
					nodes[c.tables[1].table.index] = nil
					removeConstraint(ci)
					continue DoneConstraint
				}
				if nodeSubtables.getByIndex(c.tables[1].table.index) != nil &&
					nodes[c.tables[0].table.index] != nil {
					// c.tables[1] appears in the node, and c.tables[0] is a new table.
					node = newJoinSortingMergeNode(ctx, t, nodes[c.tables[0].table.index], node, c)
					nodes[c.tables[0].table.index] = nil
					removeConstraint(ci)
					continue DoneConstraint
				}
				if nodeSubtables.getByIndex(c.tables[0].table.index) != nil &&
					nodeSubtables.getByIndex(c.tables[1].table.index) != nil {
					// This constraint can be just added to the existing tree. But
					// currently, we post-filter rows using this constraint at the very
					// root of the tree. So here, it's ok to remove it.
					removeConstraint(ci)
					continue DoneConstraint
				}
			}
		}
		// Failed to attach any constraints to the existing tree.
		//
		// Create a node that merges two tables listed in constraints[0].
		c := constraints[0]
		removeConstraint(0)
		var child [2]joinNode
		for i := 0; i < 2; i++ {
			st := c.tables[i].table
			if nodes[st.index] == nil {
				// The "if node!=nil" branch above should have handled this case.
				Panicf(ast, "expr %v, constraint %+v, index %+v", joinExpr, c, st)
			}
			child[i], nodes[st.index] = nodes[st.index], nil
		}
		if node == nil { // First constraint
			node = newJoinSortingMergeNode(ctx, t, child[0], child[1], c)
			continue DoneConstraint
		}
		// Unusual case: a join expression looks like A.x==B.y && C.z==D.w We just
		// do bruteforce merging.
		node = newJoinCrossMergeNode(ctx, t, node, newJoinSortingMergeNode(ctx, t, child[0], child[1], c))
	}
	// Add the remaining tables and do a brute-force crossjoin.
	for _, child := range nodes {
		if child != nil {
			if node == nil {
				node = child
			} else {
				node = newJoinCrossMergeNode(ctx, t, node, child)
			}
		}
	}
	return tables, node
}
// builtinJoin implements the "join" builtin. args[0] is the struct listing
// the source tables, args[1] is the join-condition closure, and args[2] is
// the optional map:= closure that shapes the output rows.
func builtinJoin(ctx context.Context, ast ASTNode, args []ActualArg) Value {
	t := &joinTable{ast: ast}
	// Build the join tree from the join-condition expression.
	tables, node := t.parseJoinExpr(ctx, ast, args[0].Struct(), args[1].Expr.(*ASTLambda).Body)
	log.Debug.Printf("join: parse %v -> %v", args[1].Expr, node.Attrs(ctx).Name)
	joinExpr := args[1].Func()
	mapExpr := args[2].Func()
	// approxLen is the largest approximate length among the source tables; it
	// serves as the approximate length of the joined table.
	approxLen := 1
	for _, st := range tables.list() {
		if n := st.table.Len(ctx, Approx); n > approxLen {
			approxLen = n
		}
	}
	// TODO(saito) Enable caching
	t.hash = hashJoinCall(tables, joinExpr, mapExpr)
	t.subTables = tables
	t.joinExpr = joinExpr
	t.mapExpr = mapExpr
	t.root = node
	t.approxLen = approxLen
	return NewTable(t)
}
// hashJoinCall computes a deterministic hash identifying one invocation of
// the "join" builtin: a fixed seed mixed with the hash of every source
// table, the join expression, and (when present) the map expression.
func hashJoinCall(tables *joinSubTableList, joinExpr, mapExpr *Func) hash.Hash {
	// Fixed random seed specific to the "join" builtin.
	result := hash.Hash{
		0x7c, 0x35, 0xa9, 0xab, 0x32, 0xa2, 0xa6, 0x4a,
		0x49, 0x4b, 0x16, 0x34, 0xb4, 0xbc, 0x3a, 0x99,
		0xfd, 0x5a, 0xb9, 0x8f, 0x31, 0x53, 0xb1, 0xd8,
		0x00, 0x10, 0x5d, 0x6f, 0xce, 0x4b, 0xf4, 0xc9}
	for _, st := range tables.list() {
		result = result.Merge(st.table.Hash())
	}
	result = result.Merge(joinExpr.Hash())
	if mapExpr != nil {
		result = result.Merge(mapExpr.Hash())
	}
	return result
}
// init registers the "join" builtin and its online help text. The builtin
// takes the table-list struct (positional), the join-condition closure
// (positional), and an optional map:= closure shaping the output rows.
func init() {
	RegisterBuiltinFunc("join",
		`
join({t0:tbl0,t1:tbl1,t2:tbl2}, t0.colA==t1.colB && t1.colB == t2.colC [, map:={colx:t0.colA, coly:t2.colC}])
Arg types:
- _tbl0_, _tbl1_, ..: table
Join function joins multiple tables into one. The first argument lists the table
name and its mnemonic in a struct form. The 2nd arg is the join condition.
The ::map:: arg specifies the format of the output rows.
Imagine the following tables:
table0:
║colA ║ colB║
├─────┼─────┤
│Cat  │ 3   │
│Dog  │ 8   │
table1:
║colA ║ colC║
├─────┼─────┤
│Cat  │ red │
│Bat  │ blue│
Example:
1. ::join({t0:table0, t1:table1}, t0.colA==t1.colA, map:={colA:t0.colA, colB: t0.colB, colC: t1.colC})::
This expression performs an inner join of t0 and t1.
║colA ║ colB║ colC║
├─────┼─────┼─────┤
│Cat  │ 3   │ red │
2. ::join({t0:table0, t1:table1}, t0.A?==?t1.A,map:={A:t0.A, A2:t1.A,B:t0.B, c:t1.C})::
This expression performs an outer join of t0 and t1.
║  A║  A2║  B║    c║
├────┼────┼───┼─────┤
│  NA│ bat│ NA│ blue│
│ cat│ cat│  3│  red│
│ dog│  NA│  8│   NA│
The join condition doesn't need to be just "=="s connected by "&&"s. It can be
any expression, although join provides a special fast-execution path for flat,
conjunctive "=="s, so use them as much as possible.
Caution: join currently is very slow on large tables. Talk to ysaito if you see
any problem.
TODO: describe left/right joins (use ==?, ?==)
TODO: describe cross joins (set non-equality join conditions, such as t0.colA >= t1.colB)`,
		builtinJoin,
		func(ast ASTNode, args []AIArg) AIType { return AITableType },
		FormalArg{Positional: true, Required: true},                    // tables
		FormalArg{Positional: true, Required: true, JoinClosure: true}, // join expr
		FormalArg{Name: symbol.Map, JoinClosure: true, DefaultValue: NewFunc(nil)}) // map:=expr
}
|
<filename>clients/java/dkv-client/src/main/java/org/dkv/client/ShardedDKVClient.java
package org.dkv.client;
import com.github.benmanes.caffeine.cache.*;
import com.google.common.collect.Iterables;
import dkv.serverpb.Api;
import java.io.Closeable;
import java.util.*;
import static java.util.Collections.addAll;
import static org.dkv.client.DKVNodeType.*;
import static org.dkv.client.Utils.checkf;
/**
 * Implementation of a DKV client that can address multiple
 * DKV clusters each dedicated to a portion of the keyspace
 * called a shard. It depends on a concrete implementation
 * of a {@link ShardProvider} for resolving the respective
 * DKV shards involved in a given DKV operation.
 *
 * <p>Once the respective DKV shard is resolved, the implementation
 * creates an instance of {@link SimpleDKVClient} and invokes
 * the corresponding operation on it. Upon completion, the underlying
 * GRPC conduit is closed.
 *
 * @see DKVShard
 * @see ShardProvider
 * @see SimpleDKVClient
 */
public class ShardedDKVClient implements DKVClient {

    // Maximum number of cached SimpleDKVClient instances across all shards.
    private static final int POOL_SIZE = 1000;

    private final ShardProvider shardProvider;
    private final DKVClientPool pool;

    public ShardedDKVClient(ShardProvider shardProvider) {
        checkf(shardProvider != null, IllegalArgumentException.class, "Shard provider must be provided");
        this.shardProvider = shardProvider;
        this.pool = new DKVClientPool(POOL_SIZE);
    }

    @Override
    public void put(String key, String value) {
        DKVShard dkvShard = shardProvider.provideShard(key);
        checkf(dkvShard != null, IllegalArgumentException.class, "unable to compute shard for the given key: %s", key);
        //noinspection ConstantConditions
        DKVClient dkvClient = pool.getDKVClient(dkvShard, MASTER, UNKNOWN);
        dkvClient.put(key, value);
    }

    @Override
    public void put(byte[] key, byte[] value) {
        DKVShard dkvShard = shardProvider.provideShard(key);
        checkf(dkvShard != null, IllegalArgumentException.class, "unable to compute shard for the given key");
        //noinspection ConstantConditions
        DKVClient dkvClient = pool.getDKVClient(dkvShard, MASTER, UNKNOWN);
        dkvClient.put(key, value);
    }

    @Override
    public boolean compareAndSet(byte[] key, byte[] expect, byte[] update) {
        DKVShard dkvShard = shardProvider.provideShard(key);
        checkf(dkvShard != null, IllegalArgumentException.class, "unable to compute shard for the given key");
        //noinspection ConstantConditions
        DKVClient dkvClient = pool.getDKVClient(dkvShard, MASTER, UNKNOWN);
        return dkvClient.compareAndSet(key, expect, update);
    }

    @Override
    public long incrementAndGet(byte[] key) {
        DKVShard dkvShard = shardProvider.provideShard(key);
        checkf(dkvShard != null, IllegalArgumentException.class, "unable to compute shard for the given key");
        //noinspection ConstantConditions
        DKVClient dkvClient = pool.getDKVClient(dkvShard, MASTER, UNKNOWN);
        return dkvClient.incrementAndGet(key);
    }

    @Override
    public long decrementAndGet(byte[] key) {
        DKVShard dkvShard = shardProvider.provideShard(key);
        checkf(dkvShard != null, IllegalArgumentException.class, "unable to compute shard for the given key");
        //noinspection ConstantConditions
        DKVClient dkvClient = pool.getDKVClient(dkvShard, MASTER, UNKNOWN);
        return dkvClient.decrementAndGet(key);
    }

    @Override
    public long addAndGet(byte[] key, long delta) {
        DKVShard dkvShard = shardProvider.provideShard(key);
        checkf(dkvShard != null, IllegalArgumentException.class, "unable to compute shard for the given key");
        //noinspection ConstantConditions
        DKVClient dkvClient = pool.getDKVClient(dkvShard, MASTER, UNKNOWN);
        return dkvClient.addAndGet(key, delta);
    }

    @Override
    public void put(String key, String value, long expiryTS) {
        DKVShard dkvShard = shardProvider.provideShard(key);
        checkf(dkvShard != null, IllegalArgumentException.class, "unable to compute shard for the given key: %s", key);
        //noinspection ConstantConditions
        DKVClient dkvClient = pool.getDKVClient(dkvShard, MASTER, UNKNOWN);
        dkvClient.put(key, value, expiryTS);
    }

    @Override
    public void put(byte[] key, byte[] value, long expiryTS) {
        DKVShard dkvShard = shardProvider.provideShard(key);
        checkf(dkvShard != null, IllegalArgumentException.class, "unable to compute shard for the given key: %s", key);
        //noinspection ConstantConditions
        DKVClient dkvClient = pool.getDKVClient(dkvShard, MASTER, UNKNOWN);
        dkvClient.put(key, value, expiryTS);
    }

    @Override
    public String get(Api.ReadConsistency consistency, String key) {
        DKVShard dkvShard = shardProvider.provideShard(key);
        checkf(dkvShard != null, IllegalArgumentException.class, "unable to compute shard for the given key: %s", key);
        // Linearizable reads must go to a master node; sequential reads may go to slaves.
        DKVNodeType nodeType = getNodeTypeByReadConsistency(consistency);
        //noinspection ConstantConditions
        DKVClient dkvClient = pool.getDKVClient(dkvShard, nodeType, UNKNOWN);
        return dkvClient.get(consistency, key);
    }

    @Override
    public byte[] get(Api.ReadConsistency consistency, byte[] key) {
        DKVShard dkvShard = shardProvider.provideShard(key);
        checkf(dkvShard != null, IllegalArgumentException.class, "unable to compute shard for the given key");
        DKVNodeType nodeType = getNodeTypeByReadConsistency(consistency);
        //noinspection ConstantConditions
        DKVClient dkvClient = pool.getDKVClient(dkvShard, nodeType, UNKNOWN);
        return dkvClient.get(consistency, key);
    }

    @Override
    public KV.Strings[] multiGet(Api.ReadConsistency consistency, String[] keys) {
        checkf(keys != null && keys.length > 0, IllegalArgumentException.class, "must provide at least one key for multi get");
        Map<DKVShard, List<String>> dkvShards = shardProvider.provideShards(keys);
        checkf(dkvShards != null && !dkvShards.isEmpty(), IllegalArgumentException.class, "unable to compute shard(s) for the given keys");
        DKVNodeType nodeType = getNodeTypeByReadConsistency(consistency);
        //noinspection ConstantConditions
        if (dkvShards.size() > 1) {
            // Cross-shard multi-get cannot be linearizable: each shard is a
            // separate consensus group.
            checkf(consistency != Api.ReadConsistency.LINEARIZABLE, UnsupportedOperationException.class,
                    "DKV does not yet support cross shard linearizable multi get");
            LinkedList<KV.Strings> result = new LinkedList<>();
            for (Map.Entry<DKVShard, List<String>> entry : dkvShards.entrySet()) {
                DKVClient dkvClient = pool.getDKVClient(entry.getKey(), nodeType, UNKNOWN);
                String[] reqKeys = entry.getValue().toArray(new String[0]);
                KV.Strings[] kvs = dkvClient.multiGet(consistency, reqKeys);
                addAll(result, kvs);
            }
            return result.toArray(new KV.Strings[0]);
        } else {
            DKVShard dkvShard = Iterables.get(dkvShards.keySet(), 0);
            DKVClient dkvClient = pool.getDKVClient(dkvShard, nodeType, UNKNOWN);
            return dkvClient.multiGet(consistency, keys);
        }
    }

    @Override
    public KV.Bytes[] multiGet(Api.ReadConsistency consistency, byte[][] keys) {
        checkf(keys != null && keys.length > 0, IllegalArgumentException.class, "must provide at least one key for multi get");
        Map<DKVShard, List<byte[]>> dkvShards = shardProvider.provideShards(keys);
        checkf(dkvShards != null && !dkvShards.isEmpty(), IllegalArgumentException.class, "unable to compute shard(s) for the given keys");
        DKVNodeType nodeType = getNodeTypeByReadConsistency(consistency);
        //noinspection ConstantConditions
        if (dkvShards.size() > 1) {
            // Cross-shard multi-get cannot be linearizable: each shard is a
            // separate consensus group.
            checkf(consistency != Api.ReadConsistency.LINEARIZABLE, UnsupportedOperationException.class,
                    "DKV does not yet support cross shard linearizable multi get");
            LinkedList<KV.Bytes> result = new LinkedList<>();
            for (Map.Entry<DKVShard, List<byte[]>> entry : dkvShards.entrySet()) {
                DKVClient dkvClient = pool.getDKVClient(entry.getKey(), nodeType, UNKNOWN);
                byte[][] reqKeys = entry.getValue().toArray(new byte[0][]);
                KV.Bytes[] kvs = dkvClient.multiGet(consistency, reqKeys);
                addAll(result, kvs);
            }
            return result.toArray(new KV.Bytes[0]);
        } else {
            DKVShard dkvShard = Iterables.get(dkvShards.keySet(), 0);
            DKVClient dkvClient = pool.getDKVClient(dkvShard, nodeType, UNKNOWN);
            return dkvClient.multiGet(consistency, keys);
        }
    }

    @Override
    public void delete(String key) {
        DKVShard dkvShard = shardProvider.provideShard(key);
        checkf(dkvShard != null, IllegalArgumentException.class, "unable to compute shard for the given key");
        //noinspection ConstantConditions
        DKVClient dkvClient = pool.getDKVClient(dkvShard, MASTER, UNKNOWN);
        dkvClient.delete(key);
    }

    @Override
    public void delete(byte[] key) {
        DKVShard dkvShard = shardProvider.provideShard(key);
        checkf(dkvShard != null, IllegalArgumentException.class, "unable to compute shard for the given key");
        //noinspection ConstantConditions
        DKVClient dkvClient = pool.getDKVClient(dkvShard, MASTER, UNKNOWN);
        dkvClient.delete(key);
    }

    @Override
    public Iterator<DKVEntry> iterate(String startKey) {
        DKVShard dkvShard = shardProvider.provideShard(startKey);
        checkf(dkvShard != null, IllegalArgumentException.class, "unable to compute shard for the given start key: %s", startKey);
        //noinspection ConstantConditions
        DKVClient dkvClient = pool.getDKVClient(dkvShard, SLAVE, UNKNOWN);
        return dkvClient.iterate(startKey);
    }

    @Override
    public Iterator<DKVEntry> iterate(byte[] startKey) {
        DKVShard dkvShard = shardProvider.provideShard(startKey);
        checkf(dkvShard != null, IllegalArgumentException.class, "unable to compute shard for the given start key");
        //noinspection ConstantConditions
        DKVClient dkvClient = pool.getDKVClient(dkvShard, SLAVE, UNKNOWN);
        return dkvClient.iterate(startKey);
    }

    @Override
    public Iterator<DKVEntry> iterate(String startKey, String keyPref) {
        DKVShard dkvShard = shardProvider.provideShard(startKey);
        checkf(dkvShard != null, IllegalArgumentException.class, "unable to compute shard for the given start key: %s", startKey);
        //noinspection ConstantConditions
        DKVClient dkvClient = pool.getDKVClient(dkvShard, SLAVE, UNKNOWN);
        return dkvClient.iterate(startKey, keyPref);
    }

    @Override
    public Iterator<DKVEntry> iterate(byte[] startKey, byte[] keyPref) {
        DKVShard dkvShard = shardProvider.provideShard(startKey);
        checkf(dkvShard != null, IllegalArgumentException.class, "unable to compute shard for the given start key");
        //noinspection ConstantConditions
        DKVClient dkvClient = pool.getDKVClient(dkvShard, SLAVE, UNKNOWN);
        return dkvClient.iterate(startKey, keyPref);
    }

    @Override
    public void close() {
        pool.close();
    }

    /**
     * Bounded cache of {@link SimpleDKVClient} instances keyed by node,
     * authority and shard name. Evicted entries are closed via the
     * removal listener.
     */
    private static class DKVClientPool implements Closeable,
            RemovalListener<DKVClientPool.Key, SimpleDKVClient>, CacheLoader<DKVClientPool.Key, SimpleDKVClient> {

        private static class Key {
            private final DKVNode dkvNode;
            private final String authority;
            private final String shardName;

            private Key(DKVNode dkvNode, String authority, String shardName) {
                this.dkvNode = dkvNode;
                this.authority = authority;
                this.shardName = shardName;
            }

            @Override
            public boolean equals(Object o) {
                if (this == o) return true;
                if (o == null || getClass() != o.getClass()) return false;
                Key that = (Key) o;
                // BUGFIX: shardName must participate in equality. It is used by
                // load() to construct the client; omitting it made two shards
                // sharing the same node/authority collide in the cache and one
                // of them receive a client built for the wrong shard.
                return Objects.equals(dkvNode, that.dkvNode)
                        && Objects.equals(authority, that.authority)
                        && Objects.equals(shardName, that.shardName);
            }

            @Override
            public int hashCode() {
                // Keep hashCode consistent with equals (includes shardName).
                return Objects.hash(dkvNode, authority, shardName);
            }
        }

        private final LoadingCache<Key, SimpleDKVClient> internalPool;

        private DKVClientPool(long poolSize) {
            internalPool = Caffeine.newBuilder().maximumSize(poolSize).removalListener(this).build(this);
        }

        SimpleDKVClient getDKVClient(DKVShard dkvShard, DKVNodeType... nodeTypes) {
            DKVNodeSet nodeSet = dkvShard.getNodesByType(nodeTypes);
            DKVNode dkvNode = nodeSet.getNextNode();
            return internalPool.get(new Key(dkvNode, nodeSet.getName(), dkvShard.getName()));
        }

        @Override
        public void close() {
            // Invalidation triggers onRemoval(), which closes each client.
            internalPool.invalidateAll();
        }

        @Override
        public void onRemoval(Key id, SimpleDKVClient client, RemovalCause removalCause) {
            if (client != null) {
                client.close();
            }
        }

        @Override
        public SimpleDKVClient load(ShardedDKVClient.DKVClientPool.Key key) {
            return new SimpleDKVClient(key.dkvNode.getHost(), key.dkvNode.getPort(), key.authority, key.shardName);
        }

        @Override
        public SimpleDKVClient reload(ShardedDKVClient.DKVClientPool.Key key, SimpleDKVClient oldClient) {
            oldClient.close();
            return load(key);
        }
    }
}
|
// Unregister unregisters and shuts down the specified device.
//
// If the device is not currently registered, Unregister will do nothing.
func (reg *Registry) Unregister(d device.D) {
reg.mu.Lock()
defer reg.mu.Unlock()
e := reg.devices[d.ID()]
if e != nil {
This follows the same path done in manageEntryLifecycle's defer
statements when a device naturally expires.
e.device.MarkDone()
reg.unregisterEntryLocked(e)
}
} |
from config import load_vars
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

# Load AWS RDS credentials/endpoint from the environment or config file.
env_vars = load_vars()

# MySQL connection URL of the form mysql+pymysql://<user>:<password>@<host>.
# NOTE(review): no database name is appended after the host — presumably it
# is selected elsewhere; confirm against callers.
connection_url = 'mysql+pymysql://{}:{}@{}'.format(
    env_vars['AWS_RDS_USER'], env_vars['AWS_RDS_PASSWORD'], env_vars['AWS_RDS_URL'])

# Module-level engine, session factory and declarative base shared by the
# application's models (imported from this module).
engine = create_engine(connection_url)
Session = sessionmaker(bind=engine)
Base = declarative_base()
|
/**
* Created by jeppe on 1/10/17.
*/
class MistLog {
static void err(String op, int code, String msg) {
Log.e("RPC error", msg + " code: " + code + " op: " + op);
}
} |
<reponame>EmrysMyrddin/saga<gh_stars>1-10
import { type Operation, type WrapperOperation, fork, terminal } from './operation'
import type { Effect } from './effect'
import type { Plugin } from './plugin'
// Prefix for this plugin's operation kinds (e.g. "@cuillere/time/sleep").
const namespace = '@cuillere/time'

/**
 * Operations handled by [[timePlugin]].
 *
 * @hidden
 */
export type SleepOperations = {
  sleep: SleepOperation
  after: AfterOperation
}
/**
 * Creates the time plugin, providing the handlers for the [[sleep]] and
 * [[after]] operations.
 *
 * @hidden
 */
export function timePlugin(): Plugin<SleepOperations> {
  return {
    namespace,
    handlers: {
      // Resolves after `delay` milliseconds (via setTimeout).
      async* sleep({ delay }: SleepOperation) {
        return new Promise((resolve) => { setTimeout(resolve, delay) })
      },
      // Sleeps for `delay`, then delegates to the wrapped effect as a
      // terminal operation.
      * after({ effect, delay }: AfterOperation) {
        yield sleep(delay)
        return yield terminal(effect)
      },
    },
  }
}
/**
 * An operation to sleep during a given delay.
 *
 * @category for operations
 */
export interface SleepOperation extends Operation {
  /**
   * Sleeping delay in milliseconds. When omitted, the timeout fires as soon
   * as possible (setTimeout default).
   */
  delay?: number
}
/**
 * Creates an operation which sleeps for the given delay when yielded.
 *
 * @param delay Sleeping time in milliseconds.
 * @returns A new sleep operation.
 * @yields `void`
 * @category for creating effects
 */
export function sleep(delay?: number): SleepOperation {
  const operation: SleepOperation = {
    kind: `${namespace}/sleep`,
    delay,
  }
  return operation
}
/**
 * An operation to execute an effect after a given delay.
 *
 * @category for operations
 */
export interface AfterOperation<T extends Effect = Effect> extends WrapperOperation<T> {
  /**
   * Delay before execution in milliseconds.
   */
  delay?: number
}
/**
 * Executes an effect in a separate [[Task]] (see [[fork]]) after a given delay.
 *
 * @param effect Effect to be executed.
 * @param delay Delay before effect execution, in milliseconds.
 * @returns A new after operation, wrapped in a fork.
 * @yields A new asynchronous [[Task]].
 * @category for creating effects
 */
export function after<T extends Effect = Effect>(effect: T, delay?: number) {
  const operation: AfterOperation<T> = { kind: `${namespace}/after`, effect, delay }
  return fork(operation)
}
|
/**
 * JUnit test rule to create a mock {@link WindowManagerService} instance for tests.
 *
 * <p>The rule mocks out the system services that WindowManagerService depends
 * on ({@code LocalServices}, {@code Watchdog}, input, power, activity
 * managers, ...), builds a real {@code WindowManagerService} wired to those
 * mocks in {@link #setUp()}, and tears everything down after each test.
 */
public class SystemServicesTestRule implements TestRule {

    private static final String TAG = SystemServicesTestRule.class.getSimpleName();

    // Handshake flag used by waitHandlerIdle(): set to true by a posted
    // runnable once all previously queued messages have been processed.
    private final AtomicBoolean mCurrentMessagesProcessed = new AtomicBoolean(false);

    private MockTracker mMockTracker;
    private StaticMockitoSession mMockitoSession;
    private WindowManagerService mWindowManagerService;
    private TestWindowManagerPolicy mWindowManagerPolicy;

    /** {@link MockTracker} to track mocks created by {@link SystemServicesTestRule}. */
    private static class Tracker extends MockTracker {
        // This empty extended class is necessary since Mockito distinguishes a listener by it
        // class.
    }

    @Override
    public Statement apply(Statement base, Description description) {
        // Wrap the test so setUp() runs before it and tearDown() always runs
        // afterwards, even when the test throws.
        return new Statement() {
            @Override
            public void evaluate() throws Throwable {
                try {
                    runWithDexmakerShareClassLoader(SystemServicesTestRule.this::setUp);
                    base.evaluate();
                } finally {
                    tearDown();
                }
            }
        };
    }

    // Builds the mocked system-service environment and a real
    // WindowManagerService wired to those mocks.
    private void setUp() {
        mMockTracker = new Tracker();

        mMockitoSession = mockitoSession()
                .spyStatic(LocalServices.class)
                .mockStatic(LockGuard.class)
                .mockStatic(Watchdog.class)
                .strictness(Strictness.LENIENT)
                .startMocking();

        doReturn(mock(Watchdog.class)).when(Watchdog::getInstance);

        final Context context = getInstrumentation().getTargetContext();
        spyOn(context);

        // Broadcast receivers and content observers are no-ops under test.
        doReturn(null).when(context)
                .registerReceiver(nullable(BroadcastReceiver.class), any(IntentFilter.class));
        doReturn(null).when(context)
                .registerReceiverAsUser(any(BroadcastReceiver.class), any(UserHandle.class),
                        any(IntentFilter.class), nullable(String.class), nullable(Handler.class));

        final ContentResolver contentResolver = context.getContentResolver();
        spyOn(contentResolver);
        doNothing().when(contentResolver)
                .registerContentObserver(any(Uri.class), anyBoolean(), any(ContentObserver.class),
                        anyInt());

        final AppOpsManager appOpsManager = mock(AppOpsManager.class);
        doReturn(appOpsManager).when(context)
                .getSystemService(eq(Context.APP_OPS_SERVICE));

        // Local-service singletons WindowManagerService pulls in at init time.
        final DisplayManagerInternal dmi = mock(DisplayManagerInternal.class);
        doReturn(dmi).when(() -> LocalServices.getService(eq(DisplayManagerInternal.class)));

        final PowerManagerInternal pmi = mock(PowerManagerInternal.class);
        final PowerSaveState state = new PowerSaveState.Builder().build();
        doReturn(state).when(pmi).getLowPowerState(anyInt());
        doReturn(pmi).when(() -> LocalServices.getService(eq(PowerManagerInternal.class)));

        final ActivityManagerInternal ami = mock(ActivityManagerInternal.class);
        doReturn(ami).when(() -> LocalServices.getService(eq(ActivityManagerInternal.class)));

        final ActivityTaskManagerInternal atmi = mock(ActivityTaskManagerInternal.class);
        // Run the keyguard-flags callback synchronously when notified.
        doAnswer((InvocationOnMock invocationOnMock) -> {
            final Runnable runnable = invocationOnMock.getArgument(0);
            if (runnable != null) {
                runnable.run();
            }
            return null;
        }).when(atmi).notifyKeyguardFlagsChanged(nullable(Runnable.class), anyInt());
        doReturn(atmi).when(() -> LocalServices.getService(eq(ActivityTaskManagerInternal.class)));

        final InputManagerService ims = mock(InputManagerService.class);
        // InputChannel is final and can't be mocked.
        final InputChannel[] input = InputChannel.openInputChannelPair(TAG_WM);
        if (input != null && input.length > 1) {
            doReturn(input[1]).when(ims).monitorInput(anyString(), anyInt());
        }

        final ActivityTaskManagerService atms = mock(ActivityTaskManagerService.class);
        final TaskChangeNotificationController taskChangeNotificationController = mock(
                TaskChangeNotificationController.class);
        doReturn(taskChangeNotificationController).when(atms).getTaskChangeNotificationController();

        final WindowManagerGlobalLock wmLock = new WindowManagerGlobalLock();
        doReturn(wmLock).when(atms).getGlobalLock();

        mWindowManagerPolicy = new TestWindowManagerPolicy(this::getWindowManagerService);
        mWindowManagerService = WindowManagerService.main(
                context, ims, false, false, mWindowManagerPolicy, atms, StubTransaction::new);

        mWindowManagerService.onInitReady();

        final Display display = mWindowManagerService.mDisplayManager.getDisplay(DEFAULT_DISPLAY);
        // Display creation is driven by the ActivityManagerService via
        // ActivityStackSupervisor. We emulate those steps here.
        mWindowManagerService.mRoot.createDisplayContent(display, mock(ActivityDisplay.class));

        mMockTracker.stopTracking();
    }

    // Drains WM handlers, unregisters local services, and closes the Mockito
    // session and mock tracker (in reverse order of creation).
    private void tearDown() {
        waitUntilWindowManagerHandlersIdle();
        removeLocalServices();
        mWindowManagerService = null;
        mWindowManagerPolicy = null;
        if (mMockitoSession != null) {
            mMockitoSession.finishMocking();
            mMockitoSession = null;
        }

        if (mMockTracker != null) {
            mMockTracker.close();
            mMockTracker = null;
        }
    }

    private static void removeLocalServices() {
        LocalServices.removeServiceForTest(WindowManagerInternal.class);
        LocalServices.removeServiceForTest(WindowManagerPolicy.class);
    }

    WindowManagerService getWindowManagerService() {
        return mWindowManagerService;
    }

    // Removes all pending callbacks/messages from the WM-related handlers.
    void cleanupWindowManagerHandlers() {
        final WindowManagerService wm = getWindowManagerService();
        if (wm == null) {
            return;
        }
        wm.mH.removeCallbacksAndMessages(null);
        wm.mAnimationHandler.removeCallbacksAndMessages(null);
        SurfaceAnimationThread.getHandler().removeCallbacksAndMessages(null);
    }

    // Blocks until the WM main, animation, and surface-animation handlers
    // have processed everything currently queued.
    void waitUntilWindowManagerHandlersIdle() {
        final WindowManagerService wm = getWindowManagerService();
        if (wm == null) {
            return;
        }
        // Removing delayed FORCE_GC message decreases time for waiting idle.
        wm.mH.removeMessages(WindowManagerService.H.FORCE_GC);
        waitHandlerIdle(wm.mH);
        waitHandlerIdle(wm.mAnimationHandler);
        waitHandlerIdle(SurfaceAnimationThread.getHandler());
    }

    private void waitHandlerIdle(Handler handler) {
        synchronized (mCurrentMessagesProcessed) {
            // Add a message to the handler queue and make sure it is fully processed before we move
            // on. This makes sure all previous messages in the handler are fully processed vs. just
            // popping them from the message queue.
            mCurrentMessagesProcessed.set(false);
            handler.post(() -> {
                synchronized (mCurrentMessagesProcessed) {
                    mCurrentMessagesProcessed.set(true);
                    mCurrentMessagesProcessed.notifyAll();
                }
            });
            while (!mCurrentMessagesProcessed.get()) {
                try {
                    mCurrentMessagesProcessed.wait();
                } catch (InterruptedException e) {
                }
            }
        }
    }
}
/**
* Copyright 2010 - 2020 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jetbrains.exodus.entitystore;
import jetbrains.exodus.ByteIterable;
import jetbrains.exodus.ExodusException;
import jetbrains.exodus.TestFor;
import jetbrains.exodus.TestUtil;
import jetbrains.exodus.bindings.ComparableSet;
import jetbrains.exodus.core.execution.Job;
import jetbrains.exodus.util.ByteArraySizedInputStream;
import jetbrains.exodus.util.DeferredIO;
import jetbrains.exodus.util.LightByteArrayOutputStream;
import jetbrains.exodus.util.UTFUtil;
import org.jetbrains.annotations.NotNull;
import org.junit.Assert;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Date;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Semaphore;
@SuppressWarnings({"RawUseOfParameterizedType", "ConstantConditions"})
public class EntityTests extends EntityStoreTestBase {
    // Test cases that manage transactions themselves; the base class must not
    // open an implicit transaction for them.
    @Override
    protected String[] casesThatDontNeedExplicitTxn() {
        return new String[]{"testConcurrentCreationTypeIdsAreOk",
                "testConcurrentSerializableChanges",
                "testEntityStoreClear",
                "testSetPhantomLink",
                "testAddPhantomLink"
        };
    }
    // A newly created entity is visible via getAll() within the same (not yet
    // flushed) transaction and gets non-negative type and local ids.
    public void testCreateSingleEntity() {
        final StoreTransaction txn = getStoreTransaction();
        final Entity entity = txn.newEntity("Issue");
        final EntityIterable all = txn.getAll("Issue");
        Assert.assertEquals(1, all.size());
        Assert.assertTrue(all.iterator().hasNext());
        Assert.assertNotNull(entity);
        Assert.assertTrue(entity.getId().getTypeId() >= 0);
        Assert.assertTrue(entity.getId().getLocalId() >= 0);
    }
public void testCreateSingleEntity2() {
final StoreTransaction txn = getStoreTransaction();
final Entity entity = txn.newEntity("Issue");
txn.flush();
Assert.assertNotNull(entity);
Assert.assertTrue(entity.getId().getTypeId() >= 0);
Assert.assertTrue(entity.getId().getLocalId() >= 0);
Assert.assertTrue(entity.getId().equals(new PersistentEntityId(0, 0)));
try {
txn.getEntity(new PersistentEntityId(0, 1));
Assert.fail();
} catch (EntityRemovedInDatabaseException ignore) {
}
}
    // An entity id round-trips through its string representation:
    // toEntityId(id.toString()) resolves back to the same entity.
    public void testEntityIdToString() {
        final StoreTransaction txn = getStoreTransaction();
        final Entity entity = txn.newEntity("Issue");
        txn.flush();
        final String representation = entity.getId().toString();
        Assert.assertEquals(entity, txn.getEntity(txn.toEntityId(representation)));
    }
    // Two entities created in one transaction get strictly increasing local ids.
    public void testCreateTwoEntitiesInTransaction() {
        final StoreTransaction txn = getStoreTransaction();
        final Entity entity1 = txn.newEntity("Issue");
        final Entity entity2 = txn.newEntity("Issue");
        txn.flush();
        Assert.assertNotNull(entity1);
        Assert.assertTrue(entity1.getId().getTypeId() >= 0);
        Assert.assertTrue(entity1.getId().getLocalId() >= 0);
        Assert.assertNotNull(entity2);
        Assert.assertTrue(entity2.getId().getLocalId() > 0);
        Assert.assertTrue(entity2.getId().getLocalId() > entity1.getId().getLocalId());
    }
    // Local ids keep increasing across separate flushed transactions as well.
    public void testCreateTwoEntitiesInTwoTransactions() {
        final StoreTransaction txn = getStoreTransaction();
        final Entity entity1 = txn.newEntity("Issue");
        txn.flush();
        final Entity entity2 = txn.newEntity("Issue");
        txn.flush();
        Assert.assertNotNull(entity1);
        Assert.assertTrue(entity1.getId().getTypeId() >= 0);
        Assert.assertTrue(entity1.getId().getLocalId() >= 0);
        Assert.assertNotNull(entity2);
        Assert.assertTrue(entity2.getId().getLocalId() > 0);
        Assert.assertTrue(entity2.getId().getLocalId() > entity1.getId().getLocalId());
    }
    // Reading an entity back by id after a flush yields the same type and id.
    public void testCreateAndGetSingleEntity() {
        final StoreTransaction txn = getStoreTransaction();
        final Entity entity = txn.newEntity("Issue");
        txn.flush();
        Assert.assertEquals("Issue", entity.getType());
        final Entity sameEntity = txn.getEntity(entity.getId());
        Assert.assertNotNull(sameEntity);
        Assert.assertEquals(entity.getType(), sameEntity.getType());
        Assert.assertEquals(entity.getId(), sameEntity.getId());
    }
    // getRawProperty returns the raw byte representation of a property and
    // stays consistent with the stored value after it is overwritten.
    public void testRawProperty() {
        final StoreTransaction txn = getStoreTransaction();
        final Entity entity = txn.newEntity("Issue");
        entity.setProperty("description", "it doesn't work");
        txn.flush();
        Assert.assertEquals("Issue", entity.getType());
        Entity sameEntity = txn.getEntity(entity.getId());
        Assert.assertNotNull(sameEntity);
        Assert.assertEquals(entity.getType(), sameEntity.getType());
        Assert.assertEquals(entity.getId(), sameEntity.getId());
        // Raw bytes decode back to the original string via the store's
        // property-type registry.
        ByteIterable rawValue = entity.getRawProperty("description");
        Assert.assertNotNull(rawValue);
        Assert.assertEquals("it doesn't work", getEntityStore().getPropertyTypes().entryToPropertyValue(rawValue).getData());
        entity.setProperty("description", "it works");
        txn.flush();
        sameEntity = txn.getEntity(entity.getId());
        Assert.assertNotNull(sameEntity);
        Assert.assertEquals(entity.getType(), sameEntity.getType());
        Assert.assertEquals(entity.getId(), sameEntity.getId());
        rawValue = entity.getRawProperty("description");
        Assert.assertNotNull(rawValue);
        Assert.assertEquals("it works", getEntityStore().getPropertyTypes().entryToPropertyValue(rawValue).getData());
    }
    // Integer properties (positive and negative) survive a flush round-trip.
    public void testIntProperty() {
        final StoreTransaction txn = getStoreTransaction();
        final Entity entity = txn.newEntity("Issue");
        entity.setProperty("size", 100);
        entity.setProperty("minus_size", -100);
        txn.flush();
        Assert.assertEquals("Issue", entity.getType());
        final Entity sameEntity = txn.getEntity(entity.getId());
        Assert.assertNotNull(sameEntity);
        Assert.assertEquals(entity.getType(), sameEntity.getType());
        Assert.assertEquals(entity.getId(), sameEntity.getId());
        Assert.assertEquals(100, entity.getProperty("size"));
        Assert.assertEquals(-100, entity.getProperty("minus_size"));
    }
// Round-trips a long property whose value does not fit in an int (0x10000ffffL).
public void testLongProperty() {
    final StoreTransaction txn = getStoreTransaction();
    final Entity entity = txn.newEntity("Issue");
    entity.setProperty("length", 0x10000ffffL);
    txn.flush();
    Assert.assertEquals("Issue", entity.getType());
    final Entity sameEntity = txn.getEntity(entity.getId());
    Assert.assertNotNull(sameEntity);
    Assert.assertEquals(entity.getType(), sameEntity.getType());
    Assert.assertEquals(entity.getId(), sameEntity.getId());
    Assert.assertEquals(0x10000ffffL, entity.getProperty("length"));
}
// Round-trips a String property through a flush and re-read.
public void testStringProperty() {
    final StoreTransaction txn = getStoreTransaction();
    final Entity entity = txn.newEntity("Issue");
    entity.setProperty("description", "This is a test issue");
    txn.flush();
    Assert.assertEquals("Issue", entity.getType());
    final Entity sameEntity = txn.getEntity(entity.getId());
    Assert.assertNotNull(sameEntity);
    Assert.assertEquals(entity.getType(), sameEntity.getType());
    Assert.assertEquals(entity.getId(), sameEntity.getId());
    Assert.assertEquals("This is a test issue", entity.getProperty("description"));
}
// Round-trips double and float properties (positive and negative) and checks that
// each comes back with its exact value and original numeric type.
public void testDoubleAndFloatProperties() {
    final StoreTransaction txn = getStoreTransaction();
    final Entity entity = txn.newEntity("Issue");
    entity.setProperty("hitRate", 0.123456789);
    entity.setProperty("hitRate (float)", 0.123456789f);
    entity.setProperty("crude oil (WTI) price", -40.32);
    entity.setProperty("crude oil (WTI) price (float)", -40.32f);
    txn.flush();
    Assert.assertEquals("Issue", entity.getType());
    final Entity sameEntity = txn.getEntity(entity.getId());
    Assert.assertNotNull(sameEntity);
    Assert.assertEquals(entity.getType(), sameEntity.getType());
    Assert.assertEquals(entity.getId(), sameEntity.getId());
    Assert.assertEquals(0.123456789, entity.getProperty("hitRate"));
    Assert.assertEquals(0.123456789f, entity.getProperty("hitRate (float)"));
    Assert.assertEquals(-40.32, entity.getProperty("crude oil (WTI) price"));
    Assert.assertEquals(-40.32f, entity.getProperty("crude oil (WTI) price (float)"));
}
// Stores a timestamp (as a long) and checks it reads back equal and not in the future.
public void testDateProperty() {
    final StoreTransaction txn = getStoreTransaction();
    final Entity entity = txn.newEntity("Issue");
    final Date date = new Date();
    entity.setProperty("date", date.getTime());
    txn.flush();
    Assert.assertEquals("Issue", entity.getType());
    final Entity sameEntity = txn.getEntity(entity.getId());
    Assert.assertNotNull(sameEntity);
    Assert.assertEquals(entity.getType(), sameEntity.getType());
    Assert.assertEquals(entity.getId(), sameEntity.getId());
    final Comparable dateProp = entity.getProperty("date");
    Assert.assertNotNull(dateProp);
    Assert.assertEquals(date.getTime(), dateProp);
    // The stored instant must not be later than "now".
    Assert.assertTrue(new Date().getTime() >= (Long) dateProp);
}
// Round-trips a boolean property, first true then overwritten with false.
public void testBooleanProperty() {
    final StoreTransaction txn = getStoreTransaction();
    final Entity entity = txn.newEntity("Issue");
    entity.setProperty("ready", true);
    txn.flush();
    Assert.assertEquals("Issue", entity.getType());
    Entity sameEntity = txn.getEntity(entity.getId());
    Assert.assertNotNull(sameEntity);
    Assert.assertEquals(entity.getType(), sameEntity.getType());
    Assert.assertEquals(entity.getId(), sameEntity.getId());
    Assert.assertTrue((Boolean) entity.getProperty("ready"));
    entity.setProperty("ready", false);
    txn.flush();
    sameEntity = txn.getEntity(entity.getId());
    Assert.assertNotNull(sameEntity);
    Assert.assertEquals(entity.getType(), sameEntity.getType());
    Assert.assertEquals(entity.getId(), sameEntity.getId());
    Assert.assertNotNull(entity.getProperty("ready"));
    Assert.assertEquals(false, entity.getProperty("ready"));
}
// Stores String, int and double properties on one entity and reads all three back.
public void testHeterogeneousProperties() {
    final StoreTransaction txn = getStoreTransaction();
    final Entity entity = txn.newEntity("Issue");
    entity.setProperty("description", "This is a test issue");
    entity.setProperty("size", 100);
    entity.setProperty("rank", 0.5);
    txn.flush();
    Assert.assertEquals("Issue", entity.getType());
    final Entity sameEntity = txn.getEntity(entity.getId());
    Assert.assertNotNull(sameEntity);
    Assert.assertEquals(entity.getType(), sameEntity.getType());
    Assert.assertEquals(entity.getId(), sameEntity.getId());
    Assert.assertEquals("This is a test issue", entity.getProperty("description"));
    Assert.assertEquals(100, entity.getProperty("size"));
    Assert.assertEquals(0.5, entity.getProperty("rank"));
}
@SuppressWarnings("unchecked")
@TestFor(issues = "XD-509")
// Setting an empty ComparableSet must be a no-op: setProperty returns false
// and no property is recorded.
public void testComparableSetNewEmpty() {
    final StoreTransaction txn = getStoreTransaction();
    final Entity entity = txn.newEntity("Issue");
    Assert.assertFalse(entity.setProperty("subsystems", newComparableSet()));
    Assert.assertTrue(entity.getPropertyNames().isEmpty());
}
@SuppressWarnings("unchecked")
// Stores a ComparableSet (with a duplicate entry that should collapse) and checks
// the read-back value is a non-empty, non-dirty set equal to the one written.
public void testComparableSetNew() {
    final StoreTransaction txn = getStoreTransaction();
    final Entity entity = txn.newEntity("Issue");
    final ComparableSet<String> subsystems = newComparableSet("Search Parser", "Agile Board", "Full Text Index", "REST API", "Workflow", "Agile Board");
    entity.setProperty("subsystems", subsystems);
    txn.flush();
    Comparable propValue = entity.getProperty("subsystems");
    Assert.assertTrue(propValue instanceof ComparableSet);
    ComparableSet<String> readSet = (ComparableSet) propValue;
    Assert.assertFalse(readSet.isEmpty());
    // A freshly loaded set must not be marked dirty.
    Assert.assertFalse(readSet.isDirty());
    Assert.assertEquals(subsystems, propValue);
}
@SuppressWarnings("unchecked")
// Reads a stored ComparableSet, adds an item (which marks it dirty), writes it back,
// and verifies the persisted set contains the new item and is clean again.
public void testComparableSetAdd() {
    final StoreTransaction txn = getStoreTransaction();
    final Entity entity = txn.newEntity("Issue");
    final ComparableSet<String> subsystems = newComparableSet("Search Parser", "Agile Board");
    entity.setProperty("subsystems", subsystems);
    txn.flush();
    Comparable propValue = entity.getProperty("subsystems");
    Assert.assertTrue(propValue instanceof ComparableSet);
    ComparableSet<String> updateSet = (ComparableSet) propValue;
    updateSet.addItem("Obsolete Subsystem");
    Assert.assertTrue(updateSet.isDirty());
    entity.setProperty("subsystems", updateSet);
    txn.flush();
    propValue = entity.getProperty("subsystems");
    Assert.assertTrue(propValue instanceof ComparableSet);
    updateSet = (ComparableSet) propValue;
    Assert.assertFalse(updateSet.isEmpty());
    Assert.assertFalse(updateSet.isDirty());
    Assert.assertEquals(newComparableSet("Search Parser", "Agile Board", "Obsolete Subsystem"), propValue);
}
@SuppressWarnings("unchecked")
// Overwrites a stored ComparableSet with a superset and checks the superset persists.
public void testComparableSetAddAll() {
    final StoreTransaction txn = getStoreTransaction();
    final Entity entity = txn.newEntity("Issue");
    entity.setProperty("subsystems", newComparableSet("Search Parser", "Agile Board"));
    txn.flush();
    entity.setProperty("subsystems", newComparableSet("Search Parser", "Agile Board", "Obsolete Subsystem"));
    txn.flush();
    Comparable propValue = entity.getProperty("subsystems");
    Assert.assertTrue(propValue instanceof ComparableSet);
    Assert.assertEquals(newComparableSet("Search Parser", "Agile Board", "Obsolete Subsystem"), propValue);
}
@SuppressWarnings("unchecked")
// Reads a stored ComparableSet, removes an item (which marks it dirty), writes it back,
// and verifies the persisted set no longer contains the removed item.
public void testComparableSetRemove() {
    final StoreTransaction txn = getStoreTransaction();
    final Entity entity = txn.newEntity("Issue");
    final ComparableSet<String> subsystems = newComparableSet("Search Parser", "Agile Board");
    entity.setProperty("subsystems", subsystems);
    txn.flush();
    Comparable propValue = entity.getProperty("subsystems");
    Assert.assertTrue(propValue instanceof ComparableSet);
    ComparableSet<String> updateSet = (ComparableSet) propValue;
    updateSet.removeItem("Agile Board");
    Assert.assertTrue(updateSet.isDirty());
    entity.setProperty("subsystems", updateSet);
    txn.flush();
    propValue = entity.getProperty("subsystems");
    Assert.assertTrue(propValue instanceof ComparableSet);
    updateSet = (ComparableSet) propValue;
    Assert.assertFalse(updateSet.isEmpty());
    Assert.assertFalse(updateSet.isDirty());
    Assert.assertEquals(newComparableSet("Search Parser"), propValue);
}
@SuppressWarnings("unchecked")
@TestFor(issues = "XD-509")
// Overwriting a ComparableSet property with an empty set must delete the property.
public void testComparableSetClear() {
    final StoreTransaction txn = getStoreTransaction();
    final Entity entity = txn.newEntity("Issue");
    final ComparableSet<String> subsystems = newComparableSet("Search Parser", "Agile Board");
    entity.setProperty("subsystems", subsystems);
    txn.flush();
    entity.setProperty("subsystems", newComparableSet());
    txn.flush();
    Assert.assertNull(entity.getProperty("subsystems"));
}
// Builds a ComparableSet containing each of the given strings.
private ComparableSet<String> newComparableSet(String... values) {
    final ComparableSet<String> result = new ComparableSet<>();
    for (int i = 0; i < values.length; i++) {
        result.addItem(values[i]);
    }
    return result;
}
// Overwrites a property with a same-typed value, then deletes it and re-creates it
// with a different type (int); see the XD-262 note below.
public void testOverwriteProperty() {
    final StoreTransaction txn = getStoreTransaction();
    final Entity entity = txn.newEntity("Issue");
    entity.setProperty("description", "This is a test issue");
    txn.flush();
    Assert.assertEquals("This is a test issue", entity.getProperty("description"));
    entity.setProperty("description", "This is overriden test issue");
    txn.flush();
    Assert.assertEquals("This is overriden test issue", entity.getProperty("description"));
    entity.deleteProperty("description"); // for XD-262 I optimized this to prohibit such stuff
    entity.setProperty("description", 100);
    txn.flush();
    Assert.assertEquals(100, entity.getProperty("description"));
}
// Deletes a property and checks both the direct read and the property index
// (txn.find) no longer see the old value.
public void testDeleteProperty() {
    final StoreTransaction txn = getStoreTransaction();
    final Entity issue = txn.newEntity("Issue");
    issue.setProperty("description", "This is a test issue");
    txn.flush();
    Assert.assertEquals("This is a test issue", issue.getProperty("description"));
    issue.deleteProperty("description");
    txn.flush();
    Assert.assertNull(issue.getProperty("description"));
    final EntityIterable issues = txn.find("Issue", "description", "This is a test issue");
    Assert.assertFalse(issues.iterator().hasNext());
}
// Writes an issue/user pair with a link, re-initializes the store (reinit()),
// and verifies the data survives and can be read in a fresh transaction.
public void testReadingWithoutTransaction() throws Exception {
    StoreTransaction txn = getStoreTransaction();
    txn.getAll("Issue");
    try {
        final Entity issue = txn.newEntity("Issue");
        issue.setProperty("name", "my name");
        final Entity user = txn.newEntity("User");
        user.setProperty("name", "charisma user");
        issue.addLink("creator", user);
    } finally {
        txn.flush();
    }
    // Re-open the store and read everything back in a new transaction.
    reinit();
    txn = getStoreTransaction();
    for (final Entity issue : txn.getAll("Issue")) {
        Assert.assertEquals("my name", issue.getProperty("name"));
        final Iterable<Entity> users = issue.getLinks("creator");
        for (final Entity user : users) {
            Assert.assertEquals("charisma user", user.getProperty("name"));
        }
    }
}
// clearProperties() on the store must remove all properties of the entity at once.
public void testClearingProperties() {
    final PersistentStoreTransaction txn = getStoreTransaction();
    final PersistentEntity issue = txn.newEntity("Issue");
    issue.setProperty("description", "This is a test issue");
    issue.setProperty("size", 0);
    issue.setProperty("rank", 0.5);
    txn.flush();
    Assert.assertNotNull(issue.getProperty("description"));
    Assert.assertNotNull(issue.getProperty("size"));
    Assert.assertNotNull(issue.getProperty("rank"));
    getEntityStore().clearProperties(txn, issue);
    txn.flush();
    Assert.assertNull(issue.getProperty("description"));
    Assert.assertNull(issue.getProperty("size"));
    Assert.assertNull(issue.getProperty("rank"));
}
// Creates four issues, deletes every other one while iterating, and expects two left.
public void testDeleteEntities() {
    final StoreTransaction txn = getStoreTransaction();
    txn.newEntity("Issue");
    txn.newEntity("Issue");
    txn.newEntity("Issue");
    txn.newEntity("Issue");
    txn.flush();
    int i = 0;
    for (final Entity issue : txn.getAll("Issue")) {
        // Delete entities at even iteration positions (0 and 2).
        if ((i++ & 1) == 0) {
            issue.delete();
        }
    }
    txn.flush();
    Assert.assertEquals(2, (int) txn.getAll("Issue").size());
}
// renameEntityType() must move all existing entities to the new type name.
public void testRenameEntityType() {
    final StoreTransaction txn = getStoreTransaction();
    for (int i = 0; i < 10; ++i) {
        txn.newEntity("Issue");
    }
    txn.flush();
    Assert.assertTrue(txn.getAll("Issue").size() == 10);
    getEntityStore().renameEntityType("Issue", "Comment");
    txn.flush();
    //noinspection SizeReplaceableByIsEmpty
    Assert.assertTrue(txn.getAll("Issue").size() == 0);
    Assert.assertTrue(txn.getAll("Comment").size() == 10);
}
// Renaming an entity type that does not exist must throw IllegalArgumentException.
public void testRenameNonExistingEntityType() {
    final StoreTransaction txn = getStoreTransaction();
    for (int i = 0; i < 10; ++i) {
        txn.newEntity("Issue");
    }
    txn.flush();
    Assert.assertTrue(txn.getAll("Issue").size() == 10);
    TestUtil.runWithExpectedException(new Runnable() {
        @Override
        public void run() {
            // "Comment" was never created, so this rename must fail.
            getEntityStore().renameEntityType("Comment", "Issue");
        }
    }, IllegalArgumentException.class);
}
// Two threads repeatedly update the same entity's properties. flush() returns false
// on a conflicting concurrent change, so each iteration retries until its update
// commits; afterwards both properties must hold the final iteration's value.
public void testConcurrentSerializableChanges() throws InterruptedException {
    final Entity e1 = getEntityStore().computeInTransaction(new StoreTransactionalComputable<Entity>() {
        @Override
        public Entity compute(@NotNull final StoreTransaction txn) {
            return txn.newEntity("E");
        }
    });
    final int count = 100;
    final Runnable target = new Runnable() {
        @Override
        public void run() {
            final StoreTransaction txn = getEntityStore().beginTransaction();
            try {
                for (int i = 0; i <= count; ++i) {
                    // Retry the update until the optimistic flush succeeds.
                    do {
                        e1.setProperty("i", i);
                        e1.setProperty("s", Integer.toString(i));
                    } while (!txn.flush());
                }
            } finally {
                txn.abort();
            }
        }
    };
    final Thread t1 = new Thread(target);
    final Thread t2 = new Thread(target);
    t1.start();
    t2.start();
    t1.join();
    t2.join();
    getEntityStore().executeInReadonlyTransaction(new StoreTransactionalExecutable() {
        @Override
        public void execute(@NotNull final StoreTransaction txn) {
            Assert.assertEquals(count, e1.getProperty("i"));
            Assert.assertEquals(Integer.toString(count), e1.getProperty("s"));
        }
    });
}
// Two threads concurrently create entities of brand-new types ("Entity0", "Entity1", ...)
// and verify each freshly allocated type id matches the loop index, i.e. type-id
// allocation stays consistent under concurrency.
public void testConcurrentCreationTypeIdsAreOk() throws InterruptedException {
    final int count = 100;
    final boolean[] itsOk = {true};
    final Runnable target = new Runnable() {
        @Override
        public void run() {
            // int[] holder lets the anonymous inner class read the mutable index.
            for (final int[] i = {0}; i[0] <= count; ++i[0]) {
                if (!getEntityStore().computeInTransaction(new StoreTransactionalComputable<Boolean>() {
                    @Override
                    public Boolean compute(@NotNull StoreTransaction txn) {
                        final Entity e = txn.newEntity("Entity" + i[0]);
                        if (e.getId().getTypeId() != i[0]) {
                            itsOk[0] = false;
                            return false;
                        }
                        return true;
                    }
                })) {
                    break;
                }
            }
        }
    };
    final Thread t1 = new Thread(target);
    final Thread t2 = new Thread(target);
    t1.start();
    t2.start();
    t1.join();
    t2.join();
    Assert.assertTrue(itsOk[0]);
}
// Micro-benchmark comparing DataInputStream.readUTF() with UTFUtil.fromAsciiByteArray()
// when decoding the same ASCII string 10,000,000 times. Both runs assert correctness
// of every decoded string while TestUtil.time() reports the elapsed time.
public void testAsciiUTFDecodingBenchmark() {
    final String s = "This is sample ASCII string of not that great size, but large enough to use in the benchmark";
    TestUtil.time("Constructing string from data input", new Runnable() {
        @Override
        public void run() {
            try {
                final LightByteArrayOutputStream out = new LightByteArrayOutputStream();
                DataOutputStream output = new DataOutputStream(out);
                output.writeUTF(s);
                final InputStream stream = new ByteArraySizedInputStream(out.toByteArray(), 0, out.size());
                // mark/reset lets every iteration re-read the same encoded bytes.
                stream.mark(Integer.MAX_VALUE);
                for (int i = 0; i < 10000000; i++) {
                    stream.reset();
                    assertEquals(s, new DataInputStream(stream).readUTF());
                }
            } catch (IOException e) {
                throw ExodusException.toEntityStoreException(e);
            }
        }
    });
    TestUtil.time("Constructing strings from bytes", new Runnable() {
        @Override
        public void run() {
            // Java-style array declaration (was C-style "byte bytes[]").
            final byte[] bytes = s.getBytes();
            for (int i = 0; i < 10000000; i++) {
                assertEquals(s, UTFUtil.fromAsciiByteArray(bytes, 0, bytes.length));
            }
        }
    });
}
// After another transaction commits a property change, revert() on this transaction
// must refresh its caches so the new value becomes visible.
public void testTxnCachesIsolation() {
    final Entity issue = getEntityStore().computeInTransaction(new StoreTransactionalComputable<Entity>() {
        @Override
        public Entity compute(@NotNull StoreTransaction txn) {
            final Entity issue = txn.newEntity("Issue");
            issue.setProperty("description", "1");
            return issue;
        }
    });
    final PersistentStoreTransaction txn = getStoreTransaction();
    txn.revert();
    Assert.assertEquals("1", issue.getProperty("description"));
    // Commit a change from a separate transaction.
    getEntityStore().executeInTransaction(new StoreTransactionalExecutable() {
        @Override
        public void execute(@NotNull StoreTransaction txn) {
            issue.setProperty("description", "2");
        }
    });
    txn.revert();
    Assert.assertEquals("2", issue.getProperty("description"));
}
// A local uncommitted change that conflicts with a concurrently committed one must make
// flush() fail, and the conflicting committed value must then be the visible one.
public void testTxnCachesIsolation2() {
    final Entity issue = getEntityStore().computeInTransaction(new StoreTransactionalComputable<Entity>() {
        @Override
        public Entity compute(@NotNull StoreTransaction txn) {
            final Entity issue = txn.newEntity("Issue");
            issue.setProperty("description", "1");
            return issue;
        }
    });
    final PersistentStoreTransaction txn = getStoreTransaction();
    txn.revert();
    Assert.assertEquals("1", issue.getProperty("description"));
    // Local (not yet flushed) change...
    issue.setProperty("description", "2");
    // ...raced by a committed change from another transaction.
    getEntityStore().executeInTransaction(new StoreTransactionalExecutable() {
        @Override
        public void execute(@NotNull StoreTransaction txn) {
            issue.setProperty("description", "3");
        }
    });
    Assert.assertFalse(txn.flush());
    Assert.assertEquals("3", issue.getProperty("description"));
}
@TestFor(issues = "XD-530")
// store.clear() must wipe all data, and the store must remain fully usable afterwards:
// reads return null, and new entities/sequences can be created.
public void testEntityStoreClear() {
    final PersistentEntityStoreImpl store = getEntityStore();
    final Entity user = store.computeInTransaction(new StoreTransactionalComputable<Entity>() {
        @Override
        public Entity compute(@NotNull StoreTransaction txn) {
            final Entity result = txn.newEntity("User");
            result.setProperty("login", "penemue");
            return result;
        }
    });
    store.executeInReadonlyTransaction(new StoreTransactionalExecutable() {
        @Override
        public void execute(@NotNull StoreTransaction txn) {
            Assert.assertEquals("penemue", user.getProperty("login"));
        }
    });
    store.clear();
    store.executeInReadonlyTransaction(new StoreTransactionalExecutable() {
        @Override
        public void execute(@NotNull StoreTransaction txn) {
            // Data is gone after clear(); the stale Entity handle reads null.
            Assert.assertEquals(null, user.getProperty("login"));
        }
    });
    // The store must still accept new entity types and sequences.
    store.executeInTransaction(new StoreTransactionalExecutable() {
        @Override
        public void execute(@NotNull StoreTransaction txn) {
            txn.newEntity("UserProfile");
        }
    });
    store.executeInTransaction(new StoreTransactionalExecutable() {
        @Override
        public void execute(@NotNull StoreTransaction txn) {
            txn.getSequence("qwerty").increment();
        }
    });
}
// Phantom-link detection for setLink(); see setOrAddPhantomLink for the scenario.
public void testSetPhantomLink() {
    setOrAddPhantomLink(false);
}
// Phantom-link detection for addLink(); see setOrAddPhantomLink for the scenario.
public void testAddPhantomLink() {
    setOrAddPhantomLink(true);
}
// Shared scenario for the phantom-link tests: a background job deletes the target
// entity while another transaction links to it. With debugTestLinkedEntities on,
// the first attempt races with the delete and the retry must fail with
// PhantomLinkException, leaving no link behind. setLink selects setLink() vs addLink().
private void setOrAddPhantomLink(final boolean setLink) {
    final PersistentEntityStoreImpl store = getEntityStore();
    store.getEnvironment().getEnvironmentConfig().setGcEnabled(false);
    store.getConfig().setDebugTestLinkedEntities(true);
    final Entity issue = store.computeInTransaction(new StoreTransactionalComputable<Entity>() {
        @Override
        public Entity compute(@NotNull StoreTransaction txn) {
            return txn.newEntity("Issue");
        }
    });
    final Entity comment = store.computeInTransaction(new StoreTransactionalComputable<Entity>() {
        @Override
        public Entity compute(@NotNull StoreTransaction txn) {
            return txn.newEntity("Comment");
        }
    });
    // startBoth synchronizes the deleting job and the linking transaction so they race;
    // deleted signals that the comment has actually been removed.
    final CountDownLatch startBoth = new CountDownLatch(2);
    final Semaphore deleted = new Semaphore(0);
    DeferredIO.getJobProcessor().queue(new Job() {
        @Override
        protected void execute() {
            store.executeInTransaction(new StoreTransactionalExecutable() {
                @Override
                public void execute(@NotNull StoreTransaction txn) {
                    startBoth.countDown();
                    try {
                        startBoth.await();
                    } catch (InterruptedException ignore) {
                    }
                    comment.delete();
                    txn.flush();
                    deleted.release();
                }
            });
        }
    });
    // i counts executions of the linking transaction: the first attempt races the
    // delete, the retry must observe the phantom target and throw.
    final int[] i = {0};
    TestUtil.runWithExpectedException(new Runnable() {
        @Override
        public void run() {
            store.executeInTransaction(new StoreTransactionalExecutable() {
                @Override
                public void execute(@NotNull StoreTransaction txn) {
                    final boolean first = i[0] == 0;
                    if (first) {
                        startBoth.countDown();
                        try {
                            startBoth.await();
                        } catch (InterruptedException ignore) {
                        }
                    }
                    ++i[0];
                    if (setLink) {
                        issue.setLink("comment", comment);
                    } else {
                        issue.addLink("comment", comment);
                    }
                    if (first) {
                        // Make sure the delete has committed before our first flush.
                        deleted.acquireUninterruptibly();
                    }
                }
            });
        }
    }, PhantomLinkException.class);
    Assert.assertEquals(2, i[0]);
    store.executeInReadonlyTransaction(new StoreTransactionalExecutable() {
        @Override
        public void execute(@NotNull StoreTransaction txn) {
            Assert.assertNull(issue.getLink("comment"));
        }
    });
}
}
|
Malate–aspartate shuttle promotes l‐lactate oxidation in mitochondria
Metabolism in cancer cells is rewired to generate sufficient energy equivalents and anabolic precursors to support high proliferative activity. Within the context of these competing drives, aerobic glycolysis is inefficient for the cancer cell's energy economy. Therefore, many cancer types, including colon cancer, reprogram mitochondria-dependent processes to fulfill their elevated energy demands. Elevated glycolysis underlying the Warburg effect is an established signature of cancer metabolism. However, a growing number of studies show that mitochondria remain highly oxidative under glycolytic conditions. We hypothesized that the activities of glycolysis and oxidative phosphorylation are coordinated to maintain redox compartmentalization. We investigated the role of mitochondria-associated malate–aspartate and lactate shuttles in colon cancer cells as potential regulators that couple aerobic glycolysis and oxidative phosphorylation. We demonstrated that the malate–aspartate shuttle exerts control over NAD+/NADH homeostasis to maintain activity of mitochondrial lactate dehydrogenase and to enable aerobic oxidation of glycolytic l-lactate in mitochondria. The elevated glycolysis in cancer cells is proposed to be one of the mechanisms acquired to accelerate oxidative phosphorylation.
#pragma once
// IDE item template for a new C++ header: $fileinputname$ is a placeholder the
// template engine replaces with the new file's name, producing a classic include
// guard in addition to the #pragma once above.
#ifndef $fileinputname$_H_Included
#define $fileinputname$_H_Included
//TODO fill header
#endif // !$fileinputname$_H_Included
package com.cnpc.framework.utils;
import org.apache.log4j.Logger;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisPool;
import redis.clients.jedis.JedisPoolConfig;
import java.util.Set;
/**
 * Redis helper built on a single statically initialized {@link JedisPool}.
 * Provides string and byte[] accessors (byte values are used for session storage
 * with a TTL) and a simple counter with its own expiration.
 * <p>
 * Created by billJiang on 2017/4/10.
 * e-mail:<EMAIL> qq:475572229
 */
public class RedisUtil {

    private static JedisPool jedisPool;
    // Session lifetime in Redis: 30 minutes (30 * 60 seconds).
    private static int expireTime = 1800;
    // Default lifetime of counter keys: 2 days.
    private static int countExpireTime = 2 * 24 * 3600;
    // NOTE(review): placeholder value — the actual password is expected to be
    // substituted at build/deploy time; verify before use.
    private static String password = "<PASSWORD>";
    private static String redisIp = "127.0.0.1";
    private static int redisPort = 6379;
    private static int maxActive = 200;
    private static int maxIdle = 200;
    private static long maxWait = 5000;
    private static Logger logger = Logger.getLogger(RedisUtil.class);

    static {
        initPool();
    }

    /** Initializes the shared connection pool. */
    public static void initPool() {
        JedisPoolConfig config = new JedisPoolConfig();
        config.setMaxTotal(maxActive);
        config.setMaxIdle(maxIdle);
        config.setMaxWaitMillis(maxWait);
        config.setTestOnBorrow(false);
        jedisPool = new JedisPool(config, redisIp, redisPort, 10000, password);
    }

    /**
     * Borrows a connection from the pool.
     *
     * @return a {@link Jedis} connection, or {@code null} if none could be obtained
     */
    public static Jedis getJedis() {
        Jedis jedis = null;
        try {
            jedis = jedisPool.getResource();
            // jedis.auth(password); // authentication is handled by the pool constructor
        } catch (Exception e) {
            logger.error(e);
        }
        return jedis;
    }

    /** Returns a connection to the pool. */
    public static void recycleJedis(Jedis jedis) {
        if (jedis != null) {
            try {
                jedis.close();
            } catch (Exception e) {
                logger.error(e);
            }
        }
    }

    /** Stores a string value under the given key. */
    public static void setString(String key, String value) {
        Jedis jedis = getJedis();
        if (jedis != null) {
            try {
                jedis.set(key, value);
            } catch (Exception e) {
                logger.error(e);
            } finally {
                recycleJedis(jedis);
            }
        }
    }

    /**
     * Reads a string value.
     *
     * @return the stored value, or an empty string if the key is missing or an error occurred
     */
    public static String getString(String key) {
        Jedis jedis = getJedis();
        String result = "";
        if (jedis != null) {
            try {
                result = jedis.get(key);
            } catch (Exception e) {
                logger.error(e);
            } finally {
                recycleJedis(jedis);
            }
        }
        return result;
    }

    /** Deletes the given key. */
    public static void delString(String key) {
        Jedis jedis = getJedis();
        if (jedis != null) {
            try {
                jedis.del(key);
            } catch (Exception e) {
                logger.error(e);
            } finally {
                recycleJedis(jedis);
            }
        }
    }

    /**
     * Stores a byte[] value if the key does not yet exist, and (re)sets its
     * session expiration time in either case.
     */
    public static void setObject(byte[] key, byte[] value) {
        Jedis jedis = getJedis();
        if (jedis != null) {
            try {
                if (!jedis.exists(key)) {
                    jedis.set(key, value);
                }
                // Session expiration time in Redis.
                jedis.expire(key, expireTime);
            } catch (Exception e) {
                logger.error(e);
            } finally {
                recycleJedis(jedis);
            }
        }
    }

    /**
     * Reads a byte[] value.
     *
     * @return the stored bytes, or {@code null} if the key is missing or an error occurred
     */
    public static byte[] getObject(byte[] key) {
        Jedis jedis = getJedis();
        byte[] bytes = null;
        if (jedis != null) {
            try {
                bytes = jedis.get(key);
            } catch (Exception e) {
                logger.error(e);
            } finally {
                recycleJedis(jedis);
            }
        }
        return bytes;
    }

    /** Refreshes a byte[] key's expiration time (touch for session keep-alive). */
    public static void updateObject(byte[] key) {
        Jedis jedis = getJedis();
        if (jedis != null) {
            try {
                // Session expiration time in Redis.
                jedis.expire(key, expireTime);
            } catch (Exception e) {
                logger.error(e);
            } finally {
                recycleJedis(jedis);
            }
        }
    }

    /**
     * Increments the integer counter stored under key. A missing key is initialized
     * to "1" with the counter expiration time; an existing key is incremented.
     */
    public static void inc(String key) {
        Jedis jedis = getJedis();
        if (jedis != null) {
            try {
                if (!jedis.exists(key)) {
                    jedis.set(key, "1");
                    jedis.expire(key, countExpireTime);
                } else {
                    jedis.incr(key);
                }
            } catch (Exception e) {
                logger.error(e);
            } finally {
                recycleJedis(jedis);
            }
        }
    }

    /**
     * Returns all keys matching the given pattern, or {@code null} on error.
     */
    public static Set<String> getAllKeys(String pattern) {
        Jedis jedis = getJedis();
        if (jedis != null) {
            try {
                return jedis.keys(pattern);
            } catch (Exception e) {
                logger.error(e);
            } finally {
                recycleJedis(jedis);
            }
        }
        return null;
    }
}
|
package com.gc.system.mapper;
import com.gc.starter.crud.mapper.CrudBaseMapper;
import com.gc.system.model.SysRolePO;
/**
* @author jackson
* 2020/1/24 2:20 下午
*/
// MyBatis-style mapper for SysRolePO; all CRUD operations are inherited from CrudBaseMapper.
public interface SysRoleMapper extends CrudBaseMapper<SysRolePO> {
}
|
<filename>ternary_test.go
package ternary
import (
"fmt"
"math"
"reflect"
"strconv"
)
// ExampleGiveOnSuccess shows the happy path: the string parses,
// so the parsed value is used and the Else fallback is ignored.
func ExampleGiveOnSuccess() {
	fmt.Println(
		GiveOnSuccess(strconv.ParseFloat("-9.0", 64)).
			Else(math.NaN()),
	)
	// Output: -9
}
// ExampleGiveOnSuccess_second shows the error path: "-9.0.0" fails to parse,
// so the Else fallback (NaN) is returned.
func ExampleGiveOnSuccess_second() {
	fmt.Println(
		GiveOnSuccess(strconv.ParseFloat("-9.0.0", 64)).
			Else(math.NaN()),
	)
	// Output: NaN
}
// ExampleGiveOnSuccess_third chains fallbacks: each failed parse defers to the
// next candidate, so the first string that parses ("-7.0") wins.
func ExampleGiveOnSuccess_third() {
	fmt.Println(
		GiveOnSuccess(strconv.ParseFloat("-9.0.0", 64)).Else(
			GiveOnSuccess(strconv.ParseFloat("-8.0.0", 64)).Else(
				GiveOnSuccess(strconv.ParseFloat("-7.0", 64)).Else(
					math.NaN(),
				),
			),
		),
	)
	// Output: -7
}
// ExampleGiveOnOK uses the (value, ok) form twice: the struct field "Y" exists
// and carries the "tagged" tag, so its value "yes" is printed.
func ExampleGiveOnOK() {
	x := struct {
		Y string `tagged:"yes"`
	}{}
	fmt.Println(
		GiveOnOK(
			GiveOnOK(
				reflect.TypeOf(x).FieldByName("Y"),
			).Else(reflect.StructField{}).Tag.Lookup("tagged"),
		).Else("no tagged"),
	)
	// Output: yes
}
// ExampleGiveOnOK_second looks up a field ("N") that does not exist, so the
// outer Else fallback "no tagged" is printed.
func ExampleGiveOnOK_second() {
	x := struct {
		Y string `tagged:"yes"`
	}{}
	fmt.Println(
		GiveOnOK(
			GiveOnOK(
				reflect.TypeOf(x).FieldByName("N"),
			).Else(reflect.StructField{}).Tag.Lookup("tagged"),
		).Else("no tagged"),
	)
	// Output: no tagged
}
// ExampleGiveOnOK_third finds the field but its tag key is "bagged", not
// "tagged", so the tag lookup fails and the fallback "no tagged" is printed.
func ExampleGiveOnOK_third() {
	x := struct {
		Y string `bagged:"yes"`
	}{}
	fmt.Println(
		GiveOnOK(
			GiveOnOK(
				reflect.TypeOf(x).FieldByName("Y"),
			).Else(reflect.StructField{}).Tag.Lookup("tagged"),
		).Else("no tagged"),
	)
	// Output: no tagged
}
|
<gh_stars>1-10
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.aliyuncs.v5.vpc.transform.v20160428;
import java.util.ArrayList;
import java.util.List;
import com.aliyuncs.v5.vpc.model.v20160428.DescribeIPv6TranslatorsResponse;
import com.aliyuncs.v5.vpc.model.v20160428.DescribeIPv6TranslatorsResponse.Ipv6Translator;
import com.aliyuncs.v5.transform.UnmarshallerContext;
/**
 * Populates a {@link DescribeIPv6TranslatorsResponse} from an {@link UnmarshallerContext}
 * by reading dotted-path keys. Mechanically generated-style code: paging fields first,
 * then one Ipv6Translator per element of the Ipv6Translators array.
 */
public class DescribeIPv6TranslatorsResponseUnmarshaller {

    public static DescribeIPv6TranslatorsResponse unmarshall(DescribeIPv6TranslatorsResponse describeIPv6TranslatorsResponse, UnmarshallerContext _ctx) {
        // Top-level response metadata and paging info.
        describeIPv6TranslatorsResponse.setRequestId(_ctx.stringValue("DescribeIPv6TranslatorsResponse.RequestId"));
        describeIPv6TranslatorsResponse.setTotalCount(_ctx.integerValue("DescribeIPv6TranslatorsResponse.TotalCount"));
        describeIPv6TranslatorsResponse.setPageNumber(_ctx.integerValue("DescribeIPv6TranslatorsResponse.PageNumber"));
        describeIPv6TranslatorsResponse.setPageSize(_ctx.integerValue("DescribeIPv6TranslatorsResponse.PageSize"));

        // One Ipv6Translator entry per array element.
        List<Ipv6Translator> ipv6TranslatorsList = new ArrayList<Ipv6Translator>();
        for (int i = 0; i < _ctx.lengthValue("DescribeIPv6TranslatorsResponse.Ipv6Translators.Length"); i++) {
            Ipv6Translator ipv6Translator = new Ipv6Translator();
            ipv6Translator.setIpv6TranslatorId(_ctx.stringValue("DescribeIPv6TranslatorsResponse.Ipv6Translators["+ i +"].Ipv6TranslatorId"));
            ipv6Translator.setCreateTime(_ctx.longValue("DescribeIPv6TranslatorsResponse.Ipv6Translators["+ i +"].CreateTime"));
            ipv6Translator.setEndTime(_ctx.longValue("DescribeIPv6TranslatorsResponse.Ipv6Translators["+ i +"].EndTime"));
            ipv6Translator.setSpec(_ctx.stringValue("DescribeIPv6TranslatorsResponse.Ipv6Translators["+ i +"].Spec"));
            ipv6Translator.setName(_ctx.stringValue("DescribeIPv6TranslatorsResponse.Ipv6Translators["+ i +"].Name"));
            ipv6Translator.setDescription(_ctx.stringValue("DescribeIPv6TranslatorsResponse.Ipv6Translators["+ i +"].Description"));
            ipv6Translator.setStatus(_ctx.stringValue("DescribeIPv6TranslatorsResponse.Ipv6Translators["+ i +"].Status"));
            ipv6Translator.setBusinessStatus(_ctx.stringValue("DescribeIPv6TranslatorsResponse.Ipv6Translators["+ i +"].BusinessStatus"));
            ipv6Translator.setPayType(_ctx.stringValue("DescribeIPv6TranslatorsResponse.Ipv6Translators["+ i +"].PayType"));
            ipv6Translator.setBandwidth(_ctx.integerValue("DescribeIPv6TranslatorsResponse.Ipv6Translators["+ i +"].Bandwidth"));
            ipv6Translator.setAllocateIpv6Addr(_ctx.stringValue("DescribeIPv6TranslatorsResponse.Ipv6Translators["+ i +"].AllocateIpv6Addr"));
            ipv6Translator.setAllocateIpv4Addr(_ctx.stringValue("DescribeIPv6TranslatorsResponse.Ipv6Translators["+ i +"].AllocateIpv4Addr"));
            ipv6Translator.setAvailableBandwidth(_ctx.stringValue("DescribeIPv6TranslatorsResponse.Ipv6Translators["+ i +"].AvailableBandwidth"));
            ipv6Translator.setRegionId(_ctx.stringValue("DescribeIPv6TranslatorsResponse.Ipv6Translators["+ i +"].RegionId"));

            // Nested array of entry ids for this translator.
            List<String> ipv6TranslatorEntryIds = new ArrayList<String>();
            for (int j = 0; j < _ctx.lengthValue("DescribeIPv6TranslatorsResponse.Ipv6Translators["+ i +"].Ipv6TranslatorEntryIds.Length"); j++) {
                ipv6TranslatorEntryIds.add(_ctx.stringValue("DescribeIPv6TranslatorsResponse.Ipv6Translators["+ i +"].Ipv6TranslatorEntryIds["+ j +"]"));
            }
            ipv6Translator.setIpv6TranslatorEntryIds(ipv6TranslatorEntryIds);

            ipv6TranslatorsList.add(ipv6Translator);
        }
        describeIPv6TranslatorsResponse.setIpv6Translators(ipv6TranslatorsList);
        return describeIPv6TranslatorsResponse;
    }
}
// check if node size has been changed after this library is built
/* Sanity check that the application was compiled with the same node_t/edge_t
 * sizes as this library; print a diagnostic and abort via assert on mismatch.
 * Fixes a copy-paste bug: the edge-size branch previously reported "nodesize"
 * with the node values instead of the edge values. */
void gm_graph_check_if_size_is_correct(int node_size, int edge_size)
{
    if (node_size != sizeof(node_t)) {
        printf("Current node size in the application is %d, while the library expects %d. Please rebuild the library\n",
               node_size, (int)sizeof(node_t));
    }
    if (edge_size != sizeof(edge_t)) {
        printf("Current edge size in the application is %d, while the library expects %d. Please rebuild the library\n",
               edge_size, (int)sizeof(edge_t));
    }
    assert(node_size == sizeof(node_t));
    assert(edge_size == sizeof(edge_t));
}
<gh_stars>1-10
package top.yuan.test.aopAll;
import org.junit.jupiter.api.Test;
import top.yuan.context.support.ClassPathXmlApplicationContext;
/**
 * Created by Yuan.
 *
 * @author Yuan
 */
// Smoke test: loads the AOP Spring context from spring_aopAll.xml, resolves the
// "userService" bean and prints the result of queryUserInfo().
public class ApiTest {
    @Test
    public void test_aop_all() {
        ClassPathXmlApplicationContext applicationContext = new ClassPathXmlApplicationContext("classpath:spring_aopAll.xml");
        IUserService userService = applicationContext.getBean("userService", IUserService.class);
        System.out.println(userService.queryUserInfo());
    }
}
|
/******************************************************************************
* Copyright 2009-2018 Exactpro (Exactpro Systems Limited)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.exactpro.sf.common.messages.structures;
import static com.exactpro.sf.comparison.conversion.MultiConverter.convert;
import java.math.BigDecimal;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import com.exactpro.sf.common.impl.messages.xml.configuration.JavaType;
/**
 * Java interface for attributes: a named value with a declared
 * {@link JavaType} that can be converted on demand to common Java types.
 */
public interface IAttributeStructure {

    /**
     * Get the attribute's name
     * @return {@link String}
     */
    String getName();

    /**
     * Get the attribute's raw (string) value
     * @return {@link String}
     */
    String getValue();

    /**
     * Get the attribute's value already cast to its declared type
     * @param <T> expected type of the stored value
     * @return the stored value cast to {@code T}
     */
    <T> T getCastValue();

    /**
     * Get the attribute's declared type
     * @return {@link JavaType}
     */
    JavaType getType();

    /**
     * Returns attribute value as the specified type (converting it if required)
     */
    default <T> T getAs(Class<T> type) {
        return convert(getCastValue(), type);
    }

    /**
     * Returns attribute value as byte (converting it if required)
     */
    default byte asByte() {
        return convert(getCastValue(), byte.class);
    }

    /**
     * Returns attribute value as short (converting it if required)
     */
    default short asShort() {
        return convert(getCastValue(), short.class);
    }

    /**
     * Returns attribute value as integer (converting it if required)
     */
    default int asInteger() {
        return convert(getCastValue(), int.class);
    }

    /**
     * Returns attribute value as long (converting it if required)
     */
    default long asLong() {
        return convert(getCastValue(), long.class);
    }

    /**
     * Returns attribute value as float (converting it if required)
     */
    default float asFloat() {
        return convert(getCastValue(), float.class);
    }

    /**
     * Returns attribute value as double (converting it if required)
     */
    default double asDouble() {
        return convert(getCastValue(), double.class);
    }

    /**
     * Returns attribute value as boolean (converting it if required)
     */
    default boolean asBoolean() {
        return convert(getCastValue(), boolean.class);
    }

    /**
     * Returns attribute value as character (converting it if required)
     */
    default char asCharacter() {
        return convert(getCastValue(), char.class);
    }

    /**
     * Returns attribute value as BigDecimal (converting it if required)
     */
    default BigDecimal asBigDecimal() {
        return convert(getCastValue(), BigDecimal.class);
    }

    /**
     * Returns attribute value as LocalDate (converting it if required)
     */
    default LocalDate asDate() {
        return convert(getCastValue(), LocalDate.class);
    }

    /**
     * Returns attribute value as LocalTime (converting it if required)
     */
    default LocalTime asTime() {
        return convert(getCastValue(), LocalTime.class);
    }

    /**
     * Returns attribute value as LocalDateTime (converting it if required)
     */
    default LocalDateTime asDateTime() {
        return convert(getCastValue(), LocalDateTime.class);
    }

    /**
     * Returns attribute value as string (converting it if required)
     */
    default String asString() {
        return convert(getCastValue(), String.class);
    }
}
<reponame>melix99/gtk4-rs<gh_stars>1-10
// Take a look at the license at the top of the repository in the LICENSE file.
use crate::{Event, EventType};
use glib::translate::*;
use glib::StaticType;
use std::fmt;
use std::mem;
impl Event {
    // rustdoc-stripper-ignore-next
    /// Downcasts this generic event into the concrete event type `T`,
    /// handing the event back unchanged in `Err` when its runtime
    /// [`EventType`] does not match any of `T`'s event types.
    pub fn downcast<T: EventKind>(self) -> Result<T, Event> {
        unsafe {
            if T::event_types().contains(&self.event_type()) {
                // Runtime type verified above, so handing ownership of the
                // underlying GdkEvent pointer to `T` is sound.
                Ok(from_glib_full(self.to_glib_full()))
            } else {
                Err(self)
            }
        }
    }

    // rustdoc-stripper-ignore-next
    /// Borrows this event as the concrete event type `T`, or returns `None`
    /// when the runtime [`EventType`] does not match.
    pub fn downcast_ref<T: EventKind>(&self) -> Option<&T> {
        unsafe {
            if T::event_types().contains(&self.event_type()) {
                // Pointer cast relies on the concrete wrappers sharing
                // `Event`'s representation (see the `define_event!` macro).
                Some(&*(self as *const Event as *const T))
            } else {
                None
            }
        }
    }

    #[doc(alias = "gdk_events_get_angle")]
    #[doc(alias = "get_angle")]
    /// Angle between this event and `event`, as reported by
    /// `gdk_events_get_angle`; `None` when GDK cannot derive one for the pair.
    pub fn angle(&self, event: impl AsRef<Event>) -> Option<f64> {
        skip_assert_initialized!();
        unsafe {
            // Out-parameter: only initialized by the C call when it returns TRUE.
            let mut angle = mem::MaybeUninit::uninit();
            let ret = from_glib(ffi::gdk_events_get_angle(
                self.to_glib_none().0,
                event.as_ref().to_glib_none().0,
                angle.as_mut_ptr(),
            ));
            if ret {
                let angle = angle.assume_init();
                Some(angle)
            } else {
                None
            }
        }
    }

    #[doc(alias = "gdk_events_get_center")]
    #[doc(alias = "get_center")]
    /// `(x, y)` center between this event and `event`, as reported by
    /// `gdk_events_get_center`; `None` when GDK cannot derive it.
    pub fn center(&self, event: impl AsRef<Event>) -> Option<(f64, f64)> {
        skip_assert_initialized!();
        unsafe {
            // Two out-parameters, valid only when the call returns TRUE.
            let mut x = mem::MaybeUninit::uninit();
            let mut y = mem::MaybeUninit::uninit();
            let ret = from_glib(ffi::gdk_events_get_center(
                self.to_glib_none().0,
                event.as_ref().to_glib_none().0,
                x.as_mut_ptr(),
                y.as_mut_ptr(),
            ));
            if ret {
                let x = x.assume_init();
                let y = y.assume_init();
                Some((x, y))
            } else {
                None
            }
        }
    }

    #[doc(alias = "gdk_events_get_distance")]
    #[doc(alias = "get_distance")]
    /// Distance between this event and `event`, as reported by
    /// `gdk_events_get_distance`; `None` when GDK cannot derive it.
    pub fn distance(&self, event: impl AsRef<Event>) -> Option<f64> {
        skip_assert_initialized!();
        unsafe {
            // Out-parameter: only initialized by the C call when it returns TRUE.
            let mut distance = mem::MaybeUninit::uninit();
            let ret = from_glib(ffi::gdk_events_get_distance(
                self.to_glib_none().0,
                event.as_ref().to_glib_none().0,
                distance.as_mut_ptr(),
            ));
            if ret {
                let distance = distance.assume_init();
                Some(distance)
            } else {
                None
            }
        }
    }
}
// Debug output lists the accessors common to every event kind; concrete
// event types get this representation through their `Deref` to `Event`.
impl fmt::Debug for Event {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("Event")
            .field("event_type", &self.event_type())
            .field("history", &self.history())
            .field("modifier_state", &self.modifier_state())
            .field("pointer_emulated", &self.is_pointer_emulated())
            .field("position", &self.position())
            .field("time", &self.time())
            .field("triggers_context_menu", &self.triggers_context_menu())
            .finish()
    }
}
#[doc(hidden)]
// Identity conversion so generic code can accept `impl AsRef<Event>` and be
// called with an `Event` itself as well as with concrete event types.
impl AsRef<Event> for Event {
    fn as_ref(&self) -> &Self {
        self
    }
}
// rustdoc-stripper-ignore-next
/// A common trait implemented by the various [`Event`](crate::Event) types.
///
/// # Safety
///
/// The user is not supposed to implement this trait.
pub unsafe trait EventKind:
    StaticType + FromGlibPtrFull<*mut ffi::GdkEvent> + 'static
{
    /// The runtime [`EventType`]s a value of this concrete event type may
    /// carry; used by `Event::downcast`/`downcast_ref` to validate casts.
    fn event_types() -> &'static [EventType];
}
// Generates the boilerplate shared by every concrete event wrapper:
// `EventKind`, `Deref`/`AsRef` up to `Event`, `FromGlibPtrFull` for the raw
// GdkEvent pointer, and the `upcast`/`upcast_ref` helpers.
macro_rules! define_event {
    ($rust_type:ident, $ffi_type:path,$event_event_types:expr) => {
        // Associates the concrete type with the runtime event types it covers.
        unsafe impl crate::event::EventKind for $rust_type {
            fn event_types() -> &'static [crate::EventType] {
                $event_event_types
            }
        }
        // Every concrete event derefs to the generic `Event`; the cast
        // assumes the wrapper shares `Event`'s representation.
        impl std::ops::Deref for $rust_type {
            type Target = crate::Event;

            fn deref(&self) -> &Self::Target {
                unsafe { &*(self as *const $rust_type as *const crate::Event) }
            }
        }
        impl AsRef<crate::Event> for $rust_type {
            fn as_ref(&self) -> &crate::Event {
                self.upcast_ref()
            }
        }
        #[doc(hidden)]
        // Takes ownership of a raw `GdkEvent` pointer as the concrete type.
        impl glib::translate::FromGlibPtrFull<*mut ffi::GdkEvent> for $rust_type {
            unsafe fn from_glib_full(ptr: *mut ffi::GdkEvent) -> Self {
                glib::translate::FromGlibPtrFull::from_glib_full(ptr as *mut $ffi_type)
            }
        }
        impl $rust_type {
            // Consumes the concrete event, returning it as a generic `Event`
            // (same-representation transmute).
            pub fn upcast(self) -> crate::Event {
                unsafe { std::mem::transmute(self) }
            }

            // Borrows the concrete event as a generic `Event` via `Deref`.
            pub fn upcast_ref(&self) -> &crate::Event {
                &*self
            }
        }
    };
}
|
Intestinal TLR9 deficiency exacerbates hepatic IR injury via altered intestinal inflammation and short‐chain fatty acid synthesis
Mice deficient in intestinal epithelial TLR9 develop small intestinal Paneth cell hyperplasia and higher Paneth cell IL‐17A levels. Since small intestinal Paneth cells and IL‐17A play critical roles in hepatic ischemia reperfusion (IR) injury, we tested whether mice lacking intestinal TLR9 have increased hepatic IR injury. Mice lacking intestinal TLR9 had profoundly increased liver injury after hepatic IR compared to WT mice with exacerbated hepatocyte necrosis, apoptosis, neutrophil infiltration, and inflammatory cytokine generation. Moreover, we observed increased small intestinal inflammation and apoptosis after hepatic IR in intestinal TLR9 deficient mice. As a potential explanation for increased hepatic IR injury, fecal short‐chain fatty acids butyrate and propionate levels were lower in intestinal TLR9 deficient mice. Suggesting a potential therapy for hepatic IR, exogenous administration of butyrate or propionate protected against hepatic IR injury in intestinal TLR9 deficient mice. Mechanistically, butyrate induced small intestinal IL‐10 expression and downregulated the claudin‐2 expression. Finally, IL‐10 neutralization abolished the protective effects of butyrate against hepatic IR injury. Our studies show intestinal TLR9 deficiency results in exacerbated hepatic IR injury with increased small intestinal apoptosis and inflammation. Furthermore, short‐chain fatty acids butyrate and propionate protect against hepatic IR injury and intestinal apoptosis/inflammation in intestinal TLR9 deficient mice. |
Intramedullary Chondrosarcoma of Proximal Humerus
Primary chondrosarcoma is the third most frequent primary malignancy of bone after myeloma and osteosarcoma. It ranges from slow-growing nonmetastasising lesions to highly aggressive lesions. We report a case of primary intramedullary chondrosarcoma of the proximal humerus. A 60-year-old female presented with pain and hard swelling involving the left arm for 5 months. Radiograph showed a lucent expansile intramedullary lesion with matrix calcification and an associated soft tissue mass. CT confirmed the finding. MRI showed a lobulated lesion which is hyperintense on T2WI with low signal fibrous septae. Increased tracer uptake was seen on bone scan. Histopathology confirmed the radiology diagnosis. The patient underwent wide resection and endoprosthetic reconstruction of the proximal humerus.
Case Report
Primary chondrosarcoma is the third most common primary malignant tumor of bone after myeloma and osteosarcoma. It is most commonly seen between 30 and 70 years of age. We report a case of a sixty-year-old female who presented with gradually progressive pain and swelling over the proximal part of left arm since 5 months associated with restricted flexion, extension, and abduction of left shoulder. On inspection, loss of normal contour of left shoulder due to a diffuse swelling and asymmetric pectoral girdle ( Figure 1). Skin over the surface was normal, with mild prominence of the veins. On palpation, it is tender and measuring approximately 6 cm in length and 4 cm in breadth. It was bony hard in consistency and fixed in nature with immobile skin over the swelling. Clinical diagnosis was of a neoplastic musculoskeletal pathology. Radiographs of left shoulder showed an ill defined, expansile, and osteolytic lesion involving the cortical and medullary region of neck and proximal shaft of left humerus with a wide zone of transition (Figures 2 and 3). Few specks of calcifications were seen within it. It shows endosteal scalloping with cortical break and adjacent soft tissue component. In addition, a calcified nodular opacity was seen in the peripheral left lung mid zone. As computed tomography (CT) is useful in defining the bony anatomy, integrity of the cortex surrounding a lesion, and calcifications within, a helical CT scan of 5 mm thickness was done from the superior margin of left shoulder to mid arm level. It revealed that the osteolytic expansile lesion was seen with endosteal scalloping and cortical thinning. It contains matrix calcification, break in cortices at multiple sites with adjacent anterolateral soft tissue component, and specks of calcification within it ( Figure 4). The deltoid muscle in its anterior portion was thinned out and displaced.
MRI is known to best depict the tissue character, delineate the extent of bone marrow involvement, and pinpoint the effect of soft tissue masses on surrounding neurovascular structures. MRI of left arm was done on 1.5 Tesla machine using T1WI, T2WI, and GRE sequences in coronal, axial, and sagittal planes. There is a well-defined lobulated lesion which is predominantly hypointense on T1WI and hyperintense on T2WI with low signal septae (Figures 5,6,7,and 8). The glenohumeral joint space was normal and neurovascular bundle was not affected. Subsequently bone scan was run to look for any metastatic lesion elsewhere as 10-20% of chondrosarcomas are known to metastasise. The lesion in humerus revealed increased uptake of tracer 2 Case Reports in Radiology in 1st phase and high soft tissue pooling in 2nd phase. A focal tracer uptake seen in left mandible was probably related to dental pathology. Diffuse inhomogeneous increased tracer uptake was seen in the 3rd phase ( Figure 9). The nodular opacity in left mid zone that was seen on the X-ray was presumed to be a calcified granuloma as it showed no activity on bone scan. FNAC sample showed pleomorphic nuclei with vacuolated cytoplasm in chondromyxoid background ( Figure 10). Justification of diagnosis was made by comparing the possible features of chondrosarcoma with our case. As ENNEKINGS SYSTEM FOR STAGING: After a preanaesthetic assessment, the patient was taken up for surgery under general anaesthesia for wide resection and endoprosthetic reconstruction of proximal humerus.
Discussion
Chondrosarcomas account for the third most common primary tumour of the bone, after myeloma and osteosarcoma . This primary sarcoma of bone in adults has a male predominance and is seen between the 3rd and 7th decade of life, more common in male, as male : female ratio is 1.5 : 1. Usual clinical presentation of chondrosarcoma is pain, tenderness, with or without a mass, and a slow growth over an average duration of 1-2 year. The characteristic feature of chondrosarcomas is to produce coalescent cartilage lobules of varying sizes with often a necrotic or cystic centre . Chondrsarcoma is graded from 1 (low) to 3 (high). Low grade chondrsarcoma is very close in appearance to enchondroma and osteochondroma and has occasional binucleated cells. High grade chondrsarcoma have increased cellularity, atypia, and mitoses .
Skeletal Distribution.
The commonest sites are the pelvic bones, femur, humerus, and ribs followed by other sites such as the trunk, skull, and facial bones. Hands and feet are rarely involved. Peculiar forms are known to develop on laryngeal cartilage, base of the skull, or in soft tissue. Chondrosarcomas can occur on preexisting lesions. Central chondrosarcoma predominates in long bones and peripheral tumours in the pelvis and vertebrae.
Imaging.
Plain films allow depicting the location of the lesion to identify the cartilaginous nature as well as its aggressiveness. The most frequent type of lesion is central chondrosarcoma. The tumour begins in the metaphysis and extends to the diaphysis. It is a well-defined lytic lesion, associated with endosteal scalloping, cortical thinning, or thickening. High-grade tumours show irregular margins. Calcifications of the tumoral matrix may be punctate, flocculent, or have a ring-like pattern they can be small, or disseminated, dense, or subtle. Their absence is frequent in aggressive types. In the soft tissue, the mass is frequently huge and palpable when tumour has an extension. CT scan has a diagnostic role as it shows the bony destruction, the small calcifications, and the intra-and extraosseous extent.
In typical forms, MRI shows a lobulated lesion with a low or intermediate signal on T1-weighted images and a high-signal intensity on T2 . MRI shows the medullary involvement and the soft-tissue mass precisely. In diffusion, low-grade lesions show a lobulated pattern with enhanced septations after intravenous injection of contrast media. High-grade tumours do not have septations and show a more diffuse, heterogeneous enhancement. Benign and low-grade tumors cannot be differentiated by the MRI appearance of the matrix alone.
Differential Diagnosis.
The main differential diagnosis in chondromas is specially in the differentiation between a benign chondroma and a low-grade central chondrosarcoma . Features suggestive of a malignant lesion are pain, proximal location or a location on the axial skeleton, size being greater than 5 cm, a lobulated aspect, an ill-defined margin, endosteal erosion, and bone destruction with an extraosseous component . Biopsy is necessary to make the diagnosis. A metaphyseal lesion could suggest a chondromyxoid fibroma, while an epiphyseal lesion could suggest a chondroblastoma or a giant cell tumour (Figures 11 and 12). Fibrous dysplasia or a bone infarction can be misdiagnosed as chondrosarcomas; the lack of cortical erosion or of soft-tissue mass would suggest something other than a chondrosarcoma. New immunohistochemistry techniques contribute to the differentiation of malignant lesions . More rarely, a lytic lesion can be considered with a lytic form of osteosarcoma or fibrosarcoma, a plasmocytoma or a metastasis. . This is a rare form representing 1-2% of all cases of chondrosarcomas. The Case Reports in Radiology 5 Figure 11: Excision of lesion was done with affected soft tissue and bone. growth of the tumour begins at the surface of the bone (usually metaphysis of the distal femur or proximal humerus) and develops in the soft tissues as a lobulated mass. The lesion is usually well differentiated and grows slower than central forms. The cortex is never normal, either eroded or often thickened by the tumour, but never destroyed. Ring-like calcifications can be disseminated or localised within the mass. Medullary involvement evaluated on CT or MRI is rare and limited. Uncalcified tumoral nodules are hypodense on CT and show a high signal on T2-weighted MRI. Satellite nodules can be depicted and separated from the principal lesion. The outcome is generally favourable after an appropriate surgical resection.
Periosteal Chondrosarcomas
The differentiation from an osteochondroma is generally easy. The diagnosis of periosteal chondroma can be made by histology alone. Patients are younger and lesions are smaller, not painful, and are located more distally on the skeleton. The periosteal osteosarcoma is more often located on the diaphysis and has reactionary cortical spiculations.
Mesenchymal
Chondrosarcoma. This entity represents 2-3% of all chondrosarcomas and combines an undifferentiated cell component with well-differentiated cartilaginous areas . The diagnosis is only made on this biphasic aspect. The average age of the patients is 26 years. Common skeletal sites are the femur, pelvic bones, ribs, and vertebrae. Extraosseous site involvement such as brain, meninges, or soft tissues is seen in about one-third of cases. The prognosis is poor, with early pulmonary, bony, and lymph nodes metastases. The tumours are large, destructive lesions with a purely lytic pattern. Calcified masses can be found. The lesion appears of low signal intensity on T1-weighted MRI and heterogeneous high signal on T2 images . Multidrug chemotherapy used in osteosarcomas can be combined with surgery and radiotherapy, but the 10-year survival is only 28%.
Clear Cell Chondrosarcoma.
In rare forms (2%) of chondrosarcoma, these lesions are distinguished by their cytology, epiphyseal location in long bones, and slow evolution . There is a male predominance and patients are in the third to fifth decade. Clinical symptoms are pain and swelling may last up to almost 23 years. Some tumours may be an incidental finding; pathological fractures have been reported in one-quarter of cases. The commonest sites are the femur, humerus and tibia. This low-grade tumour shows a geographic lytic epiphyseal lesion with extension to the metaphysis. The margins can be well defined, but indistinct or sclerotic margins have also been described. Calcifications of the tumoral matrix are not always present. There is no extension to the soft tissues. Periosteal reactions are unusual. CT may be useful to depict lobulated margins and calcified matrix. MRI shows a well-delineated low signal on T1-weighted images and heterogeneous high signal on T2 images. The main differential diagnosis is the chondroblastoma, which is a smaller lesion in younger patients. The other differential diagnoses include giant cell tumours and other epiphyseal tumours.
The treatment is radical surgery. The prognosis is good with a 5-year survival of 92% , even though metastases are found in 15% of cases (lung, brain, and bones).
Dedifferentiated
Chondrosarcoma. This form represents 10-12% of all chondrosarcomas . It is characterised by a special histology and very poor prognosis. Pain and swelling are the usual clinical symptoms as well as pathological fractures . The commonest locations are the femur, the acetabulum, and the proximal humerus.
These metaphyseal or diaphyseal lesions are rapidly destructive. Osteolytic lesion is associated with calcifications resulting in the biomorphic pattern . A huge soft-tissue mass without calcifications, seen on CT or MRI, is also indicative for this diagnosis. Imaging helps to direct biopsy of the lytic area in order to improve the histological diagnosis. The treatment involves surgery and adjuvant chemotherapy or radiotherapy (Figures 13, 14, and 15). The prognosis is very poor, with an overall 5-year survival rate of only 8-13%. The metastases appear in the lungs but also in unusual sites such as the adrenal gland, brain, and liver.
Secondary Chondrosarcoma.
Twelve percent of all chondrosarcomas are developed in a preexisting lesion. It may be secondary to a solitary osteochondroma, osteochondromatosis, enchondromatosis (Ollier's disease), fibrous dysplasia, Paget's disease, irradiated bone, or synovial chondromatosis . In osteochondromatosis, the risk of sarcomatous transformation is 5-25%; it is 25-50% in enchondromatosis and nearly 100% in Maffucci's syndrome . The increased size of an enchondroma, lytic area with cortical destruction, associated with pain or fracture are features suggestive of malignant transformation. An enlarging exostosis associated with pain, the appearance of a less mineralized zone in the cartilage cap, calcifications in the soft tissues, and the thickening of the cap (>1 cm) on CT and MRI suggest sarcomatous transformation . |
<gh_stars>10-100
package cmd
import (
"fmt"
"os"
"sort"
"github.com/go-qamel/qamel/internal/config"
"github.com/spf13/cobra"
)
// profileListCmd returns the `profile list` subcommand (alias `ls`), which
// prints the name of every existing profile, one per line.
func profileListCmd() *cobra.Command {
	return &cobra.Command{
		Use:     "list",
		Short:   "List all existing profile",
		Args:    cobra.NoArgs,
		Aliases: []string{"ls"},
		Run:     profileListHandler,
	}
}
// profileListHandler prints the names of all existing profiles to stdout in
// alphabetical order, one per line. It exits the process with status 1 when
// the profile configuration cannot be loaded.
func profileListHandler(cmd *cobra.Command, args []string) {
	// Load existing profiles
	profiles, err := config.LoadProfiles(configPath)
	if err != nil {
		cRedBold.Println("Failed to get list of profile:", err)
		os.Exit(1)
	}

	// Collect the profile names (pre-sized to avoid repeated growth) and
	// sort them for deterministic output.
	names := make([]string, 0, len(profiles))
	for key := range profiles {
		names = append(names, key)
	}
	sort.Strings(names)

	for _, name := range names {
		fmt.Println(name)
	}
}
|
Book Review: The Economic Basis of Ethnic Solidarity: Small Business in the Japanese American Community
The second part of the volume may give it a continuing usefulness long after the historiographical discussion has become outdated. This contains accounts of the resources and research possibilities of various German archives by archivists and other scholars familiar with them. A model of the type is the discussion by Wolf-Heino Struck, who surveys the several state archives of present-day Hesse. He systematically outlines the various predecessor governments and the location of their records; the principal classes of materials concerning emigration and the way in which they are generated; research and publications based upon these sources; and suggestions for further investigations. There are similar discussions (not all as fully detailed) of archives in the Palatinate, Baden, Württemberg, and Hamburg. There are also more general treatments of historical materials on Swiss and Austrian emigration. American immigration scholars contemplating research in Germany will find these essays invaluable.
<reponame>hugograf/grafioschtrader
package grafioschtrader.reports;
import java.time.LocalDate;
import java.time.Month;
import java.time.ZoneId;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.function.Function;
import java.util.stream.Collectors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.jpa.repository.Modifying;
import org.springframework.security.core.context.SecurityContextHolder;
import org.springframework.stereotype.Component;
import org.springframework.transaction.annotation.Transactional;
import grafioschtrader.GlobalConstants;
import grafioschtrader.common.DataHelper;
import grafioschtrader.dto.ISecuritycurrencyIdDateClose;
import grafioschtrader.entities.Currencypair;
import grafioschtrader.entities.Historyquote;
import grafioschtrader.entities.Security;
import grafioschtrader.entities.Securitycurrency;
import grafioschtrader.entities.Securitysplit;
import grafioschtrader.entities.Securitysplit.SplitFactorAfterBefore;
import grafioschtrader.entities.Tenant;
import grafioschtrader.entities.Transaction;
import grafioschtrader.entities.User;
import grafioschtrader.entities.Watchlist;
import grafioschtrader.instrument.SecurityCalcService;
import grafioschtrader.reportviews.DateTransactionCurrencypairMap;
import grafioschtrader.reportviews.securityaccount.SecurityPositionSummary;
import grafioschtrader.reportviews.securitycurrency.SecuritycurrencyGroup;
import grafioschtrader.reportviews.securitycurrency.SecuritycurrencyPosition;
import grafioschtrader.repository.CurrencypairJpaRepository;
import grafioschtrader.repository.GlobalparametersJpaRepository;
import grafioschtrader.repository.HistoryquoteJpaRepository;
import grafioschtrader.repository.IPositionCloseOnLatestPrice;
import grafioschtrader.repository.SecurityJpaRepository;
import grafioschtrader.repository.SecuritysplitJpaRepository;
import grafioschtrader.repository.TenantJpaRepository;
import grafioschtrader.repository.TransactionJpaRepository;
import grafioschtrader.repository.WatchlistJpaRepository;
/**
 * Prepares the Data for every kind of Watchlists: plain listings, period
 * performance views, data-feed health checks and split/dividend marks.
 *
 * @author <NAME>
 *
 */
@Component
public class WatchlistReport {

  // Logger for reporting problems in the asynchronous lookups.
  private final Logger log = LoggerFactory.getLogger(this.getClass());

  // Repositories and services used to assemble the watchlist views;
  // all dependencies are injected by Spring.
  @Autowired
  private TenantJpaRepository tenantJpaRepository;

  @Autowired
  private GlobalparametersJpaRepository globalparametersJpaRepository;

  @Autowired
  private WatchlistJpaRepository watchlistJpaRepository;

  @Autowired
  private TransactionJpaRepository transactionJpaRepository;

  @Autowired
  private SecurityCalcService securityCalcService;

  @Autowired
  private SecurityJpaRepository securityJpaRepository;

  @Autowired
  private CurrencypairJpaRepository currencypairJpaRepository;

  @Autowired
  private HistoryquoteJpaRepository historyquoteJpaRepository;

  @Autowired
  private SecuritysplitJpaRepository securitysplitJpaRepository;
  /**
   * Returns the watchlist with the youngest date of history quote for each
   * instrument. This should help to detect non working historical data feeds:
   * a stale date points at a broken feed for that security or currency pair.
   *
   * The history quote lookup and the two "used elsewhere" checks run
   * concurrently as CompletableFutures and are joined at the end.
   *
   * @param idWatchlist id of the watchlist to load
   * @return the watchlist group enriched with youngest history quote dates
   *         and "used elsewhere" marks
   * @throws InterruptedException if waiting for an asynchronous lookup is
   *         interrupted
   * @throws ExecutionException if an asynchronous lookup failed
   */
  public SecuritycurrencyGroup getWatchlistWithoutUpdateAndMaxHistoryquote(final Integer idWatchlist)
      throws InterruptedException, ExecutionException {
    // The authenticated user supplies the tenant id used to scope the queries.
    final User user = (User) SecurityContextHolder.getContext().getAuthentication().getDetails();
    final CompletableFuture<List<Historyquote>> historyquoteCF = CompletableFuture
        .supplyAsync(() -> historyquoteJpaRepository
            .getYoungestFeedHistorquoteForSecuritycurrencyByWatchlist(idWatchlist, user.getIdTenant()));
    final CompletableFuture<int[]> securitiesIsUsedElsewhereCF = CompletableFuture
        .supplyAsync(() -> watchlistJpaRepository.watchlistSecuritiesHasTransactionOrOtherWatchlist(idWatchlist));
    final CompletableFuture<int[]> currencypairIsUsedElsewhereCF = CompletableFuture
        .supplyAsync(() -> watchlistJpaRepository.watchlistCurrencypairsHasReferencesButThisWatchlist(idWatchlist));
    // Block until all lookups are done and merge their results into the group.
    return combineSecuritycurrencyGroupWithHistoryquote(getWatchlistWithoutUpdate(idWatchlist), historyquoteCF.get(),
        securitiesIsUsedElsewhereCF.get(), currencypairIsUsedElsewhereCF.get());
  }
  /**
   * Attaches the youngest history quote date to every security and currency
   * pair position of the group and applies the "used elsewhere" marks.
   */
  private SecuritycurrencyGroup combineSecuritycurrencyGroupWithHistoryquote(
      final SecuritycurrencyGroup securitycurrencyGroup, final List<Historyquote> historyquotes,
      final int[] securitiesIsUsedElsewhereIds, final int[] currencypairIsUsedElsewhereIds) {
    // Compare solely by instrument id; the quote list is searched by that key.
    final Comparator<Historyquote> historyquoteComparator = (h1, h2) -> h1.getIdSecuritycurrency()
        .compareTo(h2.getIdSecuritycurrency());
    // Reusable probe object for the binary searches below.
    final Historyquote searchHistoryquote = new Historyquote();
    securitycurrencyGroup.securityPositionList
        .forEach(securitycurrencyPosition -> combineSecuritycurrencyHistoryquote(securitycurrencyPosition,
            searchHistoryquote, historyquotes, historyquoteComparator));
    securitycurrencyGroup.currencypairPositionList
        .forEach(securitycurrencyPosition -> combineSecuritycurrencyHistoryquote(securitycurrencyPosition,
            searchHistoryquote, historyquotes, historyquoteComparator));
    this.markForUsedSecurityCurrencypairs(securitycurrencyGroup, securitiesIsUsedElsewhereIds,
        currencypairIsUsedElsewhereIds);
    return securitycurrencyGroup;
  }
  /**
   * Looks up the history quote for a single position via binary search on the
   * instrument id and, when found, stores its date on the position.
   * NOTE(review): Collections.binarySearch requires {@code historyquotes} to
   * be sorted by idSecuritycurrency — confirm the query guarantees this order.
   */
  private <T extends Securitycurrency<T>> void combineSecuritycurrencyHistoryquote(
      final SecuritycurrencyPosition<T> securitycurrencyPosition, final Historyquote searchHistoryquote,
      final List<Historyquote> historyquotes, final Comparator<Historyquote> historyquoteComparator) {
    searchHistoryquote.setIdSecuritycurrency(securitycurrencyPosition.securitycurrency.getIdSecuritycurrency());
    final int index = Collections.binarySearch(historyquotes, searchHistoryquote, historyquoteComparator);
    if (index >= 0) {
      // A quote exists for this instrument: remember its date.
      securitycurrencyPosition.youngestHistoryDate = historyquotes.get(index).getDate();
    }
  }
  /**
   * Returns the watchlist where each security position is flagged when the
   * security has ever had a split or dividend, together with the
   * "used elsewhere" marks. The three lookups run concurrently.
   *
   * @param idWatchlist id of the watchlist to load
   * @throws InterruptedException if waiting for an asynchronous lookup is
   *         interrupted
   * @throws ExecutionException if an asynchronous lookup failed
   */
  public SecuritycurrencyGroup getWatchlistForSplitAndDividend(final Integer idWatchlist)
      throws InterruptedException, ExecutionException {
    final CompletableFuture<Set<Integer>> securitiesIdsCF = CompletableFuture
        .supplyAsync(() -> watchlistJpaRepository.hasSplitOrDividendByWatchlist(idWatchlist));
    final CompletableFuture<int[]> securitiesIsUsedElsewhereCF = CompletableFuture
        .supplyAsync(() -> watchlistJpaRepository.watchlistSecuritiesHasTransactionOrOtherWatchlist(idWatchlist));
    final CompletableFuture<int[]> currencypairIsUsedElsewhereCF = CompletableFuture
        .supplyAsync(() -> watchlistJpaRepository.watchlistCurrencypairsHasReferencesButThisWatchlist(idWatchlist));
    return combineWatchlistWithDividendSplitMark(getWatchlistWithoutUpdate(idWatchlist), securitiesIdsCF.get(),
        securitiesIsUsedElsewhereCF.get(), currencypairIsUsedElsewhereCF.get());
  }
  /**
   * Applies the "used elsewhere" marks and sets, for every security position,
   * whether its security id appears in {@code securitiesIds} (the set of
   * securities that ever had a split or dividend).
   */
  private SecuritycurrencyGroup combineWatchlistWithDividendSplitMark(final SecuritycurrencyGroup securitycurrencyGroup,
      Set<Integer> securitiesIds, final int[] securitiesIsUsedElsewhereIds,
      final int[] currencypairIsUsedElsewhereIds) {
    markForUsedSecurityCurrencypairs(securitycurrencyGroup, securitiesIsUsedElsewhereIds,
        currencypairIsUsedElsewhereIds);
    securitycurrencyGroup.securityPositionList.forEach(
        spl -> spl.watchlistSecurityHasEver = securitiesIds.contains(spl.securitycurrency.getIdSecuritycurrency()));
    return securitycurrencyGroup;
  }
  /////////////////////////////////////////////////////////////
  // Get Watchlist - Report
  /////////////////////////////////////////////////////////////

  /**
   * Returns the watchlist without refreshing any prices, marking for each
   * security whether the current tenant has an open or closed transaction.
   */
  public SecuritycurrencyGroup getWatchlistWithoutUpdate(final Integer idWatchlist) {
    final User user = (User) SecurityContextHolder.getContext().getAuthentication().getDetails();
    // Fetch the tenant's transaction marks while the watchlist is assembled.
    final CompletableFuture<int[]> watchlistSecurtiesTransactionCF = CompletableFuture
        .supplyAsync(() -> watchlistJpaRepository
            .watchlistSecuritiesHasOpenOrClosedTransactionForThisTenant(idWatchlist, user.getIdTenant()));
    SecuritycurrencyGroup securitycurrencyGroup = createWatchlistWithoutUpdate(idWatchlist, user.getIdTenant());
    return combineSecuritycurrencyGroupWithSecurtiesTransaction(securitycurrencyGroup,
        watchlistSecurtiesTransactionCF.join());
  }
@Transactional
@Modifying
public SecuritycurrencyGroup getWatchlistwithPeriodPerformance(final Integer idWatchlist, final Integer idTenant,
final Integer daysTimeFrame) {
final Watchlist watchlist = watchlistJpaRepository.getById(idWatchlist);
if (!watchlist.getIdTenant().equals(idTenant)) {
throw new SecurityException(GlobalConstants.CLIENT_SECURITY_BREACH);
}
final Map<Integer, List<Securitysplit>> securitysplitMap = securitysplitJpaRepository
.getSecuritysplitMapByIdWatchlist(idWatchlist);
// Currency conversion is not used since watchlist only calculates security
// gains in the currency of the instrument
final DateTransactionCurrencypairMap dateCurrencyMap = null;
final LocalDate dateTimeFrame = LocalDate.now().minusDays(daysTimeFrame);
Tenant tenant = tenantJpaRepository.getById(watchlist.getIdTenant());
final CompletableFuture<SecurityCurrency> securityCurrencyCF = CompletableFuture
.supplyAsync(() -> updateLastPrice(tenant, watchlist));
final CompletableFuture<Map<Integer, ISecuritycurrencyIdDateClose>> historyquoteMaxDayCF = CompletableFuture
.supplyAsync(() -> getMaxDayHistoryquotesByIdWatchlist(idWatchlist));
final CompletableFuture<Map<Integer, ISecuritycurrencyIdDateClose>> historyquoteLastDayYearCF = CompletableFuture
.supplyAsync(() -> getLastDayOfLastYearHistoryquotesByIdWatchlist(idWatchlist));
final CompletableFuture<Map<Integer, ISecuritycurrencyIdDateClose>> historyquoteTimeFrameCF = CompletableFuture
.supplyAsync(() -> getTimeFrameHistoryquotesByIdWatchlistAndDate(idWatchlist, dateTimeFrame));
final CompletableFuture<int[]> securitiesIsUsedElsewhereCF = CompletableFuture
.supplyAsync(() -> this.watchlistJpaRepository.watchlistSecuritiesHasTransactionOrOtherWatchlist(idWatchlist));
final CompletableFuture<int[]> currencypairIsUsedElsewhereCF = CompletableFuture.supplyAsync(
() -> this.watchlistJpaRepository.watchlistCurrencypairsHasReferencesButThisWatchlist(idWatchlist));
final CompletableFuture<int[]> watchlistSecurtiesTransactionCF = CompletableFuture
.supplyAsync(() -> this.watchlistJpaRepository
.watchlistSecuritiesHasOpenOrClosedTransactionForThisTenant(idWatchlist, idTenant));
final CompletableFuture<SecuritycurrencyGroup> cf = CompletableFuture
.allOf(securityCurrencyCF, historyquoteMaxDayCF, historyquoteLastDayYearCF, historyquoteTimeFrameCF,
securitiesIsUsedElsewhereCF)
.thenApply(ignoredVoid -> combineLastPriceHistoryquote(tenant, securityCurrencyCF.join(),
historyquoteMaxDayCF.join(), historyquoteLastDayYearCF.join(), historyquoteTimeFrameCF.join(),
securitiesIsUsedElsewhereCF.join(), currencypairIsUsedElsewhereCF.join(),
watchlistSecurtiesTransactionCF.join(), watchlist, daysTimeFrame, securitysplitMap, dateCurrencyMap));
final SecuritycurrencyGroup securitycurrencyGroup = cf.join();
securitycurrencyGroup.idWatchlist = idWatchlist;
return securitycurrencyGroup;
}
  /**
   * Marks every security position of the group that ever had an open or closed transaction for the
   * tenant and returns the (mutated) group.
   *
   * @param securitycurrencyGroup               group whose security positions are flagged in place
   * @param watchlistSecuritesHasTransactionIds security ids with such a transaction; must be sorted
   *                                            because a binary search is applied downstream
   * @return the given group, for call chaining
   */
  private SecuritycurrencyGroup combineSecuritycurrencyGroupWithSecurtiesTransaction(
      SecuritycurrencyGroup securitycurrencyGroup, int[] watchlistSecuritesHasTransactionIds) {
    markWatchlistSecurityHasEverTransactionTenant(watchlistSecuritesHasTransactionIds,
        securitycurrencyGroup.securityPositionList);
    return securitycurrencyGroup;
  }
private SecuritycurrencyGroup createWatchlistWithoutUpdate(final Integer idWatchlist, final Integer idTenant) {
final Watchlist watchlist = watchlistJpaRepository.getById(idWatchlist);
if (!watchlist.getIdTenant().equals(idTenant)) {
throw new SecurityException(GlobalConstants.CLIENT_SECURITY_BREACH);
}
final List<SecuritycurrencyPosition<Security>> securityPositionList = createSecuritycurrencyPositionList(
watchlist.getSecuritycurrencyListByType(Security.class));
final List<SecuritycurrencyPosition<Currencypair>> currencypairPositionList = createSecuritycurrencyPositionList(
watchlist.getSecuritycurrencyListByType(Currencypair.class));
return new SecuritycurrencyGroup(securityPositionList, currencypairPositionList, watchlist.getLastTimestamp(),
watchlist.getIdWatchlist());
}
private SecurityCurrency updateLastPrice(Tenant tenant, Watchlist watchlist) {
final List<Security> securities = watchlist.getSecuritycurrencyListByType(Security.class);
final List<Currencypair> currencypairs = watchlist.getSecuritycurrencyListByType(Currencypair.class);
final Date timeframe = new Date(
System.currentTimeMillis() - 1000 * globalparametersJpaRepository.getWatchlistIntradayUpdateTimeout());
if (watchlist.getLastTimestamp() == null || timeframe.after(watchlist.getLastTimestamp())) {
watchlist.setLastTimestamp(new Date(System.currentTimeMillis()));
watchlist = watchlistJpaRepository.save(watchlist);
log.info("Intraday update for {}", watchlist.getName());
updateDependingCurrencyWhenPerformanceWatchlist(tenant, watchlist, currencypairs);
return new SecurityCurrency(
securityJpaRepository.updateLastPriceByList(watchlist.getSecuritycurrencyListByType(Security.class)),
currencypairJpaRepository.updateLastPriceByList(watchlist.getSecuritycurrencyListByType(Currencypair.class)));
} else {
log.info("No intraday update for {} because last update was at {} and is not after {}", watchlist.getName(),
watchlist.getLastTimestamp(), timeframe);
return new SecurityCurrency(securities, currencypairs);
}
}
private void updateDependingCurrencyWhenPerformanceWatchlist(final Tenant tenant, Watchlist watchlist,
List<Currencypair> currencypairs) {
if (watchlist.getIdWatchlist().equals(tenant.getIdWatchlistPerformance())) {
List<Currencypair> currenciesNotInList = currencypairJpaRepository
.getAllCurrencypairsForTenantByTenant(tenant.getIdTenant());
currenciesNotInList.removeAll(currencypairs);
currencypairJpaRepository.updateLastPriceByList(currenciesNotInList);
}
}
private Map<Integer, ISecuritycurrencyIdDateClose> getLastDayOfLastYearHistoryquotesByIdWatchlist(
final Integer idWatchlist) {
final LocalDate lastDayLastYear = LocalDate.of(LocalDate.now().minusYears(1).getYear(), Month.DECEMBER, 31);
return getTimeFrameHistoryquotesByIdWatchlistAndDate(idWatchlist, lastDayLastYear);
}
private Map<Integer, ISecuritycurrencyIdDateClose> getMaxDayHistoryquotesByIdWatchlist(final Integer idWatchlist) {
final List<ISecuritycurrencyIdDateClose> historyquotes = this.historyquoteJpaRepository
.getYoungestHistorquoteForSecuritycurrencyByWatchlist(idWatchlist);
return historyquotes.stream()
.collect(Collectors.toMap(ISecuritycurrencyIdDateClose::getIdSecuritycurrency, Function.identity()));
}
private Map<Integer, ISecuritycurrencyIdDateClose> getTimeFrameHistoryquotesByIdWatchlistAndDate(
final Integer idWatchlist, final LocalDate localDateTimeFrame) {
final Date date = Date.from(localDateTimeFrame.atStartOfDay(ZoneId.systemDefault()).toInstant());
final List<ISecuritycurrencyIdDateClose> historyquotes = this.historyquoteJpaRepository
.getCertainOrOlderDayInHistorquoteForSecuritycurrencyByWatchlist(idWatchlist, date);
return historyquotes.stream()
.collect(Collectors.toMap(ISecuritycurrencyIdDateClose::getIdSecuritycurrency, Function.identity()));
}
private <S extends Securitycurrency<?>> SecuritycurrencyGroup combineLastPriceHistoryquote(final Tenant tenant,
final SecurityCurrency securityCurrency, final Map<Integer, ISecuritycurrencyIdDateClose> historyquoteMaxDateMap,
final Map<Integer, ISecuritycurrencyIdDateClose> historyquoteLastDayPrevYear,
final Map<Integer, ISecuritycurrencyIdDateClose> historyquoteTimeFrame, final int[] securitiesIsUsedElsewhereIds,
final int[] currencypairIsUsedElsewhereIds, final int[] watchlistSecuritesHasTransactionIds,
final Watchlist watchlist, final Integer daysTimeFrame, final Map<Integer, List<Securitysplit>> securitysplitMap,
final DateTransactionCurrencypairMap dateCurrencyMap) {
final SecuritycurrencyGroup securitycurrencyGroup = new SecuritycurrencyGroup(
setOpenPositions(tenant, watchlist,
setDailyChangeAndTimeFrameChange(securityCurrency.securities, historyquoteMaxDateMap,
historyquoteLastDayPrevYear, historyquoteTimeFrame, daysTimeFrame, securitysplitMap),
securitysplitMap, dateCurrencyMap),
setDailyChangeAndTimeFrameChange(securityCurrency.currencypairs, historyquoteMaxDateMap,
historyquoteLastDayPrevYear, historyquoteTimeFrame, daysTimeFrame, securitysplitMap),
watchlist.getLastTimestamp(), watchlist.getIdWatchlist());
markForUsedSecurityCurrencypairs(securitycurrencyGroup, securitiesIsUsedElsewhereIds,
currencypairIsUsedElsewhereIds);
markWatchlistSecurityHasEverTransactionTenant(watchlistSecuritesHasTransactionIds,
securitycurrencyGroup.securityPositionList);
return securitycurrencyGroup;
}
  /**
   * Sets the "is used elsewhere" flag on every security and currency pair position of the group.
   *
   * @param securitycurrencyGroup          group whose positions are flagged in place
   * @param securitiesIsUsedElsewhereIds   security ids referenced outside this watchlist; must be
   *                                       sorted (binary search is applied)
   * @param currencypairIsUsedElsewhereIds currency pair ids referenced outside this watchlist; must
   *                                       be sorted (binary search is applied)
   */
  private void markForUsedSecurityCurrencypairs(final SecuritycurrencyGroup securitycurrencyGroup,
      final int[] securitiesIsUsedElsewhereIds, final int[] currencypairIsUsedElsewhereIds) {
    markSecurityCurrencypairsIsUsedElsewhere(securitiesIsUsedElsewhereIds, securitycurrencyGroup.securityPositionList);
    markSecurityCurrencypairsIsUsedElsewhere(currencypairIsUsedElsewhereIds,
        securitycurrencyGroup.currencypairPositionList);
  }
private <T extends Securitycurrency<T>> void markSecurityCurrencypairsIsUsedElsewhere(
final int[] securitiesCurrencypairsIsUsedElsewhereIds,
final List<SecuritycurrencyPosition<T>> securitycurrencyPositionList) {
securitycurrencyPositionList.forEach(securitycurrencyPosition -> securitycurrencyPosition.isUsedElsewhere = Arrays
.binarySearch(securitiesCurrencypairsIsUsedElsewhereIds,
securitycurrencyPosition.securitycurrency.getIdSecuritycurrency().intValue()) >= 0);
}
private <T extends Securitycurrency<T>> void markWatchlistSecurityHasEverTransactionTenant(
final int[] watchlistSecuritesHasTransactionIds,
final List<SecuritycurrencyPosition<T>> securitycurrencyPositionList) {
securitycurrencyPositionList
.forEach(securitycurrencyPosition -> securitycurrencyPosition.watchlistSecurityHasEver = Arrays.binarySearch(
watchlistSecuritesHasTransactionIds,
securitycurrencyPosition.securitycurrency.getIdSecuritycurrency().intValue()) >= 0);
}
private <S extends Securitycurrency<S>> List<SecuritycurrencyPosition<S>> setDailyChangeAndTimeFrameChange(
final List<S> securitycurrenyList, final Map<Integer, ISecuritycurrencyIdDateClose> historyquoteMaxDateMap,
final Map<Integer, ISecuritycurrencyIdDateClose> historyquoteLastDayPrevYear,
final Map<Integer, ISecuritycurrencyIdDateClose> historyquoteTimeFrame, final Integer daysTimeFrame,
final Map<Integer, List<Securitysplit>> securitysplitMap) {
final List<SecuritycurrencyPosition<S>> securitycurrencyPositionList = createSecuritycurrencyPositionList(
securitycurrenyList);
securitycurrencyPositionList.stream()
.filter(securitycurrency -> securitycurrency.securitycurrency.getSChangePercentage() == null)
.forEach(securitycurrency -> setDailyChangeByUsingHistoryquote(historyquoteMaxDateMap, securitycurrency));
securitycurrencyPositionList.stream()
.forEach(securitycurrency -> setYtdGainLoss(historyquoteLastDayPrevYear, securitycurrency, securitysplitMap));
securitycurrencyPositionList.stream().forEach(securitycurrency -> setTimeFrameGainLoss(historyquoteTimeFrame,
securitycurrency, daysTimeFrame, securitysplitMap));
return securitycurrencyPositionList;
}
  /**
   * Adds open position figures (units, value, gain/loss) to the security positions by replaying all
   * transactions of the watchlist.
   *
   * @param tenant                    owning tenant; its exclude-dividend-tax setting is honored
   * @param watchlist                 watchlist whose transactions are loaded
   * @param securitycurrencyPositions security positions to enrich in place
   * @param securitysplitMap          security splits by security id
   * @param dateCurrencyMap           currency conversion map; may be null (gains then remain in the
   *                                  instrument currency, see caller)
   * @return the given position list, enriched
   */
  private List<SecuritycurrencyPosition<Security>> setOpenPositions(final Tenant tenant, final Watchlist watchlist,
      final List<SecuritycurrencyPosition<Security>> securitycurrencyPositions,
      final Map<Integer, List<Securitysplit>> securitysplitMap, final DateTransactionCurrencypairMap dateCurrencyMap) {
    final List<Transaction> transactions = this.transactionJpaRepository.findByIdWatchlist(watchlist.getIdWatchlist());
    final boolean excludeDivTax = tenant.isExcludeDivTax();
    final Map<Security, SecurityPositionSummary> summarySecurityMap = new HashMap<>();
    // Calculate all positions closed or open
    transactions.forEach(transaction -> securityCalcService.calcSingleSecurityTransaction(transaction,
        summarySecurityMap, securitysplitMap, excludeDivTax, dateCurrencyMap));
    this.calcOpenSecurityPositons(securitycurrencyPositions, summarySecurityMap, securitysplitMap, dateCurrencyMap);
    return securitycurrencyPositions;
  }
  /**
   * Calculates the open position figures for all security positions that still hold units. For each
   * open position a hypothetical sell at the latest price is performed and its value, units and
   * gain/loss percentage are copied onto the corresponding watchlist position.
   *
   * @param securitycurrencyPositions all security positions of the watchlist
   * @param summarySecurityMap        per-security transaction summary (closed and open positions)
   * @param securitysplitMap          security splits by security id
   * @param dateCurrencyMap           currency conversion map; may be null
   */
  private void calcOpenSecurityPositons(final List<SecuritycurrencyPosition<Security>> securitycurrencyPositions,
      final Map<Security, SecurityPositionSummary> summarySecurityMap,
      final Map<Integer, List<Securitysplit>> securitysplitMap, final DateTransactionCurrencypairMap dateCurrencyMap) {
    // Maps each open summary back to its watchlist position so the callback below can find it.
    final Map<SecurityPositionSummary, SecuritycurrencyPosition<Security>> securitycurrencyPositionMap = new HashMap<>();
    final List<SecurityPositionSummary> securityPostionSummaryList = new ArrayList<>();
    // Keep only positions with remaining units - those are the open ones.
    securitycurrencyPositions.forEach(securitycurrencyPosition -> {
      final SecurityPositionSummary securityPositionSummary = summarySecurityMap
          .get(securitycurrencyPosition.securitycurrency);
      if (securityPositionSummary != null && securityPositionSummary.units != 0) {
        securitycurrencyPositionMap.put(securityPositionSummary, securitycurrencyPosition);
        securityPostionSummaryList.add(securityPositionSummary);
      }
    });
    if (!securityPostionSummaryList.isEmpty()) {
      securityJpaRepository.calcGainLossBasedOnDateOrNewestPrice(securityPostionSummaryList,
          new IPositionCloseOnLatestPrice<Security, SecurityPositionSummary>() {
            @Override
            public void calculatePositionClose(final SecurityPositionSummary securityPositionSummary,
                final Double lastPrice) {
              // Called per open position: close it virtually at lastPrice and copy the results over.
              final SecuritycurrencyPosition<Security> securitycurrencyPosition = securitycurrencyPositionMap
                  .get(securityPositionSummary);
              securityPositionSummary.reCalculateOpenPosition = true;
              securityCalcService.createHypotheticalSellTransaction(securityPositionSummary, lastPrice,
                  securitysplitMap, dateCurrencyMap, null);
              securitycurrencyPosition.valueSecurity = securityPositionSummary.getValueSecurity();
              securitycurrencyPosition.units = securityPositionSummary.getUnits();
              securitycurrencyPosition.positionGainLossPercentage = securityPositionSummary
                  .getPositionGainLossPercentage();
            }
          }, new Date());
    }
  }
@SuppressWarnings("unchecked")
private <S extends Securitycurrency<S>> List<SecuritycurrencyPosition<S>> createSecuritycurrencyPositionList(
final List<S> securitycurrencyList) {
final List<SecuritycurrencyPosition<S>> securityPositionList = new ArrayList<>();
securitycurrencyList.stream().forEach(securitycurrency -> {
final SecuritycurrencyPosition<S> securitycurrencyPosition = new SecuritycurrencyPosition<>(securitycurrency);
if (securitycurrency instanceof Security) {
this.securityJpaRepository
.setSecuritycurrencyHistoricalDownloadLink((SecuritycurrencyPosition<Security>) securitycurrencyPosition);
this.securityJpaRepository
.setSecuritycurrencyIntradayDownloadLink((SecuritycurrencyPosition<Security>) securitycurrencyPosition);
this.securityJpaRepository
.setDividendDownloadLink((SecuritycurrencyPosition<Security>) securitycurrencyPosition);
} else {
this.currencypairJpaRepository.setSecuritycurrencyHistoricalDownloadLink(
(SecuritycurrencyPosition<Currencypair>) securitycurrencyPosition);
this.currencypairJpaRepository
.setSecuritycurrencyIntradayDownloadLink((SecuritycurrencyPosition<Currencypair>) securitycurrencyPosition);
}
securityPositionList.add(securitycurrencyPosition);
});
return securityPositionList;
}
private <S extends Securitycurrency<S>> void setDailyChangeByUsingHistoryquote(
final Map<Integer, ISecuritycurrencyIdDateClose> historyquoteMaxDateMap,
final SecuritycurrencyPosition<S> securitycurrencyPosition) {
final ISecuritycurrencyIdDateClose historyquote = historyquoteMaxDateMap
.get(securitycurrencyPosition.securitycurrency.getIdSecuritycurrency());
if (historyquote != null && securitycurrencyPosition.securitycurrency.getSLast() != null) {
securitycurrencyPosition.securitycurrency.setSPrevClose(historyquote.getClose());
securitycurrencyPosition.securitycurrency
.setSChangePercentage((securitycurrencyPosition.securitycurrency.getSLast() - historyquote.getClose())
/ historyquote.getClose() * 100);
}
}
private <S extends Securitycurrency<S>> void setYtdGainLoss(
final Map<Integer, ISecuritycurrencyIdDateClose> historyquoteTimeFrame,
final SecuritycurrencyPosition<S> securitycurrencyPosition,
final Map<Integer, List<Securitysplit>> securitysplitMap) {
final ISecuritycurrencyIdDateClose historyquote = historyquoteTimeFrame
.get(securitycurrencyPosition.securitycurrency.getIdSecuritycurrency());
if (historyquote != null && securitycurrencyPosition.securitycurrency.getSLast() != null) {
SplitFactorAfterBefore splitFactorAfterBefore = new SplitFactorAfterBefore();
final double histroyClose = historyquote.getClose();
if (securitycurrencyPosition.securitycurrency instanceof Security) {
splitFactorAfterBefore = Securitysplit.calcSplitFatorForFromDateAndToDate(
securitycurrencyPosition.securitycurrency.getIdSecuritycurrency(), historyquote.getDate(), new Date(),
securitysplitMap);
}
final double historyCloseAdjusted = histroyClose / splitFactorAfterBefore.fromToDateFactor;
securitycurrencyPosition.ytdChangePercentage = DataHelper.roundStandard(
(securitycurrencyPosition.securitycurrency.getSLast() - historyCloseAdjusted) / historyCloseAdjusted * 100);
}
}
private <S extends Securitycurrency<S>> void setTimeFrameGainLoss(
final Map<Integer, ISecuritycurrencyIdDateClose> historyquoteLastDayPrevYear,
final SecuritycurrencyPosition<S> securitycurrencyPosition, final Integer daysTimeFrame,
final Map<Integer, List<Securitysplit>> securitysplitMap) {
final ISecuritycurrencyIdDateClose historyquote = historyquoteLastDayPrevYear
.get(securitycurrencyPosition.securitycurrency.getIdSecuritycurrency());
if (historyquote != null && securitycurrencyPosition.securitycurrency.getSLast() != null) {
final double histroyClose = historyquote.getClose();
SplitFactorAfterBefore splitFactorAfterBefore = new SplitFactorAfterBefore();
if (securitycurrencyPosition.securitycurrency instanceof Security) {
splitFactorAfterBefore = Securitysplit.calcSplitFatorForFromDateAndToDate(
securitycurrencyPosition.securitycurrency.getIdSecuritycurrency(), historyquote.getDate(), new Date(),
securitysplitMap);
}
final int years = daysTimeFrame / 365;
final double historyCloseAdjusted = histroyClose / splitFactorAfterBefore.fromToDateFactor;
securitycurrencyPosition.timeFrameChangePercentage = DataHelper.roundStandard(
(securitycurrencyPosition.securitycurrency.getSLast() - historyCloseAdjusted) / historyCloseAdjusted * 100);
if (years >= 1) {
securitycurrencyPosition.timeFrameAnnualChangePercentage = DataHelper.roundStandard(
(Math.pow(securitycurrencyPosition.timeFrameChangePercentage / 100 + 1, 1.0 / years) - 1.0) * 100);
}
}
}
private static class SecurityCurrency {
List<Security> securities;
List<Currencypair> currencypairs;
public SecurityCurrency(final List<Security> securities, final List<Currencypair> currencypairs) {
super();
this.securities = securities;
this.currencypairs = currencypairs;
}
}
}
|
/** provides noise algorithms
* @author dermetfan */
public abstract class Noise {
/** the seed used by {@link #random} */
private static long seed = -1;
/** if {@link #seed} should be used (false by default) */
private static boolean seedEnabled;
/** the {@link Random} used to generate pseudo-random values */
private static Random random = new Random();
/** randomizes a given float array using the midpoint-displacement algorithm
* @param values the float array to randomize
* @param range the range used for random values
* @param smoothness the smoothness of the transitions
* @return the randomized float array */
public static float[] midpointDisplacement(float[] values, float range, float smoothness) {
for(int i = 0; i < values.length; i++, range /= smoothness)
values[i] = (getRepeated(values, i - 1) + getRepeated(values, i + 1)) / 2 + random(-range, range);
return values;
}
/** @see #midpointDisplacement(int, float, float, boolean, net.dermetfan.utils.Function, int, int) */
public static float[][] midpointDisplacement(int n, float smoothness, float range, int scaleX, int scaleY) {
return midpointDisplacement(n, range, smoothness, true, null, scaleX, scaleY);
}
/** @see #midpointDisplacement(int, float, float, boolean, net.dermetfan.utils.Function, int, int) */
public static float[][] midpointDisplacement(int n, float smoothness, float range, Function<Pair<Float, Float>, Float> init, int scaleX, int scaleY) {
return midpointDisplacement(n, range, smoothness, false, init, scaleX, scaleY);
}
/** @see #midpointDisplacement(int, float, float, boolean, float, int, int) */
public static float[][] midpointDisplacement(int n, float smoothness, float range, final float init, int scaleX, int scaleY) {
return midpointDisplacement(n, range, smoothness, false, init, scaleX, scaleY);
}
/** @see #midpointDisplacement(int, float, float, boolean, net.dermetfan.utils.Function, int, int) */
public static float[][] midpointDisplacement(int n, float smoothness, float range, boolean initializeRandomly, final float init, int scaleX, int scaleY) {
return midpointDisplacement(n, range, smoothness, initializeRandomly, initializeRandomly ? null : new Function<Pair<Float, Float>, Float>() {
@Override
public Float apply(Pair<Float, Float> object) {
return init;
}
}, scaleX, scaleY);
}
/** generates a height map using the midpoint-displacement algorithm
* @param n level of detail
* @param range the range used for random values
* @param smoothness the smoothness of the transitions
* @param initializeRandomly if init should be ignored to use random values instead
* @param init an Accessor that takes the coordinate to be initialized (in a Pair) and returns the value to use for initialization
* @param scaleX scale of the x axis
* @param scaleY scale of the y axis
* @return a height map generated using the midpoint-displacement algorithm */
private static float[][] midpointDisplacement(int n, float smoothness, float range, boolean initializeRandomly, Function<Pair<Float, Float>, Float> init, int scaleX, int scaleY) {
if(n < 0)
throw new IllegalArgumentException("n must be >= 0: " + n);
range /= 2; // divide range by two to avoid doing it later for random(-range, range) calls
int x, y, power = (int) Math.pow(2, n), width = scaleX * power + 1, height = scaleY * power + 1, step;
float[][] map = new float[width][height];
boolean sy, sx;
Pair<Float, Float> coord = new Pair<>();
for(x = 0; x < width; x += power)
for(y = 0; y < height; y += power)
map[x][y] = initializeRandomly ? random(-range, range) : init.apply(coord.set((float) x, (float) y));
for(step = power / 2; step > 0; step /= 2, range /= smoothness) {
sx = false;
for(x = 0; x < width; x += step, sx = !sx) {
sy = false;
for(y = 0; y < height; y += step, sy = !sy)
if(sx && sy)
map[x][y] = (map[x - step][y - step] + map[x + step][y - step] + map[x - step][y + step] + map[x + step][y + step]) / 4 + random(-range, range);
else if(sx)
map[x][y] = (map[x - step][y] + map[x + step][y]) / 2 + random(-range, range);
else if(sy)
map[x][y] = (map[x][y - step] + map[x][y + step]) / 2 + random(-range, range);
}
}
return map;
}
/** @see #diamondSquare(int, float, float, boolean, boolean, boolean, net.dermetfan.utils.Function, int, int) */
public static float[][] diamondSquare(int n, float smoothness, float range, boolean wrapX, boolean wrapY, Function<Pair<Float, Float>, Float> init, int scaleX, int scaleY) {
return diamondSquare(n, smoothness, range, wrapX, wrapY, false, init, scaleX, scaleY);
}
/** @see #diamondSquare(int, float, float, boolean, boolean, boolean, float, int, int) */
public static float[][] diamondSquare(int n, float smoothness, float range, boolean wrapX, boolean wrapY, int scaleX, int scaleY) {
return diamondSquare(n, smoothness, range, wrapX, wrapY, true, Float.NaN, scaleX, scaleY);
}
/** @see #diamondSquare(int, float, float, boolean, boolean, boolean, float, int, int) */
public static float[][] diamondSquare(int n, float smoothness, float range, boolean wrapX, boolean wrapY, float init, int scaleX, int scaleY) {
return diamondSquare(n, smoothness, range, wrapX, wrapY, false, init, scaleX, scaleY);
}
/** @param init the value to initialize every coordinate with
* @see #diamondSquare(int, float, float, boolean, boolean, boolean, net.dermetfan.utils.Function, int, int) */
public static float[][] diamondSquare(int n, float smoothness, float range, boolean wrapX, boolean wrapY, boolean initializeRandomly, final float init, int scaleX, int scaleY) {
return diamondSquare(n, smoothness, range, wrapX, wrapY, initializeRandomly, initializeRandomly ? null : new Function<Pair<Float, Float>, Float>() {
@Override
public Float apply(Pair<Float, Float> object) {
return init;
}
}, scaleX, scaleY);
}
/** generates a height map using the diamond-square algorithm
* @param n level of detail
* @param range the range used for random values
* @param smoothness the smoothness of the transitions
* @param wrapX if the map should wrap on the x axis
* @param wrapY if the map should wrap on the y axis
* @param initializeRandomly if init should be ignored to use random values instead
* @param init an Accessor that takes the coordinate to be initialized (in a Pair) and returns the value to use for initialization
* @param scaleX scale of the x axis
* @param scaleY scale of the y axis
* @return a height map generated using the diamond-square algorithm */
private static float[][] diamondSquare(int n, float smoothness, float range, boolean wrapX, boolean wrapY, boolean initializeRandomly, Function<Pair<Float, Float>, Float> init, int scaleX, int scaleY) {
if(n < 0)
throw new IllegalArgumentException("n must be >= 0: " + n);
range /= 2; // divide range by two to avoid doing it later for random(-range, range) calls
int power = (int) Math.pow(2, n), width = scaleX * power + 1, height = scaleY * power + 1, x, y;
float map[][] = new float[width][height], avg;
Pair<Float, Float> coord = new Pair<>();
// seed the grid
for(x = 0; x < width; x += power)
for(y = 0; y < height; y += power)
map[x][y] = initializeRandomly ? random(-range, range) : init.apply(coord.set((float) x, (float) y));
for(power /= 2; power > 0; power /= 2, range /= smoothness) {
// square step
for(x = power; x < width; x += power * 2)
for(y = power; y < height; y += power * 2)
map[x][y] = (map[x - power][y - power] + map[x - power][y + power] + map[x + power][y + power] + map[x + power][y - power]) / 4 + random(-range, range);
// diamond step
for(x = 0; x < width - (wrapX ? 1 : 0); x += power)
for(y = power * (1 - x / power % 2); y < height - (wrapY ? 1 : 0); y += power * 2) {
map[x][y] = (avg = (map[ArrayUtils.repeat(width, x - power)][y] + map[ArrayUtils.repeat(width, x + power)][y] + map[x][ArrayUtils.repeat(height, y - power)] + map[x][ArrayUtils.repeat(height, y + power)]) / 4) + random(-range, range);
if(wrapX && x == 0)
map[width - 1][y] = avg;
if(wrapY && y == 0)
map[x][height - 1] = avg;
}
}
return map;
}
/** @return a random value between start (inclusive) and end (exclusive) */
public static float random(float start, float end) {
return start + random.nextFloat() * (end - start);
}
/** @param seedEnabled if {@link #seed} should be used */
public static void setSeedEnabled(boolean seedEnabled) {
if(Noise.seedEnabled = seedEnabled)
random.setSeed(seed);
else
random = new Random();
}
/** @return the {@link #seedEnabled} */
public static boolean isSeedEnabled() {
return seedEnabled;
}
/** @return the {@link #seed} */
public static long getSeed() {
return seed;
}
/** @param seed the {@link #seed} to set */
public static void setSeed(long seed) {
random.setSeed(Noise.seed = seed);
}
/** @return the {@link #random} */
public static Random getRandom() {
return random;
}
} |
    /**
     * Tests the deactivate() method, after setting some option.
     *
     * @param setOption function that sets an option
     * @throws Throwable if an error occurs during setup
     */
    private void testDeactivate(RunnableWithEx setOption) throws Throwable {
        setUp();
        // apply the scenario-specific option before configuring the manager
        setOption.run();
        mgr.configure(properties);
        mgr.deactivate();
        // both registered providers must have received the "before" callback
        verify(prov1).beforeDeactivate(mgr);
        verify(prov2).beforeDeactivate(mgr);
        // both controllers must have been locked and stopped
        verify(controller).lock();
        verify(controller2).lock();
        verify(controller).stop();
        verify(controller2).stop();
        // the manager itself must end up in the locked state
        assertTrue(mgr.isLocked());
        // both providers must have received the "after" callback
        verify(prov1).afterDeactivate(mgr);
        verify(prov2).afterDeactivate(mgr);
    }
// ParseCoordinates parses string to coordinates.
func ParseCoordinates(str string) (Coordinates, error) {
var coordinates Coordinates
splits := strings.Split(str, api.Semicolon)
for _, s := range splits {
c, err := ParseCoordinate(s)
if err != nil {
return nil, err
}
coordinates = append(coordinates, c)
}
return coordinates, nil
} |
GRADE system: new paradigm
Purpose of review: An exposition of the Grading of Recommendations Assessment, Development and Evaluation (GRADE) approach to recommendations. Recent findings: In this review, we outline the process whereby the strength of evidence from the literature undergoes a systematic reappraisal. The GRADE system allows four grades of evidence (high quality, moderate, low, and very low) and strength of recommendation is qualified as strong, weak, or conditional to an intervention (pro or con) and defined as the level of confidence that desirable effects predominate over untoward ones with a certain intervention. We provide research and clinical reviews in various settings in which this approach has been used. Summary: Evidence-based medicine requires integrating the best available ‘benchmark’ literature with patient preferences and values (bedside) and is an evaluation process involving both patient and clinician, with a systematic assessment of the rated evidence from state-of-the-art medical literature. The GRADE methodology was developed as an application of evidence-based medicine to the field of recommendations and their formulation. The GRADE working group brings together clinical researchers and methodologists who developed a rating system to assess the quality of evidence for the purpose of making clinical practice recommendations.
Imperfect drug penetration leads to spatial monotherapy and rapid evolution of multidrug resistance
Significance: The evolution of drug resistance is a major health threat. In chronic infections with rapidly mutating pathogens—including HIV, tuberculosis, and hepatitis B and C viruses—multidrug resistance can cause even aggressive combination drug treatment to fail. Oftentimes, individual drugs within a combination do not penetrate equally to all infected regions of the body. Here we present a mathematical model suggesting that this imperfect penetration can dramatically increase the chance of treatment failure by creating regions where only one drug from a combination reaches a therapeutic concentration. The resulting single-drug compartments allow the pathogen to evolve resistance to each drug sequentially, rapidly causing multidrug resistance. More broadly, our model provides a quantitative framework for reasoning about trade-offs between aggressive and moderate drug therapies. Infections with rapidly evolving pathogens are often treated using combinations of drugs with different mechanisms of action. One of the major goals of combination therapy is to reduce the risk of drug resistance emerging during a patient’s treatment. Although this strategy generally has significant benefits over monotherapy, it may also select for multidrug-resistant strains, particularly during long-term treatment for chronic infections. Infections with these strains present an important clinical and public health problem. Complicating this issue, for many antimicrobial treatment regimes, individual drugs have imperfect penetration throughout the body, so there may be regions where only one drug reaches an effective concentration. Here we propose that mismatched drug coverage can greatly speed up the evolution of multidrug resistance by allowing mutations to accumulate in a stepwise fashion.
We develop a mathematical model of within-host pathogen evolution under spatially heterogeneous drug coverage and demonstrate that even very small single-drug compartments lead to dramatically higher resistance risk. We find that it is often better to use drug combinations with matched penetration profiles, although there may be a trade-off between preventing eventual treatment failure due to resistance in this way and temporarily reducing pathogen levels systemically. Our results show that drugs with the most extensive distribution are likely to be the most vulnerable to resistance. We conclude that optimal combination treatments should be designed to prevent this spatial effective monotherapy. These results are widely applicable to diverse microbial infections including viruses, bacteria, and parasites. |
/**
 * = AdditionalDetails
 *
 * JPA entity holding supplementary document metadata: file name, document
 * title and owner, an attachment reference and a unique identifier.
 */
@RooJavaBean
@RooToString
@RooJpaEntity
@RooEquals(isJpaEntity = true)
@Entity
@EntityFormat
public class AdditionalDetails {
/**
* TODO Auto-generated attribute documentation
*
*/
@Id
@GeneratedValue(strategy = GenerationType.AUTO)
private Long id;
/**
* TODO Auto-generated attribute documentation
*
*/
private String fileName;
/**
* TODO Auto-generated attribute documentation
*
*/
private String documentTitle;
/**
* TODO Auto-generated attribute documentation
*
*/
private String documentOwner;
/**
* TODO Auto-generated attribute documentation
*
*/
private String attachment;
/**
* TODO Auto-generated attribute documentation
*
*/
private String uniqueId;
/**
* TODO Auto-generated attribute documentation
*
*/
public static final String ITERABLE_TO_ADD_CANT_BE_NULL_MESSAGE = "The given Iterable of items to add can't be null!";
/**
* TODO Auto-generated attribute documentation
*
*/
public static final String ITERABLE_TO_REMOVE_CANT_BE_NULL_MESSAGE = "The given Iterable of items to add can't be null!";
/**
* Gets id value
*
* @return Long
*/
public Long getId() {
return this.id;
}
/**
* Sets id value
*
* @param id
* @return AdditionalDetails
*/
public AdditionalDetails setId(Long id) {
this.id = id;
return this;
}
/**
* Gets fileName value
*
* @return String
*/
public String getFileName() {
return this.fileName;
}
/**
* Sets fileName value
*
* @param fileName
* @return AdditionalDetails
*/
public AdditionalDetails setFileName(String fileName) {
this.fileName = fileName;
return this;
}
/**
* Gets documentTitle value
*
* @return String
*/
public String getDocumentTitle() {
return this.documentTitle;
}
/**
* Sets documentTitle value
*
* @param documentTitle
* @return AdditionalDetails
*/
public AdditionalDetails setDocumentTitle(String documentTitle) {
this.documentTitle = documentTitle;
return this;
}
/**
* Gets documentOwner value
*
* @return String
*/
public String getDocumentOwner() {
return this.documentOwner;
}
/**
* Sets documentOwner value
*
* @param documentOwner
* @return AdditionalDetails
*/
public AdditionalDetails setDocumentOwner(String documentOwner) {
this.documentOwner = documentOwner;
return this;
}
/**
* Gets attachment value
*
* @return String
*/
public String getAttachment() {
return this.attachment;
}
/**
* Sets attachment value
*
* @param attachment
* @return AdditionalDetails
*/
public AdditionalDetails setAttachment(String attachment) {
this.attachment = attachment;
return this;
}
/**
* Gets uniqueId value
*
* @return String
*/
public String getUniqueId() {
return this.uniqueId;
}
/**
* Sets uniqueId value
*
* @param uniqueId
* @return AdditionalDetails
*/
public AdditionalDetails setUniqueId(String uniqueId) {
this.uniqueId = uniqueId;
return this;
}
/**
* This `equals` implementation is specific for JPA entities and uses
* the entity identifier for it, following the article in
* https://vladmihalcea.com/2016/06/06/how-to-implement-equals-and-hashcode-using-the-jpa-entity-identifier/
*
* @param obj
* @return Boolean
*/
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
// instanceof is false if the instance is null
if (!(obj instanceof AdditionalDetails)) {
return false;
}
return getId() != null && Objects.equals(getId(), ((AdditionalDetails) obj).getId());
}
/**
* This `hashCode` implementation is specific for JPA entities and uses a fixed `int` value to be able
* to identify the entity in collections after a new id is assigned to the entity, following the article in
* https://vladmihalcea.com/2016/06/06/how-to-implement-equals-and-hashcode-using-the-jpa-entity-identifier/
*
* @return Integer
*/
public int hashCode() {
return 31;
}
/**
* TODO Auto-generated method documentation
*
* @return String
*/
public String toString() {
return "AdditionalDetails {" + "id='" + id + '\'' + ", fileName='" + fileName + '\'' + ", documentTitle='" + documentTitle + '\'' + ", documentOwner='" + documentOwner + '\'' + ", attachment='" + attachment + '\'' + ", uniqueId='" + uniqueId + '\'' + "}" + super.toString();
}
} |
<reponame>StarWarsDev/legion-discord-bot
package output
import (
"github.com/bwmarrin/discordgo"
)
const (
	// colorError is the red accent used for error embeds.
	colorError = 0xE84A4A
	// colorInfo is the yellow accent used for informational embeds.
	colorInfo = 0xF2E82B
)
// Error returns a message embed colored for error-level messages.
// (The previous comment incorrectly described it as "a level of Success".)
func Error(title, description string) discordgo.MessageEmbed {
	return newEmbedder(colorError, title, description)
}
// Info returns a message embed colored for informational messages.
// (The previous comment incorrectly described it as "a level of Success".)
func Info(title, description string) discordgo.MessageEmbed {
	return newEmbedder(colorInfo, title, description)
}
// Field builds an inline embed field carrying the given name and value.
func Field(name, value string) discordgo.MessageEmbedField {
	field := discordgo.MessageEmbedField{}
	field.Name = name
	field.Value = value
	field.Inline = true
	return field
}
// newEmbedder constructs a bare embed with the given accent color, title,
// and description; callers add fields separately via Field.
func newEmbedder(color int, title, description string) discordgo.MessageEmbed {
	embed := discordgo.MessageEmbed{}
	embed.Title = title
	embed.Description = description
	embed.Color = color
	return embed
}
|
<filename>actix-rt/src/runtime.rs
use std::{future::Future, io};
use tokio::task::{JoinHandle, LocalSet};
/// A Tokio-based runtime proxy.
///
/// All spawned futures will be executed on the current thread. Therefore, there is no `Send` bound
/// on submitted futures.
#[derive(Debug)]
pub struct Runtime {
    /// Task set that executes the `!Send` futures spawned onto this runtime.
    local: LocalSet,
    /// Underlying single-threaded Tokio runtime that drives `local`.
    rt: tokio::runtime::Runtime,
}
/// Builds the current-thread Tokio runtime used by [`Runtime`], with both the
/// I/O and time drivers enabled.
pub(crate) fn default_tokio_runtime() -> io::Result<tokio::runtime::Runtime> {
    let mut builder = tokio::runtime::Builder::new_current_thread();
    builder.enable_io();
    builder.enable_time();
    builder.build()
}
impl Runtime {
    /// Returns a new runtime initialized with default configuration values.
    #[allow(clippy::new_ret_no_self)]
    pub fn new() -> io::Result<Self> {
        default_tokio_runtime().map(|rt| Runtime {
            local: LocalSet::new(),
            rt,
        })
    }

    /// Offload a future onto the single-threaded runtime.
    ///
    /// The returned join handle can be used to await the future's result.
    ///
    /// See [crate root][crate] documentation for more details.
    ///
    /// # Examples
    /// ```
    /// let rt = actix_rt::Runtime::new().unwrap();
    ///
    /// // Spawn a future onto the runtime
    /// let handle = rt.spawn(async {
    ///     println!("running on the runtime");
    ///     42
    /// });
    ///
    /// assert_eq!(rt.block_on(handle).unwrap(), 42);
    /// ```
    ///
    /// # Panics
    /// This function panics if the spawn fails. Failure occurs if the executor is currently at
    /// capacity and is unable to spawn a new future.
    pub fn spawn<F>(&self, future: F) -> JoinHandle<F::Output>
    where
        F: Future + 'static,
    {
        self.local.spawn_local(future)
    }

    /// Runs the provided future, blocking the current thread until it resolves,
    /// and returns its output.
    ///
    /// Other futures previously spawned onto this runtime also make progress
    /// while blocking, but this call does not wait for them to finish: anything
    /// still pending when `f` resolves stays parked inside the `Runtime` and
    /// will only continue on the next `block_on` (or `run`) call. The caller is
    /// responsible for driving those remaining futures to completion.
    pub fn block_on<F>(&self, f: F) -> F::Output
    where
        F: Future,
    {
        self.local.block_on(&self.rt, f)
    }
}
impl From<tokio::runtime::Runtime> for Runtime {
fn from(rt: tokio::runtime::Runtime) -> Self {
Self {
local: LocalSet::new(),
rt,
}
}
}
|
<filename>app/components/home/home.e2e.ts
// E2E smoke suite for the Home page: navigate to the dev build before each spec.
describe('Home', () => {
  beforeEach(() => {
    browser.get('/dist/dev');
  });
});
|
<reponame>wuqingsen/-FFmpegDemo<gh_stars>1-10
package com.wuqingsen.opengllearn;
import android.content.Context;
import android.opengl.GLSurfaceView;
import android.util.AttributeSet;
/**
 * Created by wuqingsen on 2021/4/7.
 * Mailbox: <EMAIL>
 *
 * GLSurfaceView that renders the demo scenes (triangle, square, cube)
 * through {@link FGLRender}, using an OpenGL ES 2.0 context.
 */
public class FGLView extends GLSurfaceView {

    public FGLView(Context context) {
        super(context);
    }

    // Triangle, square, cube (constructor used when inflating from XML).
    public FGLView(Context context, AttributeSet attrs) {
        super(context, attrs);
        // Request an OpenGL ES 2.0 context.
        setEGLContextClientVersion(2);
        setRenderer(new FGLRender(this));
        // Set the render mode: redraw only on requestRender(), not continuously.
        setRenderMode(GLSurfaceView.RENDERMODE_WHEN_DIRTY);
    }
}
|
Neonatal screening for cystic fibrosis in São Paulo State, Brazil: a pilot study.
Cystic fibrosis is one of the most common autosomal recessive hereditary diseases in the Caucasian population, with an incidence of 1:2000 to 1:3500 liveborns. More than 1000 mutations have been described with the most common being F508del. It has a prevalence of 23-55% within the Brazilian population. The lack of population-based studies evaluating the incidence of cystic fibrosis in São Paulo State, Brazil, and an analysis concerning the costs of implantation of a screening program motivated the present study. A total of 60,000 dried blood samples from Guthrie cards obtained from April 2005 to January 2006 for neonatal screening at 4 reference centers in São Paulo State were analyzed. The immunoreactive trypsinogen (IRT)/IRT protocol was used with the cut-off value being 70 ng/mL. A total of 532 children (0.9%) showed IRT >70 ng/mL and a 2nd sample was collected from 418 (80.3%) of these patients. Four affected children were detected at two centers, corresponding to an incidence of 1:8403. The average age at diagnosis was 69 days, and 3 of the children already showed severe symptoms of the disease. The rate of false-positive results was 95.2% and the positive predictive value for the test was 8%. The cost of detecting an affected subject was approximately US$8,000.00 when this cystic fibrosis program was added to an existing neonatal screening program. The present study clearly shows the difficulties involved in cystic fibrosis screening using the IRT/IRT protocol, particularly in a population with no long-term tradition of neonatal screening. |
/**
* Displays the population of a specified region
* @param region the region to show
*/
public void showBasicPopulationReportForRegion(String region){
var population = _populationService.getPopulationOfRegion(region);
System.out.println("The population of " + region + " is: " + population);
} |
#include <bits/stdc++.h>
#include <string>
using namespace std;
// Registration system: for each requested username print "OK" if it is free,
// otherwise print the smallest name+number variant that is not taken yet.
// Fix over the original: the generated candidate (e.g. "first1") may itself
// have been registered explicitly earlier, so we must advance until free
// instead of blindly using name + counter.
int main(){
    int t;
    cin >> t;
    vector<string> responses;                 // answers, printed after all input
    unordered_map<string, int> registry;      // name -> next numeric suffix to try
    while(t--){
        string name;
        cin >> name;
        if(registry.count(name) == 0){
            // First time this exact name is seen: accept it as-is.
            registry[name]++;
            responses.push_back("OK");
        }
        else{
            int n = registry[name];
            string candidate = name + to_string(n);
            // Skip suffixes that are already taken (possibly registered directly).
            while(registry.count(candidate)){
                ++n;
                candidate = name + to_string(n);
            }
            registry[candidate]++;
            responses.push_back(candidate);
            registry[name] = n + 1;           // remember where to resume next time
        }
    }
    for(size_t i = 0; i < responses.size(); i++){
        cout << responses[i] << endl;
    }
}
<filename>NewShare/app/src/main/java/com/melvin/share/modelview/item/ProductDetailItemViewModel.java
package com.melvin.share.modelview.item;
import android.content.Context;
import android.databinding.BaseObservable;
import android.view.View;
import com.melvin.share.model.serverReturn.ImgUrlBean;
import com.melvin.share.model.serverReturn.ProductDetailBean;
/**
 * Created Time: 2016/7/23.
 * <p>
 * Author: Melvin
 * <p>
 * Purpose: ViewModel for a single product-detail item — one property
 * name/value row — used with Android data binding.
 */
public class ProductDetailItemViewModel extends BaseObservable {

    // The property (name/value pair) backing this row.
    private ProductDetailBean.PropertiesBean detailsBean;
    private Context context;

    public ProductDetailItemViewModel(Context context, ProductDetailBean.PropertiesBean detailsBean) {
        this.detailsBean = detailsBean;
        this.context = context;
    }

    // Click handler bound from the layout; currently a no-op.
    public void onItemClick(View view) {
    }

    /** Returns the property name with a trailing colon, ready for display. */
    public String getDetailName() {
        return detailsBean.propertyName+":";
    }

    /** Returns the property value for display. */
    public String getDetailValue() {
        return detailsBean.propertyValue;
    }

    /** Swaps the backing bean and notifies data binding to re-render this row. */
    public void setEntity(ProductDetailBean.PropertiesBean detailsBean) {
        this.detailsBean = detailsBean;
        notifyChange();
    }
}
|
def teacher_count(self):
    """Return how many teachers are related to this instance."""
    related_teachers = self.teachers
    return related_teachers.count()
CONTRA-LATERAL PARADOXICAL PLEURAL EFFUSION DURING ANTITUBERCULOUS CHEMOTHERAPY Case
Abstract: A 24-year old male developed left sided pleural effusion 10 days after the start of anti tubercular chemotherapy for right-sided pleural effusion and parenchymal lesion. This effusion seemed to be a paradoxical response as it resolved on follow up.
INtrODUCtION
Paradoxical response is referred to an unusual expansion or formation of a new lesion during successful anti-tubercular chemotherapy 1 . This response has been described in cases of tubercular lymph-adenopathy 2 and intra-cranial tuberculoma 3 though it has been very rarely reported in cases of pleural effusion . The pleural effusion has been documented to occur 3-4 weeks after the start of ATT 6,7 although it developed within 10 days of initiating anti tuberculosis treatment in our case. To our knowledge about 15 cases have been reported so far in literature hence this case report.
CaSE rEPOrt
A 24-year old male patient presented with complaints of cough with expectoration for one month, fever for 20 days and pain on the right side of the chest of five days duration. General physical examination revealed no abnormality. Examination of the chest revealed crepitations in the right infra-clavicular area and signs of right-sided pleural effusion. Chest radiograph showed a heterogeneous opacity in the right upper zones with blunting of the right costophrenic angle ( fig.1). Sputum examination was positive for acid-fast bacilli. Pleural fluid revealed straw colored exudative effusion with a predominance of lymphocytes. Patient was put on Category 1 treatment for tuberculosis as per Revised National Tuberculosis Control Programme (RNTCP) guidelines. After eight days of start of therapy, the patient presented with sudden onset of pain on the left side of chest and dyspnoea. Chest radiograph revealed a pleural effusion on the left side ( fig.2).
A straw colored effusion was aspirated which was exudative in nature with predominance of lymphocytes. The patient was reassured and ATT was continued as before.
Key words : Pleural effusion, Antituberculosis chemotherapy effects of mycobacterial products as an explanation to this response 9 by some workers. Another mechanism put forward is the 'immunological rebound' by which improved CMI after treatment coincides with an excessive antigen load (bacterial cell wall residues) resulting from rapid bacterial lysis 1,4,10 . It has also been suspected that bactericidal drugs like Isoniazid & Rifampicin could be worse offenders than bacteriostatic drugs. INH is well known to induce lupus though the reported cases are rare. An elevated level of ANA and a decreased level of CH50 were found in the effusion fluid which are characteristic of lupus pleuritis 5 .
A more detailed study is warranted to understand the etiopathogenesis of paradoxical worsening in cases of tuberculosis. The present case highlights the importance of understanding and knowing the pathological evidence of this clinical process during the management of pulmonary tuberculosis.
Thus it is concluded that the development of contralateral pleural effusion during the treatment for tuberculous effusion is very rare and no change in the treatment is required until some other disease is suspected.
After 12 weeks of ATT there was significant clinical and radiological improvement bilaterally. A chest radiograph at 4 months showed clearing of the pleural effusion on the left side with blunting of right costo-phrenic angle probably due to thickening of the pleura (fig 3).
DISCUSSION
Paradoxical response is referred to an unusual expansion or formation of a new lesion during successful anti-tubercular chemotherapy 1 . This response has been described in cases of tubercular lymphadenopathy but has been rarely reported in cases of pleural effusion. The paradoxical increase of the disease is documented to occur weeks or months following the start of ATT 1-3 .
Previous reporters established an incidence of 16% in cases of tubercular pleural effusion 4 that is far less than 30% reported for tubercular lymphadenopathy 3 . About 15 cases have been reported so far in literature and in all the reported cases the paradoxical effusion occurred in the same hemithorax but in our case the response was on the opposite side.
Rupture of subpleural abscesses into the pleural space along with hematogenous dissemination and rupture of caseous lymph nodes into the pleural space has been proposed as an explanation 8 . Speculation has led to the interaction between host's immune response and direct
// ListElements lists all elements of an architecture
func (architecture *Architecture) ListElements() ([]string, error) {
elementConfigurations := []string{}
architecture.ElementsX.RLock()
for elementConfiguration := range architecture.Elements {
elementConfigurations = append(elementConfigurations, elementConfiguration)
}
architecture.ElementsX.RUnlock()
return elementConfigurations, nil
} |
def findenumfiles(dir, prefix='.*?', suffix='', ngroups=1):
    """Find files in *dir* named ``<prefix>-<num>[-<num>...]<suffix>``.

    ``prefix`` and ``suffix`` are regular-expression fragments anchored at
    the start and end of the file name; ``ngroups`` is the number of
    dash-separated integer groups that must appear between them.

    Returns a list of tuples ``(full_path, num1, ..., numN)``, one per
    matching file, in ``os.listdir`` order.

    Raises ValueError if ngroups < 1.
    """
    # Import os locally as well: the original imported everything else
    # locally but silently relied on a module-level "import os".
    import os
    from os.path import join
    from re import compile as recomp

    if ngroups < 1:
        raise ValueError('At least one number group must be specified')

    numstr = '-([0-9]+)' * ngroups
    regexp = recomp(r'^%s%s%s$' % (prefix, numstr, suffix))

    results = []
    for name in os.listdir(dir):
        m = regexp.match(name)
        if m:
            # m.groups() yields exactly the ngroups captured numbers.
            results.append((join(dir, name),) + tuple(int(g) for g in m.groups()))
    return results
/*
 * SimpleFormat (FBigDec) will be used to update the data. Then the new data
 * will be read back, also via SimpleFormat (FBigDec).
 */
@Test
public void testWriteReadSortedBigDecimal()
    throws IOException {

    /* Copy log file resource to log file zero. */
    TestUtils.loadLog(getClass(), "je-4.0.103_BigDecimal.jdb", envHome);
    open(false /* registerProxy */);
    PrimaryIndex<Integer, BigDecimalData> primary =
        store.getPrimaryIndex(Integer.class, BigDecimalData.class);

    /*
     * DPL will use FBigDec format to write the BigDecimal in sorted
     * BigDecimal.
     */
    primary.put(null,
                new BigDecimalData (1, new BigDecimal("1234.1234000")));

    /*
     * DPL will use FBigDec format to read the BigDecimal in sorted
     * BigDecimal.
     */
    BigDecimalData entity = primary.get(1);
    assertNotNull(entity);
    /* Sorted BigDecimal cannot preserve precision (trailing zeros drop). */
    assertEquals(new BigDecimal("1234.1234"), entity.getF1());
    close();

    /* Re-open and read the data again to verify the stored representation. */
    open(false /*registerProxy*/);
    primary = store.getPrimaryIndex(Integer.class, BigDecimalData.class);

    /*
     * In the future, DPL will use FBigDec format to read the BigDecimal in
     * sorted BigDecimal.
     */
    entity = primary.get(1);
    assertNotNull(entity);
    /* Sorted BigDecimal cannot preserve precision (trailing zeros drop). */
    assertEquals(new BigDecimal("1234.1234"), entity.getF1());
    close();
}
<commit_msg>Update attribute to follow pre-established testing patterns
<commit_before>import { Box } from "@artsy/palette"
import { findCurrentRoute } from "Artsy/Router/Utils/findCurrentRoute"
import { NavBar } from "Components/NavBar"
import { isFunction } from "lodash"
import React, { useEffect } from "react"
import createLogger from "Utils/logger"
const logger = createLogger("Apps/Components/AppShell")
export const AppShell = props => {
  const { children, match } = props
  const routeConfig = findCurrentRoute(match)

  /**
   * Check to see if a route has a prepare key; if so call it. Used typically to
   * preload bundle-split components (import()) while the route is fetching data
   * in the background. Errors thrown by prepare are logged, not rethrown.
   */
  useEffect(() => {
    if (isFunction(routeConfig.prepare)) {
      try {
        routeConfig.prepare()
      } catch (error) {
        logger.error(error)
      }
    }
  }, [routeConfig])

  /**
   * Let our end-to-end tests know that the app is hydrated and ready to go.
   * Runs once after the first client render.
   */
  useEffect(() => {
    document.body.setAttribute("data-test-ready", "")
  }, [])

  return (
    <Box width="100%">
      <Box pb={6}>
        <Box left={0} position="fixed" width="100%" zIndex={100}>
          <NavBar />
        </Box>
      </Box>
      <Box>
        <Box>{children}</Box>
      </Box>
    </Box>
  )
}
<commit_after>import { Box } from "@artsy/palette"
import { findCurrentRoute } from "Artsy/Router/Utils/findCurrentRoute"
import { NavBar } from "Components/NavBar"
import { isFunction } from "lodash"
import React, { useEffect } from "react"
import createLogger from "Utils/logger"
const logger = createLogger("Apps/Components/AppShell")
export const AppShell = ({ children, match }) => {
  const routeConfig = findCurrentRoute(match)

  /**
   * Invoke the matched route's optional `prepare` hook. Typically used to warm
   * up bundle-split components (import()) while route data is being fetched in
   * the background; any error it throws is logged rather than propagated.
   */
  useEffect(() => {
    if (isFunction(routeConfig.prepare)) {
      try {
        routeConfig.prepare()
      } catch (error) {
        logger.error(error)
      }
    }
  }, [routeConfig])

  /**
   * Flag the document as hydrated so end-to-end tests know the app is ready.
   */
  useEffect(() => {
    document.body.setAttribute("data-test", "AppReady")
  }, [])

  return (
    <Box width="100%">
      <Box pb={6}>
        <Box left={0} position="fixed" width="100%" zIndex={100}>
          <NavBar />
        </Box>
      </Box>
      <Box>
        <Box>{children}</Box>
      </Box>
    </Box>
  )
}
|
Sawant beat the establishment at its own game. Now comes the hard part.
A united corporate front couldn’t save Sawant's opponent.
The race between Seattle City Council member Kshama Sawant of the Socialist Alternative party and her challenger, Seattle Urban League President and CEO Pamela Banks that attracted national attention and set records for nearly $1 million in funding was over before it began.
In August Sawant handily won a five-candidate primary, and Banks came in second after business interests had united behind her as their best hope to unseat the socialist who played the central role in Seattle’s $15 minimum-wage bill and is now taking on skyrocketing housing costs in the Emerald City.
As the first results trickled in on election night, November 3, showing Sawant leading 53 percent to 47 percent , Banks delivered the rote non-concession concession, “I am proud of the race that we ran,” and thanked three City Council members by name who coached her through her first race. Banks’ son, his eyes cast down, wrapped her in a full-body embrace as the enthusiasm of a few score supporters leaked out of a small coffee shop in the Capitol Hill neighborhood the two candidates were vying to represent.
Ballots are mailed to voters weeks before the election, so the final results won’t be announced until November 24. David Goldstein, one of the best local political handicappers, told me earlier he expected Sawant to get 52 to 53 percent, which would indicate she had it in the bag. He explained that with the campaign’s impressive ground game—which included more than 70 people door-knocking in a rainstorm days before the official vote—Sawant’s share would likely rise to the high fifties by the time all the ballots were counted.
The Sawant campaign and Socialist Alternative knew for two years the business and political class was hoping to oust her. In a wide-ranging election night interview with In These Times, Sawant said that when she was first elected in 2013, “The establishment didn’t take us seriously. They said, ‘What will a group of young radical socialists amount to? They won’t get anything done.’ We showed them the most transformative two years in Seattle politics.”
Sawant said accomplishments included the bill phasing in a $15 minimum wage by 2021 , changing Columbus Day to Indigenous People’s Day , pushing the City Council to ask the state to lift its ban on local rent control to address Seattle rents that have increased on average 80 percent since early 2010 and endorsing a move toward zero-percent incarceration of youth .
Sawant said when they began gearing up for the 2015 election, “I had no doubt the establishment would run a woman of color. It’s a tried-and-true tactic.”
Enter Pamela Banks. This was her first race, her platform was flimsy, but support and money came pouring in. To find out who would support Banks in one of the most left-leaning neighborhoods in a liberal city like Seattle, I attended her election night party. The crowd was evenly split between African Americans and whites, though there was little mingling among the two groups. Blazers, coiffed hair, cashmere and designer handbags were all well represented. I spoke to a retired economist, who confided with a grimace, “All my friends are with Sawant.” A retired professor interjected, “Me too, me too.” The economist added, “It’s scary, scary.”
A staffer with a major Seattle-area foundation said she told Banks, “You’re getting all this money because you are the anti-Sawant candidate.” One supporter, who hosted a fundraiser for Banks at his house, said Banks got enthusiastic backing from his wife because “she was very anti the opponent.”
Being the anti-Sawant was good enough to earn Banks more than $385,000 in campaign funds, a colossal sum for a novice in a local race. It also got Banks the endorsement from major media like the Seattle Times and, in an unprecedented move, backing from six of nine of the other City Council members. Banks was the darling of developers, restaurateurs, and corporate executives who were among the 200 donors that gave the maximum of $700 to her campaign.
A united corporate front couldn’t save Banks, however. But that may not have mattered to her as she earned points for being the sacrificial lamb. One supporter said, “Speaking honestly, if Banks loses that was the expectation, … It’s a win-win for her. She got a lot of exposure, a lot of people embraced her. I think she has a real future.”
Sawant’s supporters said their biggest enemy was complacency. Everyone expected Sawant to win in November after she pulled 50 percent of the vote in the primary in which she and Banks advanced to the general election. Campaign volunteers said local unions that pulled out the stops for Sawant in the primary scaled back to mailers and phone banking in the general election, but did not deploy volunteers or staff in the field.
Despite elite support, Banks received only $43,000 in third-party independent expenditures, not the hundreds of thousands of dollars many expected to flood in against Sawant. It appears to have been an astute calculation as corporate money flowed to other races as a rearguard action. Business interests seem to have conceded the race against Sawant, focusing instead on preventing her from gaining more allies on the council.
Sawant’s triumph in 2013, and her role and that of Socialist Alternative in turning $15 Now from a slogan into law, inspired like-minded candidates to run for council seats as well. With most City Council positions now elected by district rather than at-large, as was the case with Sawant's previous election, all nine seats were up for re-election and three lacked an incumbent. Potential Sawant allies included Jon Grant, the former executive director of the Tenants Union of Washington State, who took on City Council President Tim Burgess, and Lisa Herbold, who was looking to succeed Sawant’s most stalwart ally on the council, Nick Licata, running against Shannon Braddock. More than $349,000 in independent expenditures surged behind Burgess and Braddock, helping propel them to comfortable leads after initial returns were announced election night.
Sawant said for the business community, “Their task was not only to try to defeat me but all the other progressive candidates as well. I think as a matter of strategy they focused their money on the Burgess-Grant race, who was very important for them to defeat.”
Such was the full-court press for Burgess he was also endorsed by the M.L. King County Labor Council. Sawant lamented organized labor’s failure to throw its weight behind all progressive candidates. “For decades the labor movement has been playing it safe, let’s not rock the boat, endorse the Democrats,” Sawant said. “The mistake we can make at this point is not to understand that working people don’t want to play it safe.”
That Sawant proved she was no fluke was a testament to her genuine appeal and Socialist Alternative’s organization. Behind the scenes at the victory party, even when Sawant was quietly conferring with an aide, all eyes were drawn toward her.
Banks' side recognized they were outmatched as well. A Banks supporter and former Washington State legislator said of Sawant’s campaign: “The other side had a lot of foot soldiers. They had a lot of people in the trenches,” he said.
Sawant’s campaign mobilized over 600 volunteers, knocked on more than 90,000 doors, placed more than 170,000 phone calls and raised more than $450,000, with no money from corporate backers. It also engaged in a sophisticated voter ID operation, slicing up the Capitol Hill district into scores of blocs according to turnout and what percentage voted for Sawant in the August primary. The campaign then identified likely voters, ranked them on their degree of support for Sawant, and engaged them multiple times to turn them into votes.
It was a concrete example of how labor, when well organized, can beat capital. Banks' campaign expenditures were mostly on consultants and advertising, while Sawant’s went toward labor and literature .
Such intensive retail politics is a rare type of organizing experience for the radical left these days. Dozens of Socialist Alternative members from across the country decamped to Seattle to work on the campaign, with some volunteering for months. The Sawant campaign did not have to re-invent the wheel either. As part of the Committee for a Workers International, Socialist Alternative also drew on the membership and electoral experience of parties in other countries to assist them in organizing and executing a successful electoral strategy.
That’s not to say the Socialist Alternative side doesn’t know how to have fun. While the Banks party was like a country club and most attendees were absorbed with their cellphones and laptops, the Sawant event was young, scruffy and boisterous. It was also ready to bust loose after months of 14-hour days. Giddiness fueled an all-night after-party in a campaign office cum flophouse with sweaty dancing, collegiate drinking games, and shot-gunning beers.
It was a brief respite before the next slog. Having lost her main ally, Sawant may be even more isolated on the council. But she still has big plans. In addition to not letting up on rent control or enforcing the minimum-wage bill—wage theft is still widespread in Seattle—Sawant wants to end “the Comcast cable monopoly” by pushing for municipal broadband , address the homeless crisis that has seen a 21 percent jump in the homeless count in only one year and enact a “millionaire’s tax” to improve Seattle’s ramshackle mass transit system. There are also plans for public rallies to encourage people to join Socialist Alternative as well as a conference in February 2016 “for a strategy for fighting the billionaire class.”
Sawant may get help in the future via an innovative ballot measure for clean elections that sailed to victory. It could shift the council away from a pro-business, pro-developer orientation as it limits campaign contributions and lobbying, and gives voters $100 in “democracy vouchers” that can be sent to candidates. The hope is to reduce the influence of money while increasing the influence of voters.
But for Sawant elections are just a means to the end of “building worker power.” By way of example, she points to how difficult the fight will be to take on Comcast. “Young people will see corporations like Comcast will not just back down. They will see how serious of a task it will be to build a movement.”
She rejects the role of saviour, however. She explained that in the case of the housing crisis, government has a vital role to play in cracking down on slumlords, passing a strong tenants rights bill and rent control, but the message is, “I’m not going to solve your problem, the message is, you need to get organized.”
If that happens, Seattle may become an incubator for 21st-century socialism. |
# Answer q range-sum queries over the alphabet weights of string s,
# where 'a' = 1, 'b' = 2, ..., 'z' = 26, using a prefix-sum table.
n, q = map(int, input().split(' '))
s = input()

prefix = [0]
running = 0
for ch in s:
    running += ord(ch) - 96
    prefix.append(running)

for _ in range(q):
    l, r = map(int, input().split(' '))
    print(prefix[r] - prefix[l - 1])
#include <gtest/gtest.h>
#include <functional>
#include <sigc++/connection.h>
#include <sigc++/signal.h>
using namespace std;
TEST (SignalTests, nothingConnected)
{
sigc::signal<void> sig;
sig();
}
TEST (SignalTests, singleConnected)
{
sigc::signal<void> sig;
bool wasCalled = false;
sig.connect([&] { wasCalled = true; });
EXPECT_FALSE ( wasCalled );
sig();
EXPECT_TRUE ( wasCalled );
}
TEST (SignalTests, multipleConnected)
{
    // Every connected slot fires once per emission.
    sigc::signal<void> signal;
    int invocations = 0;
    auto bump = [&invocations] { invocations++; };
    signal.connect(bump);
    signal.connect(bump);
    signal();
    EXPECT_EQ ( 2, invocations );
}
TEST (SignalTests, connections)
{
    // A connection handle can report, block and unblock its slot.
    sigc::signal<void> signal;
    int invocations = 0;
    sigc::connection handle = signal.connect([&invocations] { invocations++; });

    signal();
    EXPECT_EQ ( 1, invocations );
    EXPECT_TRUE ( handle.connected() );

    // While blocked, an emission must not reach the slot.
    handle.block();
    signal();
    EXPECT_EQ ( 1, invocations );
    EXPECT_TRUE ( handle.blocked() );

    // Unblocking alone does not invoke the slot ...
    handle.unblock();
    EXPECT_EQ ( 1, invocations );
    EXPECT_FALSE ( handle.blocked() );

    // ... but the next emission does.
    signal();
    EXPECT_EQ ( 2, invocations );
}
|
#include "stdafx.h"
#include "Snap.h"
#include<iostream>
using namespace std;
// Returns the current local time formatted as "YYYY-MM-DD." followed by the
// locale's time representation (%X, typically HH:MM:SS).
const std::string currentDateTime() {
    char stamp[80];
    time_t ticks = time(0);
    struct tm parts = *localtime(&ticks);
    // See http://en.cppreference.com/w/cpp/chrono/c/strftime
    // for more information about date/time format specifiers.
    strftime(stamp, sizeof(stamp), "%Y-%m-%d.%X", &parts);
    return stamp;
}
// Loads an edge list into a table, builds the bipartite graph and (when
// compiled with GCC_ATOMIC) prints the K-nearest-neighbour Jaccard
// similarity edges as "src dst sim" triples.
// Usage: <prog> <edge-list-file> <K>
int main(int argc,char* argv[]) {
	// argv[1] and argv[2] are dereferenced below; fail loudly when missing
	// instead of crashing.
	if (argc < 3) {
		cerr<<"Usage: "<<argv[0]<<" <edge-list-file> <K>"<<endl;
		return 1;
	}
	TTableContext Context;
	//Create schema
	//Input File Format Source,Dest,Start_Time,Duration
	// NOTE(review): only the first two columns (Source, Dest) are declared
	// and loaded here — confirm whether the extra columns are intentional.
	Schema TimeS;
	TimeS.Add(TPair<TStr,TAttrType>("Source",atInt));
	TimeS.Add(TPair<TStr,TAttrType>("Dest",atInt));
	PTable P = TTable::LoadSS(TimeS,argv[1],&Context,' ');
	int K = atoi(argv[2]);
	cerr<<"Table Loaded "<<currentDateTime()<<endl;
	// Columns 0 and 1 (Source, Dest) become the two node sets.
	PNGraph G = GetBiGraph(P, 0, 1);
	cerr<<"Graph Generated "<<currentDateTime()<<endl;
#ifdef GCC_ATOMIC
	PNEANet KNN = KNNJaccardParallel(G, K);
	for (TNEANet::TEdgeI EI = KNN->BegEI(); EI < KNN->EndEI(); EI++ ){
		cout<<EI.GetSrcNId()<<" "<<EI.GetDstNId()<<" "<<KNN->GetFltAttrDatE(EI.GetId(), "sim")<<endl;
	}
#endif
	return 0;
}
|
Slow Burn
Though American and European leaders have yet to make good on threats to isolate Russia, the Ukraine crisis could still punish Russia’s already struggling economy.
Russian President Vladimir Putin laughed off the United States’ move to sanction a bank and several businessmen from his inner circle, promising to open a new account at Bank Rossiya, the first financial institution to find itself in Western crosshairs. "I personally didn’t have an account there, but I’ll definitely open an account there on Monday," he joked.
That kind of bravado comes naturally to Putin, and the initial sets of American and European sanctions were too weak to prevent him from annexing the Crimean peninsula and may not, NATO officials worry, be enough to dissuade him from invading eastern Ukraine.
That’s not to say that the sanctions are completely toothless. They may not do immediate damage to the Russian economy, but analysts say they could turn potential investors away at a time when Russia dearly needs foreign money.
"The real potential damage to Russia’s economic future is self inflicted," said Chris Weafer of Moscow-based consultancy Macro-Advisory, in a recent research note. "The real damage from a prolonged conflict in Ukraine," Weafer said, "may be to radically slow the inflow of much needed investment capital." Weafer recently cut his forecast for the Russian economy in 2014 from 1.9 percent to 1 percent growth.
Investors’ cooling interest in Russia could make it more expensive for Russia to borrow money in international markets. Rating firms Standard & Poor’s and Fitch Ratings both downgraded Russia’s outlook from stable to negative, after the U.S. rolled out new sanctions Thursday. The Russian Finance Ministry has said it might delay plans to sell $7 billion in Russian sovereign bonds this year. Russian Finance Minister Anton Siluanov acknowledged Friday that Russia’s borrowing costs are going up.
"The imposed sanctions are definitely negatively affecting the general perception of our country’s economy," Siluanov said to reporters in Moscow, according to Bloomberg.
The falling value of Russia’s currency could also be a source of pain. Companies that have borrowed money abroad and Russians planning their summer vacations could be forced to pay more, if the ruble continues its decline. It has fallen nearly 10% against the dollar since the beginning of the year.
The impact on Russia’s stock market is harder to gauge, but there were initial signs sanctions were having at least a minor impact. The Russian benchmark Micex index fell almost 3 percent Friday, a day after President Obama blacklisted 20 top Russian officials and businessmen. The U.S. sanctions on Bank Rossiya were Washington’s first against a Russian financial institution, and the move caused Visa and Mastercard to stop processing payments for the blacklisted bank and three of its affiliates Friday. Moscow responded by barring an array of powerful lawmakers and Obama administration officials from entering Russia. Putin indicated that he didn’t think Russia needed to pursue other forms of retaliation, but Russian Deputy Foreign Minister Grigory Karasin said Russia was considering additional measures, according to Reuters.
That was the second exchange of sanctions and mocking responses between the U.S. and Russia this week, after Moscow annexed Crimea, the Ukrainian province that voted to break away from the country on Sunday. Obama upped the ante Thursday by laying out a plan for a broad trade blockade that — if carried out — could cripple Russia’s energy or banking sectors. The U.S. is unlikely to go down that road unless Russia moves to take over broad swathes of eastern Ukraine. Even if the U.S. acted, meanwhile, it’s not clear Europe would follow. The continent’s leaders have expressed clear reluctance to levy broad punitive measures on Russia because of the boomerang effect they’d have on European trade.
While the back-and-forth over Russia’s annexation of Crimea has at times hit the volatile Russian stock market, broader indicators about the Russian economy could be more worrisome. Russian stocks plunged 10 percent after Russia invaded Crimea, for instance, but bounced back immediately on the weak response from the West. The bigger concern for Russian leaders could be that the unrest makes bankers and money managers reconsider Russia as a place to make long term investments.
In the first two months of this year, investors have already moved more than $30 billion out of the country because of the standoff, Weafer said. That compares to $63 billion in all of 2013. Part of that amount is just the regular flow of money from cross-border trade, but Weafer estimates that at least a quarter of it is personal wealth.
"Russia has two problems: one is attracting foreign investment and the other is convincing Russians to keep their money in Russia and invest it there," said Weafer, speaking on the phone from Abu Dhabi.
The new sanctions against businessmen and politicians close to Putin haven’t shut down the Russian economy, but they are making a bad situation worse by slashing the value of the ruble and making foreign investors think twice about putting their money into Russian companies.
The value of the ruble could be the most important indicator inside Russia, said Weafer. He said Russians are extremely sensitive to currency fluctuations because of memories of shocks in the 1990s that crushed the ruble, including the 1998 banking crisis when Russia had to default on its debt.
"You cannot walk down a street in a Russian town without being aware of what the currency rate is," Weafer said. "The yellow currency exchange signs are as familiar as the golden arches in the U.S."
Win Thin, global head of emerging-market currency strategy at Brown Brothers Harriman, had a slightly more optimistic view. He said the Russian economy could muddle through at a growth rate of 1 to 1.5 percent and investors would likely move on — as long as Putin doesn’t go any further.
"If he’s stopping at Crimea, which I think he is, then I think people just kind of forget about it," Thin said.
After all, Russia is still considered an emerging market, so investors may expect a bit of volatility.
"The Russian economy is like a large old tanker," said Mujtaba Rahman, head Europe analyst at risk consultancy Eurasia Group. "These developments are tantamount to more rust, but, at current, are unlikely to represent a serious inflection point." |
<filename>src/characters-challenges/dto/create-characters-challenge.dto.ts<gh_stars>0
import { Achievement } from 'src/achievements/entities/achievement.entity';
import { Challenge } from 'src/challenges/entities/challenge.entity';
import { Character } from 'src/characters/entities/character.entity';
/**
 * Data-transfer object for creating a character-challenge record,
 * linking a character to a challenge and the resulting achievement.
 */
export class CreateCharactersChallengeDto {
  /** Record identifier. */
  id: number;
  /** Character class name — presumably the RPG class; confirm against callers. */
  class: string;
  /** Character level. */
  level: number;
  /** Accept counter — presumably times the challenge was accepted; TODO confirm. */
  accepts: number;
  /** Failure counter — presumably failed attempts; TODO confirm. */
  fails: number;
  /** When the challenge period starts. */
  start_date: Date;
  /** When the challenge period ends. */
  end_date: Date;
  /** Achievement associated with this challenge. */
  achievement: Achievement;
  /** Character undertaking the challenge. */
  character: Character;
  /** The challenge itself. */
  challenge: Challenge;
}
|
def priority_task(self):
if self.is_queue is False:
for f, s in history.items():
if s["state"] == "pending":
heapq.heappush(self.pending_queue, (s["creating_ts"], f))
elif s["state"] == "finished":
heapq.heappush(self.finished_queue, (s["finishing_ts"], f))
elif s["state"] == "aborted":
heapq.heappush(self.aborted_queue, (s["finishing_ts"], f))
self.is_queue = True
if self.aborted_queue:
return heapq.heappop(self.aborted_queue)[1]
elif self.finished_queue:
return heapq.heappop(self.finished_queue)[1]
elif self.pending_queue:
return heapq.heappop(self.pending_queue)[1]
else:
return |
Application of High-band UWB Body Area Network to Medical Vital Sensing in Hospital
We evaluated a prototype high-band UWB-BAN using several vital-information sensors in a hospital. We confirmed by experiments that setting one coordinator near the ceiling, or at the back or front of the bed, and another coordinator under the bed is a better solution when the patient lies on a bed. A helical antenna with a high antenna gain along the axial direction, when used in the coordinator, showed better radiation performance and less dependence on the direction of the sensor node antenna than a biconical antenna. Next, we applied a new version of the prototype, with a smaller hub and sensor node, to more mobility models in a hospital, attaching a hub and sensor nodes on the body, and confirmed that the communication worked well. We also examined the interference from other wireless communication systems and medical devices, and by numerical analysis optimized the sequence length to achieve the maximum data rate while satisfying the packet rate and suppressing interference under multiple BANs.
    def __rebuild_crashed_jobs(self, crashed_jobs):
        """Regroup crashed CESAR job command lines into memory buckets.

        Each element of `crashed_jobs` is a whitespace-separated shell command.
        Jobs are re-emitted under the memory bucket that fits their memory
        limit; non-fragmented jobs are additionally split into one command per
        chain id.

        Returns a dict mapping bucket limit -> list of command strings.
        """
        bucket_to_jobs = defaultdict(list)
        # Configured memory buckets; fall back to the single overall limit.
        buckets = [int(x) for x in self.cesar_buckets.split(",") if x != ""]
        if len(buckets) == 0:
            buckets.append(self.cesar_mem_limit)
        for elem in crashed_jobs:
            elem_args = elem.split()
            # Positional argument 2 carries the chain id(s) for this job.
            chains_arg = elem_args[2]
            chain_ids = self.__read_chain_arg(chains_arg)
            if chain_ids is None:
                continue
            # The memory-limit value follows the MEMLIM_ARG flag.
            memlim_arg_ind = elem_args.index(MEMLIM_ARG) + 1
            mem_val = float(elem_args[memlim_arg_ind])
            bucket_lim = self.__get_bucket_val(mem_val, buckets)
            if FRAGM_ARG in elem_args:
                # Fragmented-genome job: keep the chain list as a single job.
                cmd_copy = elem_args.copy()
                cmd_str = " ".join(cmd_copy)
                bucket_to_jobs[bucket_lim].append(cmd_str)
                continue
            # Otherwise split the job: one command per chain id, rewriting
            # the chains argument in place.
            for chain_id in chain_ids:
                cmd_copy = elem_args.copy()
                cmd_copy[2] = str(chain_id)
                cmd_str = " ".join(cmd_copy)
                bucket_to_jobs[bucket_lim].append(cmd_str)
        return bucket_to_jobs
/**
* Implementation of UserDetailsService interface for authentication handling.
*
* @author anaelcarvalho
* @see org.springframework.security.core.userdetails.UserDetailsService
*/
@Service
public class UserDetailsServiceImpl implements UserDetailsService {
@Autowired
private UserService userService;
@Override
public UserDetails loadUserByUsername(String login) throws UsernameNotFoundException {
UserLogin userLogin = userService.getUserLoginByLogin(login);
if(userLogin == null) {
throw new UsernameNotFoundException("User " + login + " not found");
}
Set<GrantedAuthority> grantedAuthorities = new HashSet<>();
grantedAuthorities.add(new SimpleGrantedAuthority(userLogin.getRole()));
return new org.springframework.security.core.userdetails.User(userLogin.getLogin(), userLogin.getPassword(), grantedAuthorities);
}
} |
<reponame>CeliaRozalenM/poopy<filename>APP/src/app/services/bins.service.ts
import { Injectable } from '@angular/core';
import { Bin } from '../bin';
import { Observable, of } from 'rxjs';
import { HttpClient, HttpHeaders } from '@angular/common/http';
@Injectable({
  providedIn: 'root'
})
/**
 * HTTP client service for bin data.
 *
 * All requests go to the backend identified by {@link apiBase}; the base URL
 * is factored into a single field so it is not duplicated per endpoint.
 */
export class BinsService {

  constructor(private http: HttpClient) { }

  /** Headers for JSON-encoded request bodies. */
  private httpOptions = {
    headers: new HttpHeaders({ 'Content-Type': 'application/json' })
  };

  /** Base URL of the backend API (was hard-coded in two places). */
  private readonly apiBase = 'http://localhost:3000';

  /** URL to the bins web api. */
  private binUrl = this.apiBase + '/bins';

  /** Fetches every bin. */
  getBinData(): Observable<Array<Bin>> {
    return this.http.get<Bin[]>(this.binUrl);
  }

  /** Fetches the data of a single bin by its id. */
  getOneBinInfo(binId: string): Observable<Array<Bin>> {
    return this.http.get<Bin[]>(this.binUrl + '/' + binId);
  }

  /** Updates whether the given bin currently has bags available. */
  updateBinBags(binId: string, hasBags: boolean): Observable<Bin> {
    return this.http.put<Bin>(this.binUrl + '/bags', { 'id': binId, 'info': hasBags.toString() });
  }

  /** Fetches bins for a user; the payload is POSTed as-is (shape set by caller). */
  getBinDataByUser(binId): Observable<Array<Bin>> {
    return this.http.post<Bin[]>(this.apiBase + '/binUser', binId, this.httpOptions);
  }
}
|
/**
* Draw a line between nodes
* Lines could be drawn as straight line or as a cubic curve
* Lines aren't considered design nodes (dnodes) as such
* It doesn't inherit from dnode
* Should be move to a new package
*/
public class Line extends Path
{
double startX;
double startY;
double endX;
double endY;
double controlX1;
double controlY1;
double controlX2;
double controlY2;
double arrowHeadSize;
private Color color;
// Assign them
private IConnectable from = null;
private IConnectable to = null;
private MoveTo moveTo = null;
private CubicCurveTo cubicCurveTo = null;
private LineTo lineTo = null;
private static final double defaultArrowHeadSize = 3.0;
public Line(double startX, double startY, double endX, double endY,
double controlX1, double controlY1 , double controlX2, double controlY2, double arrowHeadSize, Color color){
super();
this.color = color;
this.startX = startX;
this.startY = startY;
this.endX = endX;
this.endY = endY;
this.controlX1 = controlX1;
this.controlY1 = controlY1;
this.controlX2 = controlX2;
this.controlY2 = controlY2;
this.arrowHeadSize = arrowHeadSize;
// Stop[] stops = new Stop[] { new Stop(0, Color.VIOLET), new Stop(1, Color.DARKCYAN)};
// LinearGradient lg = new LinearGradient(0, 0, 1, 0, true, CycleMethod.NO_CYCLE, stops);
setStroke(this.color);
setStrokeWidth(2);
setFill(null);
setEffect(new Bloom());
moveTo = new MoveTo(startX, startY);
cubicCurveTo = new CubicCurveTo(controlX1, controlY1, controlX2, controlY2, endX, endY);
lineTo = new LineTo(endX, endY);
getElements().add(moveTo);
if(Math.abs(endY - startY) > 40)
getElements().add(cubicCurveTo);
else
getElements().add(lineTo);
}
public Line(double startX, double startY, double endX, double endY,
double controlX1, double controlY1 , double controlX2, double controlY2, Color color){
this(startX, startY, endX, endY,
controlX1, controlY1, controlX2, controlY2, defaultArrowHeadSize, color);
}
public Line(IConnectable nodeFrom, IConnectable nodeTo, Color color)
{
this( nodeFrom.getX()+NodeConstants.nodeSize1,
nodeFrom.getY()+NodeConstants.nodeSize1/2,
nodeTo.getX(),
nodeTo.getY()+NodeConstants.nodeSize1/2,
nodeFrom.getX()+NodeConstants.nodeSize1+100,
nodeFrom.getY()+NodeConstants.nodeSize1/2,
nodeTo.getX()-100,
nodeTo.getY()+NodeConstants.nodeSize1/2,
color);
from = nodeFrom;
to = nodeTo;
from.setLine(this);
to.setLine(this);
}
public IConnectable getNodeFrom() { return from; }
public IConnectable getNodeTo() { return to; }
public void changeLinePosition()
{
getElements().clear();
this.startX = from.getX()+NodeConstants.nodeSize1;
this.startY = from.getY()+NodeConstants.nodeSize1/2;
this.controlX1 = from.getX()+NodeConstants.nodeSize1+100;
this.controlY1 = from.getY()+NodeConstants.nodeSize1/2;
this.endX = to.getX();
this.endY = to.getY()+NodeConstants.nodeSize1/2;
this.controlX2 = to.getX()-100;
this.controlY2 = to.getY()+NodeConstants.nodeSize1/2;
moveTo = new MoveTo(startX, startY);
cubicCurveTo = new CubicCurveTo(controlX1, controlY1, controlX2, controlY2, endX, endY);
lineTo = new LineTo(endX, endY);
getElements().add(moveTo);
if(Math.abs(endY - startY) > 40)
getElements().add(cubicCurveTo);
else
getElements().add(lineTo);
}
public void removeLine()
{
from.removeLine();
to.removeLine();
}
} |
def domains_unique(self, x):
    """Return the unique aliased names of the index levels of ``self[x]``.

    Each level name of ``self[x].index`` is mapped through ``self.alias``;
    duplicates are then dropped and the result is returned as a list
    (sorted, following ``np.unique`` semantics).
    """
    aliased = [self.alias(level) for level in self[x].index.names]
    return np.unique(aliased).tolist()
Computationally Tractable Riemannian Manifolds for Graph Embeddings
Representing graphs as sets of node embeddings in certain curved Riemannian manifolds has recently gained momentum in machine learning due to their desirable geometric inductive biases, e.g., hierarchical structures benefit from hyperbolic geometry. However, going beyond embedding spaces of constant sectional curvature, while potentially more representationally powerful, proves to be challenging as one can easily lose the appeal of computationally tractable tools such as geodesic distances or Riemannian gradients. Here, we explore computationally efficient matrix manifolds, showcasing how to learn and optimize graph embeddings in these Riemannian spaces. Empirically, we demonstrate consistent improvements over Euclidean geometry while often outperforming hyperbolic and elliptical embeddings based on various metrics that capture different graph properties. Our results serve as new evidence for the benefits of non-Euclidean embeddings in machine learning pipelines.
Introduction
Before representation learning started gravitating around deep representations (Bengio et al., 2009) in the last decade, a line of research that sparked interest in the early 2000s was based on the so called manifold hypothesis (Bengio et al., 2013). According to it, real-world data given in their raw format (e.g., pixels of images) lie on a low-dimensional manifold embedded in the input space. At that time, most manifold learning algorithms were based on locally linear approximations to points on the sought manifold -such as LLE (Roweis & Saul, 2000) and Isomap (Tenenbaum et al., 2000) -and/or spectral methods -such as MDS (Hofmann & Buhmann, 1995) and graph Laplacian eigenmaps (Belkin & Niyogi, 2002 Back to recent years, two trends are apparent: (i) the use of graph-structured data and their direct processing by machine learning algorithms (Bruna et al., 2014;Henaff et al., 2015;Defferrard et al., 2016;Grover & Leskovec, 2016), and (ii) the resurgence of the manifold hypothesis, but with a different flavor: being explicit about the assumed manifold and the inductive bias that it entails; e.g., hyperbolic spaces (Nickel & Kiela, 2017;2018;Ganea et al., 2018), spherical spaces (Wilson et al., 2014), and Cartesian products of them (Gu et al., 2018;Tifrea et al., 2019;Skopek et al., 2020). While for the first two the choice can be a priori justified -e.g., complex networks are intimately related to hyperbolic geometry (Krioukov et al., 2010) -the last one is motivated through the presumed flexibility coming from its varying curvature. Our work takes that hypothesis further by exploring the representation properties of several irreducible spaces 1 of non-constant sectional curvature. We use, in particular, Riemannian manifolds where points are represented as specific types of matrices and which are at the sweet spot between semantic richness and tractability.
With no additional qualifiers, graph embedding is a vaguely specified intermediary step used as part of systems solving a wide range of graph analytics problems (Nie et al., 2017;Wang et al., 2017;Wei et al., 2017;Zhou et al., 2017). What they all have in common is the representation of certain parts of a graph as points in a continuous space. The desired mathematical properties depend on the problem setting. Classically, the Euclidean space has been ubiquitous due to its interpretability and structure: inner product, metric, and, very conveniently for compositional models, linearity.
As a particular instance of that general task, here we embed nodes of graphs with structural information only (i.e., undirected and without node or edge labels), as the one shown in Figure 1, in novel curved spaces, by leveraging the closedform expressions of the corresponding Riemannian distance between embedding points; the resulting geodesic distances enter a differentiable objective function which "compares" them to the ground-truth metric given through the nodeto-node graph distances. We focus on the representation capabilities of the considered matrix manifolds relative to the previously studied spaces by monitoring graph reconstruction metrics. We note that preserving graph structure is essential to downstream tasks such as link prediction (Trouillon et al., 2016) or node classification (Wang et al., 2017).
Our main contributions are (i) the introduction of two families of matrix manifolds for graph embedding purposes: the non-positively curved spaces of symmetric positive definite (SPD) matrices, and the compact, non-negatively curved Grassmann manifolds; (ii) reviving Stochastic Neighbor Embedding (SNE) (Hinton & Roweis, 2003) in the context of Riemannian embeddings and showing that it unifies, on the one hand, the loss functions based on the reconstruction likelihood of local graph neighborhoods and, on the other hand, the global, all-pairs stress functions used for global metric recovery; (iii) a generalization of the usual ranking-based metric to quantify reconstruction fidelity beyond immediate neighbors; (iv) a comprehensive experimental comparison of the introduced manifolds against the baselines in terms of their graph reconstruction capabilities, focusing on the impact of curvature.
Preliminaries & Background
Notation Let G = (X, E, w) be an undirected graph, with X the set of nodes, E the set of edges, and w : E → R + the edge-weighting function. Let m = |X|. We denote by d G (x i , x j ) the shortest path distance between nodes x i , x j ∈ X, induced by w. The node embeddings are 2 Y = {y i } i∈ ⊂ M and the geodesic distance function is d M (y i , y j ), with M -the embedding space -a Riemannian manifold. N (x i ) denotes the set of neighbors of node x i .
Riemannian Geometry
A brief but comprehensive account of the fundamental concepts from Riemannian geometry is included in Appendix A. We use $i \in [m]$ as a short-hand for $i \in \{1, 2, \ldots, m\}$.
Informally, an n-dimensional manifold M is a space that locally resembles R n . Each point x ∈ M has attached a tangent space T x M -a vector space that can be thought of as a first-order local approximation of M around x. The Riemannian metric ·, · x is a collection of inner products on these tangent spaces that vary smoothly with x. It makes possible measuring geodesic distances, angles, and curvatures. The different notions of curvature quantify the ways in which a surface is locally curved around a point. The exponential map is a function exp x : T x M → M that can be seen as folding or projecting the tangent space onto the manifold. Its inverse is called the logarithm map, log x (·).
Learning Framework
The embeddings are learned in the framework used in prior work (Nickel & Kiela, 2017;Gu et al., 2018) in which a loss function L depending on the embedding points solely via the (Riemannian) distances between them is minimized using stochastic Riemannian optimization (Bonnabel, 2013;Becigneul & Ganea, 2019). In this respect, the following general property is useful (Lee, 2006): for any point x on a Riemannian manifold M and any y in a neighborhood of x, we have ∇ R x d 2 (x, y) = −2 log x (y). 3 Hence, as long as L is differentiable with respect to the (squared) distances, it will also be differentiable with respect to the embedding points. The specifics of L are deferred to Section 4.
Model Spaces & Cartesian Products
The model spaces of Riemannian geometry are manifolds with constant sectional curvature K: (i) Euclidean space (K = 0), (ii) hyperbolic space (K < 0), and (iii) elliptical space (K > 0). We summarize the Riemannian geometric properties of the last two in Appendix B. They are used as baselines in our experiments (Section 5).
We also recall that given a set of manifolds {M i } k i=1 , the product manifold M = × k i=1 M i has non-constant sectional curvature and can be used for graph embedding purposes as long as each factor has efficient closed-form formulas for the quantities of interest (Gu et al., 2018).
Measuring Curvature
Curvature properties are central to our work since they set apart the matrix manifolds discussed in Section 3. Here, we review several analogous concepts defined for graphs. Graphs are different mathematical abstractions but yet similar in many aspects (through, e.g., Laplace operators and heat kernels). Furthermore, we introduce a simple method for quantifying space curvature around a set of embeddings.
Geometric Properties of Graphs We use the following geometry-inspired graph properties throughout this work (details in Appendix C): • Ollivier-Ricci curvature. Introduced by (Ollivier, 2009) for general metric spaces and specialized in (Lin et al., 2011) to graphs, it is defined for pairs of neighbors (u, v) and is inspired by the continuous Ricci curvature. An analogue to the Riemannian scalar curvature at u is obtained by averaging its value for all neighbors v. Intuitively, a negative value means the edge/node is part of the backbone, i.e., the graph would get disconnected if it were removed. See Figure 1.
• δ-hyperbolicity (Gromov, 1987). It quantifies the hyperbolicity of a given metric space: the smaller δ, the more hyperbolic-like (or negatively-curved) the space. It is based on the following insight: geodesic triangles are "slim" in negatively curved spaces and "thick" in positively curved ones. See Figure 2.
Sum-of-Angles in Geodesic Triangles Note that even in hyperbolic geometry, which has constant negative curvature, placing points close to each other leads to an approximately flat embedding. With that in mind, given three points $x, y, z \in \mathcal{M}$, a simple quantity that characterizes the actual space curvature between them is the sum of the angles in the geodesic triangle that they form (see Appendix A), denoted $k_\theta$. In practice, we look at empirical distributions of $k_\theta$ given by triples sampled uniformly from an embedding set $\{y_i\}_{i=1}^{k}$. Moreover, for presentation purposes, because the sum is between $0$ and $\pi$ for hyperbolic triangles and between $\pi$ and $3\pi$ for spherical ones, we translate $k_\theta$ by $-\pi$ and divide by $2\pi$. This gives us the ranges $[-1/2, 0)$ and $(0, 1]$, respectively.
Matrix Manifolds
We now review the two proposed families of matrix manifolds. We have chosen them such that they cover negative and positive curvature ranges, respectively. Also, essential for graph embedding purposes, they lend themselves to . Geodesic paths between four random pairs of SPD matrices represented as ellipses. The first (black) and last (yellow) ones are randomly generated. The ones in between follow the geodesic path between them, in small steps. Note that we add an artificial increment on the x axis to get a better visualization of the path. computationally tractable Riemannian optimization. Their properties are summarized in Table 1. In what follows, we insist on those aspects that are relevant for graph embedding.
3.1. Non-positive Curvature: SPD Manifold Definition The space of $n \times n$ real symmetric positive-definite matrices, $S_{++}(n) = \{A \in \mathbb{R}^{n \times n} : A = A^\top,\ A \succ 0\}$, is an $\frac{n(n+1)}{2}$-dimensional differentiable manifold -- an embedded submanifold of $S(n)$, the space of $n \times n$ symmetric matrices. Its tangent space can be identified with $S(n)$.
Riemannian Structure The most common Riemannian metric endowed to $S_{++}(n)$ is $\langle P, Q \rangle_A = \mathrm{Tr}(A^{-1} P A^{-1} Q)$. Also called the canonical metric, it is motivated as being invariant to congruence transformations $\Gamma_X(A) = X^\top A X$, with $X$ an $n \times n$ invertible matrix (Pennec et al., 2006). Several geodesic paths are drawn in Figure 3.
Riemannian Distance
The induced distance function is equivalent to $d(A, B) = \sqrt{\sum_{i=1}^{n} \log^2 \lambda_i(A^{-1} B)}$. Notice that singular positive semi-definite matrices, which lie on the boundary $\partial S_{++}(n)$, are points at infinity. An interpretation of the eigenvalues $\lambda_i(A^{-1} B)$ can be obtained by recalling that for any $A, B \in S_{++}(n)$, there exist an invertible matrix $X$ and a diagonal matrix $D$ such that $X^\top A X = \mathrm{Id}_n$ and $X^\top B X = D$. Thus, the distance can be seen as measuring how well $A$ and $B$ can be simultaneously reduced to the identity matrix (Chossat & Faugeras, 2009). See Appendix D for proofs and details.
Properties The canonical SPD manifold has non-positive sectional curvature everywhere (Bhatia, 2009). It is also a high-rank symmetric space (Lang, 2012). The high-rank property tells us that there are at least planes of the tangent space on which the sectional curvature vanishes. Contrast it with the hyperbolic space which is also a symmetric space but where the only (intrinsic) flats are the geodesics. Table 1. Summary of Differential and Riemannian Geometry Tools. Notation: A, B -manifold points; P, Q -tangent space points; P -ambient space point; ∇ E A / ∇ R A -Euclidean / Riemannian gradient; exp(A) / log(A) -matrix exponential / logarithm. References: SPD (Bhatia, 2009;Bridson & Haefliger, 2013;Jeuris, 2015); Grassmann (Edelman et al., 1998;Zhang et al., 2018).
Alternative Metrics There are several other metrics that one can endow the SPD manifold with. One of the simplest is the more efficient log-Euclidean one (see, e.g., Arsigny et al., 2006). However, the induced curvature is zero, so it presents no advantage over Euclidean geometry. Another, more interesting one is the Bures-Wasserstein metric from quantum information theory (Bhatia et al., 2019), which induces a non-negative curvature on S ++ (n). It is leveraged in (Muzellec & Cuturi, 2018) to embed nodes as elliptical distributions. Finally, a popular alternative to the (squared) canonical distance, which we adopt in our experiments, is the symmetric Stein divergence, It has been thoroughly studied in (Sra, 2012;Sra & Hosseini, 2015) who prove that √ S is indeed a metric, and that S(A, B) shares many properties of the Riemannian distance function (19), such as congruence and inversion invariances, as well as geodesic convexity in each argument. It is particularly appealing for backpropagation-based training due to its computationally efficient gradients (see below).
Computational Aspects
We compute gradients via automatic differentiation (Paszke et al., 2017). Notice that if A = U DU is the eigendecomposition of a symmetric matrix with distinct eigenvalues and L is some loss function that depends on A only via D, then (Giles, 2008) Computing geodesic distances requires the eigenvalues of A −1 B, though, which may not be symmetric. We overcome that by using the matrix A −1/2 BA −1/2 instead which is SPD and has the same spectrum. Moreover, for the 2 × 2 and 3 × 3 cases, we use closed-form eigenvalue formulas to speed up our implementation. 5 See Appendix D for details. For the Stein divergence, the gradients can be computed in closed form as ∇ A S(A, B) = 1 2 (A + B) −1 − 1 2 A −1 . We additionally note that many of the required matrix operations can be efficiently computed via Cholesky decompositions.
Non-negative Curvature: Grassmann Manifold
Definition The orthogonal group O(n) is the set of n × n real orthogonal matrices. It is a special case of the compact Stiefel manifold V (k, n) := {A ∈ R n×k : A A = Id k }, i.e., the set of n × k "tall-skinny" matrices with orthonormal columns, for k n. The Grassmannian is defined as the space of k-dimensional linear subspaces of R n . It is related to the Stiefel manifold in that every orthonormal k-frame in R n spans a k-dimensional subspace of the n-dimensional Euclidean space. Similarly, every such subspace admits infinitely many orthonormal bases. This suggests the identification of the Grassmann manifold Gr(k, n) with the quotient space V (k, n)/O(k). In other words, an n × k orthonormal matrix A ∈ V (k, n) represents the equivalence class which is a single point on Gr(k, n).
Riemannian Structure
The canonical Riemannian metric of Gr(k, n) is simply the Frobenius inner product (10). We refer to (Edelman et al., 1998) for details on how it arises from its quotient geometry. As before, we include examples of geodesic paths on Gr(2, 3) in Figure 4.
Riemannian Distance
The closed form formula, shown in (20), depends on the set {θ i } k i=1 of so-called principal angles between two subspaces, defined recursively as They can be interpreted as the minimal angles between all possible bases of the two subspaces.
Alternative Metrics Several alternatives to the arc-length metric (20) have been proposed, all expressible in terms of the principal angles -- see (Edelman et al., 1998, Section 4.3) for an overview. A popular one is the so-called projection norm, $d_p(A, B) = \|AA^\top - BB^\top\|_F$. It corresponds to embedding $\mathrm{Gr}(k, n)$ in $\mathbb{R}^{n \times n}$ but then using the ambient space metric. It is analogous to taking Euclidean distances between points on a sphere, thus ignoring its geometry.
Computational Aspects Computing a geodesic distance requires the SVD decomposition of an k × k, matrix which can be significantly smaller than the manifold dimension k(n−k). For k = 2, we use closed-form solutions for singular values (see Appendix D). Otherwise, we employ standard numerical algorithms. For the gradients, a result analogous to eq. (22) makes automatic differentiation straight-forward.
Properties The Grassmann manifold Gr(k, n) is a compact, non-negatively curved manifold. As shown by (Wong, 1968), its sectional curvatures at A ∈ Gr(k, n) satisfy for all P, Q ∈ T A Gr(k, n). Contrast the above with the constant positive curvature of the sphere which can be made arbitrarily large by making its radius R → 0.
Decoupling Learning and Evaluation
Recall that our goal is to preserve the graph structure given through its node-to-node shortest paths by optimizing an objective which encourages similar (relative) geodesic distances between node embeddings. Prior work broadly uses local or global loss functions that focus on either close neighborhood information or all-pairs interactions, respectively. The methods that fall under the former emphasize correct placement of immediate neighbors, such as the one used in (Nickel & Kiela, 2017) for unweighted graphs 6 .
(25) Those that fall under the latter, on the other hand, compare distances directly via loss functions inspired by generalized MDS (Bronstein et al., 2006), e.g., Note that (26) focuses mostly on distant nodes, while misrepresenting close ones yields a large loss according to (27) -one of several objective functions used in (Gu et al., 2018).
The two types of objectives yield embeddings with different properties. It is thus not surprising that each one of them has been coupled in prior work with a preferred metric quantifying reconstruction fidelity. The likelihood-based one is evaluated via the rank-based mean average precision with B(j; i) = {y k ∈ M : d M (y i , y k ) d M (y i , y j )}, while the global, stress-like ones yield best scores when measured by the average distortion of the reference metric Our Proposal To decouple learning and evaluation, we propose to optimize another loss function that allows moving in a continuous way on the representation scale ranging from local neighborhoods patching, as encouraged by (25), to the global topology matching, as made desirable by (26) and (27). In the same spirit, we propose a more fine-grained ranking metric that makes the trade-off clearer.
Riemannian Stochastic Neighbor Embedding
We advocate training embeddings via a version of the celebrated Stochastic Neighbor Embedding (SNE) (Hinton & Roweis, 2003) adapted to the Riemannian setting. As shown next, this is almost trivial while the benefits are significant.
SNE works by attaching to each node a distribution defined over all other nodes and based on the distance to them. This is done for both the input graph distances, yielding the ground truth distribution, and for the embedding distances, yielding the model distribution. That is, with j = i, we have where Z pi and Z qi are the normalizing constants and λ is the input scale parameter. The original SNE formulation uses M = R n . In this case, the probabilities are proportional to an isotropic Gaussian N (y j | y i , λ). As defined above, it is our (natural) generalization to Riemannian manifolds.
The embeddings are then learned by minimizing the sum of Kullback-Leibler (KL) divergences between the two families of distributions, p i := p(· | x i ) and q i := q(· | y i ), The connection to the local neighborhood regime from (25) is stated next. Lemma 1 For λ → 0, minimizing (32) is equivalent to maximizing the sum of the following per-node terms .
Proof The result follows directly from the definition of the KL divergence, D KL p i q i = − j =i p ij log q ij + const, and the limit of the distributions defined in (30), The Euclidean intuition is that of a Gaussian becoming "infinitely peaked" around x i , so its nearest neighbors will have "infinitely more" mass assigned to them than the others.
Interestingly, it has been remarked that feeding squared distances to the objective function improves training stability in certain cases because they are continuously differentiable (De Sa et al., 2018). In this regard, Lemma 1 serves as a more principled justification for doing that in (25).
Finally, we point out that a connection to an MDS-like loss function is mentioned in (Hinton & Roweis, 2003, Section 6), in the regime λ → ∞, but we have not been able to make sense out of it. That being said, appealing to intuition, we expect that for a large λ, the objective (32) tends towards placing equal emphasis on the relative distances between all pairs of points, thus behaving similar to eqs. (26) and (27). The advantage is that the temperature-like parameter λ acts as a knob for controlling the optimization goal.
F1@k -Generalizing Ranking Fidelity
To the best of our knowledge, none of the metrics proposed in the literature can quantify the ranking fidelity of nodes that are k hops away from a source node, with k > 1. Recall that the motivation stems, for one, from the limitation of mean average precision to immediate neighbors, and, at the other side of the spectrum, from the sensitivity to absolute values of non-ranking metrics such as the average distortion.
In what follows, we will assume that the input graph G is unweighted. The definitions can be adapted to graphs with edge weights, but in most of our experiments we have used unweighted graphs, so we limit the treatment as such.
For an input graph G, we denote by L G (u; k) the set of nodes that are exactly k hops away from a source node u (i.e., on "layer" k), and by B G (v; u) the set of nodes that are closer to node u than another node v. We can now define the precision and the recall for the ordered pair (u, v).
Definition 2
For an embedding f : G → M, the precision and recall of a node v in the shortest-path tree rooted at u, with u = v, are given by They follow the conventional definitions. For instance, the numerator is the number of true positives: the nodes that appear before v in the shortest-path tree rooted at u and, at the same time, are embedded closer to u than v is. Moreover, notice that our definition of precision recovers the one used in (28) when restricting to layer-1 nodes (i.e., neighbors).
The definition of the F1 score of $(u, v)$, denoted by $F_1(v; u)$, follows naturally as the harmonic mean of precision and recall. Then, the F1@k metric is obtained by averaging the F1 scores of all nodes that are on layer $k \geq 1$, across all shortest-path trees. That is, with $K = \sum_{u \in G} |L_G(u; k)|$, we set $F_1(k) = \frac{1}{K} \sum_{u \in G} \sum_{v \in L_G(u; k)} F_1(v; u)$. This draws a curve $\{(k, F_1(k))\}_{k \in [d(G)]}$, where $d(G)$ denotes the diameter of the graph. In our results, we sometimes summarize performance via the area under $F_1(k)$.
Experiments
We restrict our experiments to evaluating the graph reconstruction capabilities of the proposed matrix manifolds relative to the constant curvature baseline spaces. An analysis via properties of nearest-neighbor graphs constructed from random samples is included in Appendix E. Our code is accessible at http://github.com/dalab/matrix-manifolds.
Training Details
We compute and save all-pairs shortestpaths in all input graphs. Then, we optimize a set of embeddings for each combination of optimization setting and loss function, including both the newly proposed Riemannian SNE, for several values of λ, and the ones used in prior work (Section 4). This is described in more detail in Appendix F.
Evaluation We report on F1@1, the area under the F1@k curve, and the average distortion. Given our transductive inference setting (i.e., lower loss is better), we report the best performing numbers across the aforementioned repetitions.
Synthetic Graphs
We begin by showcasing the F1@k metric that we advocate for several generated graphs in Figure 5. On the 10 × 10 × 10 grid and the 500-nodes cycle all manifolds perform well. This is because every Riemannian manifold generalizes Euclidean space and Euclidean geometry suffices for grids and cycles (e.g., a cycle looks locally as a line). The more discriminative ones are the two other graphs -a full balanced tree (branching factor r = 4 and depth h = 5) and a cycle of 10 trees (r = 3 and h = 4). The best performing embeddings involve a hyperbolic component while the SPD ones come between those and the non-negatively curved ones, which are indistinguishable. The results confirm our expectations: (more) negative curvature is useful when embedding trees. Finally, notice that the high-temperature SNE regime encourages the recovery of the global structure more than the local neighborhoods.
Real-world Graphs
We compare SPD and hyperbolic spaces on several real datasets (Table 2). Details about them Ollivier-Ricci Curvature (c) "road-minnesota" Figure 6. The graphs embedded in Tables 2 and 3 ("facebook" is shown in Figure 1). More such drawings are included in Appendix G. Figure 7. Distributions of (normalized) sum-of-angles in geodesic triangles formed by the learned embeddings that yield the best F1@1 metrics (up) and the best average distortion metrics (down), for all datasets from Table 2, for n = 3. 10000 triples are sampled.
and an analysis of their geometric properties are attached in Appendix G. We plot the ones shown here in Figures 1 and 6. Extended results are included in Appendix H.
Discussion First of all, we see that the (partial) negative curvature of the SPD and hyperbolic manifolds is beneficial: they outperform the flat Euclidean embeddings in almost all scenarios. This can be explained by the complex-network structure of the input graphs (Krioukov et al., 2010). Second, we see that especially when using the Stein divergence, the SPD embeddings achieve significant improvements on the average distortion metric and are competitive and sometimes better w.r.t. the ranking metrics. We attribute this to a betterbehaved optimization task thanks to its geodesic convexity and stable gradients (see Section 3.1).
How Do the Embeddings Curve? It is a priori unclear to what extent the curvature of the embedding space is lever-aged. To shed light on that, we employ our technique based on sum-of-angles in geodesic triangles (see Section 2.4). We recognize in Figure 7 something remarkable: the better performing embeddings (as per Table 2) yield more negativelycurved triangles. Notice, for instance, the collapsed box plot corresponding to the "web-edu" hyperbolic embedding (a), i.e., almost all triangles have sum-of-angles close to 0 . This is explained by its obvious tree-like structure (Figure 6a). Similarly, the SPD-Stein embedding of "facebook" outperforms the hyperbolic one in terms of F1@1 and that reflects in the slightly more stretched box plot (b). Moreover, the pattern applies to the best average-distortion embeddings, where the SPD-Stein embeddings are the only ones leveraging negative curvature and, hence, perform better -the only exception is the "power" graph (c), for which indeed Table 2 confirms that the hyperbolic embeddings are slightly better.
Compact Embeddings
We embed several graphs with traits associated with positive curvature in Grassmann manifolds and compare them to spherical embeddings. Table 3 Table 3. The "Gr vs. S" results on 2 datasets and 3 dimensions. The dataset "cat-cortex" (Scannell et al., 1995) is a dissimilarity matrix, lacking graph information, so F1@k cannot be computed.
shows that the former yields non-negligibly lower average distortion on the "cat-cortex" dissimilarity dataset and that the two are on-par on the "road-minnesota" graph (displayed in Figure 6c -notice its particular structure, characterized by cycles and low node degrees). More such results are included in Appendix H. As a general pattern, we find learning compact embeddings to be optimization-unfriendly.
Conclusion
We proposed embedding graph nodes into matrix spaces of non-constant sectional curvature, such as the SPD and Grassmann manifolds. Leveraging their powerful representational capabilities, we showed that they can consistently and significantly improve over Euclidean embeddings as well as often outperform hyperbolic and elliptical ones on the graph reconstruction task. This suggests that their geometry can accommodate certain graphs with better precision and less distortion than other embedding spaces. We also advocate the Riemannian SNE objective for learning embeddings and explained how, in a sense, it unifies previously used loss functions. Finally, we defined the F1@k metric as an extension of mAP for quantifying ranking fidelity.
A. Overview of Differential & Riemannian Geometry
In this section, we introduce the foundational concepts from differential geometry, a discipline that has arisen from the study of differentiable functions on curves and surfaces, thus generalizing calculus. Then, we go a step further, into the more specific Riemannian geometry which enables the abstract definitions of lengths, angles, and curvatures. We base this section on (Carmo, 1992;Absil et al., 2009) and point the reader to them for a more thorough treatment of the subject.
Differentiable Manifolds Informally, an n-dimensional manifold is a set M which locally resembles n-dimensional Euclidean space. To be formal, one first introduces the notions of charts and atlases. A bijection ϕ from a subset U ∈ M onto an open subset of R n is called an ndimensional chart of the set M, denoted by (U, ϕ). It enables the study of points x ∈ M via their coordinates are open sets in R n and the change of coordinates ϕ β • ϕ −1 α is smooth.
Two atlases A 1 and A 2 are equivalent if they generate the same maximal atlas. The maximal atlas A + is the set of all charts (U, ϕ) such that A ∪ {(U, ϕ)} is also an atlas. It is also called a differentiable structure on M. With that, an n-dimensional differentiable manifold is a couple (M, A + ), with M a set and A + a maximal atlas of M into R n . In more formal treatments, A + is also constrained to induce a well-behaved topology on M.
Embedded Submanifolds and Quotient Manifolds
How is a differentiable structure for a set of interest usually constructed? From the definition above it is clear that it is something one endows the set with. That being said, in most useful cases it is not explicitly chosen or constructed. Instead, a rather recursive approach is taken: manifolds are obtained by considering either subsets or quotients (see last subsection paragraph) of other manifolds, thus inheriting a "natural" differentiable structure. Where does this recursion end? It mainly ends when one reaches a vector space, which is trivially a manifold via the global chart ϕ : R n×k → R nk , with X → vec(X). That is the case for the matrix manifolds considered in Section 3 too.
What makes the aforementioned construction approach (almost) assumption-free is the following essential property: if M is a manifold and N is a subset of the set M (respectively, a quotient M/∼), then there is at most one differentiable structure that agrees with the subset topology (respectively, with the quotient projection). 7 The resulting manifolds are called embedded submanifolds and quotient manifolds, respectively. Sufficient conditions for their existence (hence, uniqueness) are known too and do apply for our manifolds. For instance, the submersion theorem says that for a smooth function F : and a point y ∈ M 2 such that F has full rank 8 for all The quotient of a set M by an equivalence relation ∼ is defined as with := {y ∈ M : y ∼ x} -the equivalence class of x. The function π : M → M/∼, given by x → , is the canonical projection. The simplest example of a quotient manifold is the real projective space, RP(n − 1). It is the set of lines through the origin in R n . With the notation R n * = R n \ {0}, the real projective space can be identified with the quotient R n * /∼ given by the equivalence relation x ∼ y ⇐⇒ ∃t ∈ R * : y = tx.
Tangent Spaces To do even basic calculus on a manifold, one has to properly define the derivatives of manifold curves, γ : R → M, as well as the directional derivatives of smooth real-valued functions defined on the manifold, f : M → R. The usual definitions, are invalid as such because addition does not make sense on general manifolds. However, notice that f • γ : t → f (γ(t)) is differentiable in the usual sense. With that in mind, let T x (M) denote the set of smooth real-valued functions defined on a neighborhood of x. Then, the mappingγ (0) from is called the tangent vector to the curve γ at t = 0. The equivalence class 7 We say almost assumption-free because we still assume that this agreement is desirable. 8 That is, the Jacobian ∂F (x) ∂x ∈ R d 2 ×d 1 has rank d2 irrespective of the chosen charts. is a tangent vector at x ∈ M. The set of such equivalence classes forms the tangent space T x M. It is immediate from the linearity of the differentiation operator from (41) that T x M inherits a linear structure, thus forming a vector space.
The abstract definition from above recovers the classical definition of directional derivative from (39) in the following sense: if M is an embedded submanifold of a vector space E and f is the extension of f in a neighborhood of γ(0) ∈ E, thenγ ( so there is a natural identification of the mappingγ(0) with the vector γ (0).
For quotient manifolds M/∼, the tangent space splits into two complementary linear subspaces called the vertical space V x and the horizontal space H x . Intuitively, the vectors in the former point in tangent directions which, if we were to follow for an infinitesimal step, we would get another element of . Thus, only the horizontal tangent vectors make us move on the quotient manifold.
Riemannian Metrics They are inner products $\langle \cdot, \cdot \rangle_x$, sometimes denoted by $g_x(\cdot, \cdot)$, attached to each tangent space $T_x\mathcal{M}$. They give a notion of length via $\|\xi_x\|_x := \sqrt{\langle \xi_x, \xi_x \rangle_x}$, for all $\xi_x \in T_x\mathcal{M}$. A Riemannian metric is an additional structure added to a differentiable manifold (M, A + ), yielding the Riemannian manifold (M, A + , g x ). However, as it was the case for the differentiable structure, for Riemannian submanifolds and Riemannian quotient manifolds it is inherited from the "parent" manifold in a natural way -see (Absil et al., 2009) for several examples.
The Riemannian metric enables measuring the length of a curve γ : → M, which, in turn, yields the Riemannian distance function, that is, the shortest path between two points on the manifold. The infimum is taken over all curves γ : → M with γ(a) = x and γ(b) = y. Note that in general it is only defined locally because M might have several connected components or it might not be geodesically complete (see next paragraphs). Deriving a closed-form expression of it is paramount for graph embedding purposes.
The Riemannian gradient of a smooth function f : M → R at x, denoted by ∇ R f (x) is defined as the unique element of T x M that satisfies Retractions Up until this point, we have not introduced the concept of "moving in the direction of a tangent vector", although we have intuitively used it. This is achieved via retractions. At a point x ∈ M, the retraction R x is a map from T x M to M satisfying local rigidity conditions: R x (0) = x and D R x (0) = Id n . For embedded submanifolds, the twostep approach consisting of (i) taking a step along ξ in the ambient space, and (ii) projecting back onto the manifold, defines a valid retraction. In quotient manifolds, the retractions of the base space that move an entire equivalence class to another equivalence class induce retractions on the quotient space.
Riemannian Connections Let us first briefly introduce and motivate the need for an additional structure attached to differentiable manifolds, called affine connections. They are functions where X is the set of vector fields on M, i.e., functions assigning to each point x ∈ M a tangent vector ξ x ∈ T x M. They satisfy several properties that represent the generalization of the directional derivative of a vector field in Euclidean space. Affine connections are needed, for instance, to generalize second-order optimization algorithms, such as Newton's method, to functions defined on manifolds.
The Riemannian connection, also known as the Levi-Civita connection, is the unique affine connection that, besides the properties referred to above, satisfies two others, one of which depends on the Riemannian metric -see (Carmo, 1992) for details. It is the affine connection implicitly assumed when working with Riemannian manifolds.
Geodesics, Exponential Map, Logarithm Map, Parallel Transport They are all concepts from Riemannian geometry, defined in terms of the Riemannian connection. A geodesic is a curve with zero acceleration, Geodesics are the generalization of straight lines from Euclidean space. They are locally distance-minimizing and parameterized by arc-length. Thus, for every ξ ∈ T x M, there exists a unique geodesic γ(t; x, ξ) such that γ(0) = x andγ(0) = ξ.
The exponential map is the function where U is a neighborhood of 0. The manifold is said to be geodesically complete if the exponential map is defined on the entire tangent space, i.e., $U = T_x\mathcal{M}$. It can be shown that $\exp_x(\cdot)$ is a retraction and satisfies the following useful property: $d_{\mathcal{M}}\big(x, \exp_x(\xi)\big) = \|\xi\|_x$, for all $\xi \in U$. (50) The exponential map defines a diffeomorphism between a neighborhood of $0 \in T_x\mathcal{M}$ onto a neighborhood of $x \in \mathcal{M}$.
If we follow a geodesic γ(t; x, ξ) from t = 0 to infinity, it can happen that it is minimizing only up to t 0 < ∞. If that is the case, the point y = γ(t 0 ; x, ξ) is called a cut point. The set of all such points, gathered across all geodesics starting at x, is called the cut locus of M at x, C(x) ⊂ M. It can be proven that the cut locus has finite measure (Pennec, 2006). The maximal domain where the exponential map is a diffeomorphism is given by its preimage on M \ C(x). Hence, the inverse is called the logarithm map, The parallel transport of a vector ξ x ∈ T x M along a curve γ : I → M is the unique vector field ξ that points along the curve to tangent vectors, satisfying Curvature The Riemann curvature tensor is a tensor field that assigns a tensor to each point of a Riemannian manifold. Each such tensor measures the extent to which the manifold is not locally isometric to Euclidean space. It is defined in terms of the Levi-Civita connection. For each pair of tangent vectors u, v ∈ T x M, R x (u, v) is a linear transformation on the tangent space. The vector w = R x (u, v) w quantifies the failure of parallel transport to bring w back to its original position when following a quadrilateral determined by −tu, −tv, tu, tv, with t → 0. This failure is caused by curvature and it is also known as the infinitesimal non-holonomy of the manifold.
The sectional curvature is defined for a fixed point x ∈ M and two tangent vectors u, v ∈ T x M as It measures how far apart two geodesics emanating from x diverge. If it is positive, the two geodesics will eventually converge. It is the most common curvature characterization that we use to compare, from a theoretical perspective, the manifolds discussed in this work.
The Ricci tensor Ric(w, v) is defined as the trace of the linear map T x M → T x M given by u → R(u, v)w. It is fully determined by specifying the scalars Ric(u, u) for all unit vectors u ∈ T x M, which is known simply as the Ricci curvature. It is equal to the average sectional curvature across all planes containing u times (n − 1). Intuitively, it measures how the volume of a geodesic cone in direction u compares to that of an Euclidean cone.
Finally, the scalar curvature (or Ricci scalar) is the most coarse-grained notion of curvature at a point on a Riemannian manifold. It is the trace of the Ricci tensor, or, equivalently, n(n − 1) times the average of all sectional curvatures. Note that a space of non-constant sectional curvature can have constant Ricci scalar. This is true, in particular, for homogeneous spaces.
B. Differential Geometry Tools for Hyperbolic and Elliptical Spaces
We include in Table 4 the differential geometry tools for the hyperbolic and elliptical spaces. We use the hyperboloid model for the former and the hyperspherical model for the latter, depicted in Figure 8. Some prior work prefers working with the Poincaré ball model and/or the stereographic projection. They have both advantages and disadvantages.
For instance, our choice yields simple formulas for certain quantities of interest, such as exponential and logarithm maps. They are also more numerically stable. In fact, it is claimed in (Nickel & Kiela, 2018) that numerical stability, together with its impact on optimization, is the only explanation for the Lorentz model outperforming the prior experiments in the (theoretically equivalent) Poincaré ball (Nickel & Kiela, 2017).
On the other hand, the just-mentioned alternative models have the strong advantage that the corresponding metric tensors are conformal. This means that they are proportional to the Riemannian metric of Euclidean space: $g_H = \left(\frac{2}{1 - \|x\|_2^2}\right)^2 g_E$ and $g_S = \left(\frac{2}{1 + \|x\|_2^2}\right)^2 g_E$.
(54) Notice the syntactic similarity between them. Furthermore, Table 4. Differential Geometry Tools for Hyperbolic Space. Notation: x, y ∈ M; u, v ∈ TxM; x, y L -Lorentz product
Characterizations Constant negative curvature Isotropic
Constant positive curvature Isotropic the effect of the denominators in the conformal factors reinforce the intuition we have about the two spaces: distances around far away points are increasingly larger in the hyperbolic space and increasingly smaller in the elliptical space.
Let us point out that both hyperbolic and elliptical spaces are isotropic. Informally, isotropy means "uniformity in all directions." Note that this is a stronger property than the homogeneity of the matrix manifolds discussed in Section 3 (see also Appendix D) which means that the space "looks the same around each point."
C. Geometric Properties of Graphs
Graphs and manifolds, while different mathematical abstractions, share many similar properties through Laplace operators, heat kernels, and random walks. Another example is the deep connection between trees and the hyperbolic plane: any tree can be embedded in H(2) with arbitrarily small distortion (Sarkar, 2011). On a similar note, complex networks arise naturally from hyperbolic geometry (Krioukov et al., 2010). With these insights in mind, in this section we review some continuous geometric properties that have been adapted to arbitrary weighted graphs. See also (Ni et al., 2015).
Gromov Hyperbolicity Also known as δ-hyperbolicity (Gromov, 1987), it quantifies with a single number the hyperbolicity of a given metric space: the smaller δ is, the more hyperbolic-like or negatively-curved the space is. The definition that makes it easier to picture it is via the slim triangles property: a metric space 9 (M, d M ) is δ-hyperbolic if all geodesic triangles are δ-slim. Three points x, y, w ∈ M form a δ-slim triangle if any point on the geodesic segment between any two of them is within distance δ from the other two geodesics (i.e., "sides" of the geodesic triangle).
For discrete metric spaces such as graphs, an equivalent definition using the so-called "4-points condition" can be used to devise algorithms that look at quadruples of points. Both exact and approximating algorithms exist that run fast enough to analyze graphs with tens of thousands of nodes within minutes (Fournier et al., 2015;Cohen et al., 2015). Finally, in practice we look at histograms of δ instead of the worst-case value.
Ollivier-Ricci Curvature (Ollivier, 2009) generalized the Ricci curvature to metric spaces (M, d M ) equipped with a family of probability measures {m x (·)} x∈M . It is defined in a way that mimics the interpretation of Ricci curvature on Riemannian manifolds: it is the average distance between two small balls taken relative to the distance between their centers. The difference is that now the former is given by the Wasserstein distance (i.e., Earth mover's distance) between the corresponding probability measures, with $W(\mu_1, \mu_2) := \inf_{\xi} \iint d(x, y)\, \mathrm{d}\xi(x, y)$, where $\xi(x, y)$ is a joint distribution with marginals $\mu_1$ and $\mu_2$. (Figure 9. Geodesic triangles in negatively curved, flat, and positively curved spaces, respectively. Source: www.science4all.org) This definition was then specialized in (Lin et al., 2011) for graphs by making m x assign a probability mass of α ∈
package binary
import "github.com/recursivecurry/gobox/typeclass/ord"
// Interface describes a node that can live in a binary search tree: it is
// ordered (via ord.Interface, which supplies Eq and Less) and exposes
// mutable links to its left and right children. A nil child link marks a
// missing subtree.
type Interface interface {
	ord.Interface
	// Left returns the left child, or nil if there is none.
	Left() Interface
	// SetLeft replaces the left child link (node may be nil).
	SetLeft(node Interface)
	// Right returns the right child, or nil if there is none.
	Right() Interface
	// SetRight replaces the right child link (node may be nil).
	SetRight(node Interface)
}
// Add inserts node into the binary search tree rooted at *root.
// If a node equal to it already exists, node replaces that node in place,
// adopting its children; otherwise node is attached as a new leaf.
// An empty tree (nil *root) is handled by making node the root.
func Add(root *Interface, node Interface) {
	// BUG FIX: the original dereferenced a nil *root on an empty tree.
	if *root == nil {
		*root = node
		return
	}
	var parent Interface
	current := *root
	for {
		if current.Eq(node) {
			// Splice the new node in where the equal one sat.
			if parent == nil {
				// BUG FIX: was "*root = current", which left the old
				// node as the root and never linked the new one in.
				*root = node
			} else if parent.Less(node) {
				parent.SetRight(node)
			} else {
				parent.SetLeft(node)
			}
			node.SetLeft(current.Left())
			node.SetRight(current.Right())
			return
		}
		// Descend toward the correct insertion point.
		parent = current
		if current.Less(node) {
			if current.Right() == nil {
				current.SetRight(node)
				return
			}
			current = current.Right()
		} else {
			if current.Left() == nil {
				current.SetLeft(node)
				return
			}
			current = current.Left()
		}
	}
}
// Remove deletes the node equal to node from the tree rooted at *root.
// It returns the removed node and true, or (nil, false) if no match exists.
func Remove(root *Interface, node ord.Interface) (Interface, bool) {
	return find(root, node, true)
}
// Get looks up the node equal to node in the tree rooted at *root without
// modifying the tree. It returns the node and true, or (nil, false) if no
// match exists.
func Get(root *Interface, node ord.Interface) (Interface, bool) {
	return find(root, node, false)
}
func find(root *Interface, node ord.Interface, remove bool) (Interface, bool) {
var parent Interface
var found Interface
current := *root
for {
if current == nil {
return nil, false
}
if current.Eq(node) {
found = current
if remove {
if parent.Less(current) {
if current.Left() == nil {
parent.SetRight(current.Right())
} else if current.Right() == nil {
parent.SetRight(current.Left())
} else {
minimum := current.Right()
for minimum.Left() != nil {
minimum = minimum.Left()
}
minimum, _ = find(¤t, minimum, true)
minimum.SetRight(current.Right())
minimum.SetLeft(current.Left())
parent.SetRight(minimum)
}
} else {
if current.Left() == nil {
parent.SetLeft(current.Right())
} else if current.Right() == nil {
parent.SetLeft(current.Left())
} else {
minimum := current.Right()
for minimum.Left() != nil {
minimum = minimum.Left()
}
minimum, _ = find(¤t, minimum, true)
minimum.SetRight(current.Right())
minimum.SetLeft(current.Left())
parent.SetLeft(minimum)
}
}
}
break
} else if current.Less(node) {
parent = current
current = current.Right()
} else {
parent = current
current = current.Left()
}
}
return found, true
}
// removeNode detaches parent's right (if right is true) or left child from
// the tree and returns it, or nil if that child does not exist.
//
// BUG FIX: the original did not compile -- it called find(current, remove)
// with the wrong arity and an undefined identifier -- looped forever in the
// two-children case, and could call parent.Less with a nil argument. It has
// been rewritten as a standard BST deletion of the chosen child.
func removeNode(parent Interface, right bool) Interface {
	var current Interface
	if right {
		current = parent.Right()
	} else {
		current = parent.Left()
	}
	if current == nil {
		return nil
	}
	var replacement Interface
	switch {
	case current.Left() == nil:
		replacement = current.Right()
	case current.Right() == nil:
		replacement = current.Left()
	default:
		// Both children present: promote the in-order successor
		// (minimum of the right subtree) into current's position.
		succParent := current
		succ := current.Right()
		for succ.Left() != nil {
			succParent = succ
			succ = succ.Left()
		}
		if succParent == current {
			succParent.SetRight(succ.Right())
		} else {
			succParent.SetLeft(succ.Right())
		}
		succ.SetLeft(current.Left())
		succ.SetRight(current.Right())
		replacement = succ
	}
	// Reattach on the same side the child was removed from.
	if right {
		parent.SetRight(replacement)
	} else {
		parent.SetLeft(replacement)
	}
	return current
}
|
/** Rule definitions for Python rules. */
public class PyRuleClasses {
public static final FileType PYTHON_SOURCE = FileType.of(".py", ".py3");
/**
* Input for {@link RuleClass.Builder#cfg(RuleTransitionFactory)}: if {@link
* PythonOptions#forcePython} is unset, sets the Python version according to the rule's default
* Python version. Assumes the rule has the expected attribute for this setting.
*
* <p>Since this is a configuration transition, this propagates to the rules' transitive deps.
*/
public static final RuleTransitionFactory DEFAULT_PYTHON_VERSION_TRANSITION =
(rule) -> {
String attrDefault = RawAttributeMapper.of(rule).get("default_python_version", Type.STRING);
// It should be a target value ("PY2" or "PY3"), and if not that should be caught by
// attribute validation. But just in case, we'll treat an invalid value as null (which means
// "use the hard-coded default version") rather than propagate an unchecked exception in
// this context.
PythonVersion version = null;
// Should be non-null because this transition shouldn't be used on rules without the attr.
if (attrDefault != null) {
try {
version = PythonVersion.parseTargetValue(attrDefault);
} catch (IllegalArgumentException ex) {
// Parsing error.
}
}
return new PythonVersionTransition(version);
};
} |
/* m/ file for Paragon i860 machine. */
/* NOTE(review): this looks like an Emacs "m/" machine-description header;
   the macros below configure the build for that platform -- TODO confirm. */
#include "i860.h"
/* Object files on this platform use the COFF format. */
#define COFF
/* Use the C library's malloc instead of a private allocator. */
#define SYSTEM_MALLOC
/* Load address of the text (code) section. */
#define TEXT_START 0x10000
/* Standard libraries to link against; -lic and -lmach are presumably
   Paragon/Mach-specific runtime libraries -- TODO confirm. */
#define LIB_STANDARD -lc -lic -lmach
/* Presumably directs the dumping code to preserve the old text section
   file pointer when rewriting the executable -- TODO confirm. */
#define KEEP_OLD_TEXT_SCNPTR
/* Presumably preserves the old physical-address field on dump -- TODO confirm. */
#define KEEP_OLD_PADDR
/* This platform lacks drem(); substitute fmod. */
#define drem fmod
<gh_stars>1-10
/*
* GDevelop IDE
* Copyright 2008-2016 <NAME> (<EMAIL>). All rights reserved.
* This project is released under the GNU General Public License version 3.
*/
#include "SigneTest.h"
//(*InternalHeaders(SigneTest)
#include <wx/bitmap.h>
#include <wx/intl.h>
#include <wx/image.h>
#include <wx/string.h>
//*)
#include "GDCore/Tools/HelpFileAccess.h"
#include "GDCore/IDE/wxTools/SkinHelper.h"
// Control identifiers and the event table below are generated and maintained
// by wxSmith; do not edit anything between the //(* ... *) markers by hand.
//(*IdInit(SigneTest)
const long SigneTest::ID_RADIOBOX1 = wxNewId();
const long SigneTest::ID_STATICBITMAP2 = wxNewId();
const long SigneTest::ID_HYPERLINKCTRL1 = wxNewId();
const long SigneTest::ID_BUTTON1 = wxNewId();
//*)

BEGIN_EVENT_TABLE(SigneTest,wxDialog)
	//(*EventTable(SigneTest)
	//*)
END_EVENT_TABLE()
// Builds the "choose the test's sign" dialog: a radio box listing the six
// comparison operators, a help icon/link, and an OK button. The layout code
// between the //(*Initialize ... *) markers is generated by wxSmith and must
// not be edited by hand.
SigneTest::SigneTest(wxWindow* parent)
{
	//(*Initialize(SigneTest)
	wxFlexGridSizer* FlexGridSizer2;
	wxFlexGridSizer* FlexGridSizer1;
	wxFlexGridSizer* FlexGridSizer17;

	Create(parent, wxID_ANY, _("Choose the test\'s sign"), wxDefaultPosition, wxDefaultSize, wxDEFAULT_DIALOG_STYLE, _T("wxID_ANY"));
	FlexGridSizer1 = new wxFlexGridSizer(0, 1, 0, 0);
	wxString __wxRadioBoxChoices_1[6] =
	{
		_("= ( equal to )"),
		_("> ( greater than )"),
		_("< ( less than )"),
		_(">= ( greater than or equal to )"),
		_("<= ( less than or equal to )"),
		_("!= ( not equal to )")
	};
	SigneRadio = new wxRadioBox(this, ID_RADIOBOX1, _("Choose the comparison operator"), wxDefaultPosition, wxDefaultSize, 6, __wxRadioBoxChoices_1, 1, wxRA_HORIZONTAL, wxDefaultValidator, _T("ID_RADIOBOX1"));
	FlexGridSizer1->Add(SigneRadio, 1, wxALL|wxALIGN_CENTER_HORIZONTAL|wxALIGN_CENTER_VERTICAL, 5);
	FlexGridSizer2 = new wxFlexGridSizer(0, 2, 0, 0);
	FlexGridSizer2->AddGrowableCol(1);
	FlexGridSizer17 = new wxFlexGridSizer(0, 3, 0, 0);
	FlexGridSizer17->AddGrowableRow(0);
	StaticBitmap1 = new wxStaticBitmap(this, ID_STATICBITMAP2, gd::SkinHelper::GetIcon("help", 16), wxDefaultPosition, wxDefaultSize, wxNO_BORDER, _T("ID_STATICBITMAP2"));
	FlexGridSizer17->Add(StaticBitmap1, 1, wxTOP|wxBOTTOM|wxLEFT|wxALIGN_CENTER_HORIZONTAL|wxALIGN_CENTER_VERTICAL, 5);
	helpBt = new wxHyperlinkCtrl(this, ID_HYPERLINKCTRL1, _("Help"), wxEmptyString, wxDefaultPosition, wxDefaultSize, wxHL_CONTEXTMENU|wxHL_ALIGN_CENTRE|wxNO_BORDER, _T("ID_HYPERLINKCTRL1"));
	helpBt->SetToolTip(_("Display help about this window"));
	FlexGridSizer17->Add(helpBt, 1, wxALL|wxALIGN_CENTER_HORIZONTAL|wxALIGN_CENTER_VERTICAL, 5);
	FlexGridSizer2->Add(FlexGridSizer17, 1, wxALL|wxEXPAND|wxALIGN_CENTER_HORIZONTAL|wxALIGN_CENTER_VERTICAL, 0);
	OkBt = new wxButton(this, ID_BUTTON1, _("Ok"), wxDefaultPosition, wxDefaultSize, 0, wxDefaultValidator, _T("ID_BUTTON1"));
	FlexGridSizer2->Add(OkBt, 1, wxALL|wxALIGN_RIGHT|wxALIGN_CENTER_VERTICAL, 5);
	FlexGridSizer1->Add(FlexGridSizer2, 1, wxALL|wxEXPAND|wxALIGN_CENTER_HORIZONTAL|wxALIGN_CENTER_VERTICAL, 0);
	SetSizer(FlexGridSizer1);
	FlexGridSizer1->Fit(this);
	FlexGridSizer1->SetSizeHints(this);

	Connect(ID_HYPERLINKCTRL1,wxEVT_COMMAND_HYPERLINK,(wxObjectEventFunction)&SigneTest::OnhelpBtClick);
	Connect(ID_BUTTON1,wxEVT_COMMAND_BUTTON_CLICKED,(wxObjectEventFunction)&SigneTest::OnOkBtClick);
	//*)
}
// Nothing to release manually: child widgets are owned and destroyed by
// wxWidgets. The empty //(*Destroy ... *) region is maintained by wxSmith.
SigneTest::~SigneTest()
{
	//(*Destroy(SigneTest)
	//*)
}
// Closes the dialog, using the modal return code to report which comparison
// operator was picked: the radio box selection index plus one (1 for "=",
// 2 for ">", ... 6 for "!="), so 0 is never returned for a confirmed choice.
void SigneTest::OnOkBtClick(wxCommandEvent& event)
{
	const int selectedOperator = SigneRadio->GetSelection();
	EndModal(selectedOperator + 1);
}
// Opens the manual page describing event parameters via GDCore's help file
// accessor (a singleton; Get() returns the shared instance).
void SigneTest::OnhelpBtClick(wxCommandEvent& event)
{
	gd::HelpFileAccess::Get()->OpenPage("game_develop/documentation/manual/events_editor/parameters");
}
|
/**
* Test for max with two args.
*/
@Test
public void whenInput5And10ThenReturns10() {
final int first = 5;
final int second = 10;
final int expect = 10;
final Max maximum = new Max();
int res = maximum.max(first, second);
assertThat(res, is(expect));
} |
r"""
``cotk._utils`` often is used by internal lib. The users should
not find api here.
"""
from .file_utils import get_resource_file_path, import_local_resources
from .resource_processor import ResourceProcessor, DefaultResourceProcessor
from ._utils import trim_before_target
from .hooks import start_recorder, close_recorder
# Names exported via ``from cotk._utils import *``.
# NOTE(review): trim_before_target, start_recorder and close_recorder are
# imported above but not listed here -- confirm whether that is intentional.
__all__ = ['ResourceProcessor', 'DefaultResourceProcessor', 'get_resource_file_path', \
	'import_local_resources']
|
def handle_dataset_view():
    """CRUD endpoint for dataset views, dispatched on the HTTP method.

    GET: query args ``id`` (view id) and ``ds_id`` (dataset id) select the
    view to return. Other methods carry their parameters in a JSON body:
    POST creates a view in dataset ``ds_id``, PUT updates the view ``id``,
    DELETE removes the view ``id`` from dataset ``ds_id``.

    Returns a JSON response on success, or a ``(message, 400)`` pair when a
    required identifier is missing. The caller's identity comes from the
    auth layer, not from the request payload.
    """
    email = get_auth().auth()['email']
    database_api = get_database()
    if request.method == 'GET':
        view_id = request.args.get('id', '')
        dataset_id = request.args.get('ds_id', '')
        if view_id == '' or dataset_id == '':
            return 'Please provide an id', 400
        return json_response(
            database_api.get_dataset_view(email, dataset_id=dataset_id, view_id=view_id))
    # Non-GET methods carry their parameters in a JSON body.
    content = request.get_json(force=True, cache=False)
    view_id = content.get('id')
    name = content.get('name')
    dataset_id = content.get('ds_id')
    if request.method in ('PUT', 'POST'):
        if request.method == 'PUT' and view_id is None:
            return 'Please supply an id', 400
        if request.method == 'POST' and dataset_id is None:
            return 'Please supply a ds_id', 400
        view_id = database_api.upsert_dataset_view(
            email=email,
            dataset_id=dataset_id,
            # POST always creates a new view; PUT updates an existing one.
            view_id=view_id if request.method == 'PUT' else None,
            name=name,
            value=content.get('value'))
        return json_response({'id': view_id})
    elif request.method == 'DELETE':
        # BUG FIX: missing identifiers were previously passed straight to the
        # database layer; validate them here like the other methods do.
        if view_id is None or dataset_id is None:
            return 'Please supply id and ds_id', 400
        database_api.delete_dataset_view(email, dataset_id=dataset_id, view_id=view_id)
        return json_response('', 204)
#[cfg(not(target_os = "linux"))]
pub use std::fs::rename;
#[cfg(target_os = "linux")]
mod rename_linux;
#[cfg(target_os = "linux")]
pub use rename_linux::*;
|
We at Word of Mouth were intrigued recently by a regular's mention of smoked drinks: BeckyDavidson wondered if I could come up with a cheap DIY version of smoked vodka. Smoky flavours in drinks are hardly unknown; whiskies, especially those such as the wonderful Islay malts, can be gloriously smoky thanks to the malt drying process. I've smoked the odd bit of mackerel over the years but had never tried with drinks.
Never one to turn down a challenge, I thought I'd give Becky's suggestion a go. I devised my own equipment – tubes, tin cans and fire – taking me back to my teenage years making small rockets and squibs in my dad's shed(s). As with these juvenile experiments I quickly discovered that plan A wouldn't work and that neither would plan B. Plan C did: but the equipment is a bit dangerous to make and perhaps dangerous to use. That said, I had a lot of fun trying it out, Becky, and the burns are healing nicely, thank you.
I made a large hole in the middle of a soup can lid, washed it and removed the label, opened the kitchen windows and heated it directly on the gas hob to burn off any extraneous material. (It's important to do this thoroughly because tin cans are plastic-coated inside, and for this reason and just for safety's sake using a camping stove outside would have been better.)
John Wright's experimental vodka smoking apparatus. Photograph: John Wright
Once the can had cooled I enlarged the hole to take a demijohn cork, connected a length of plastic hose to the cork, put my sawdust mix (below) into the can, wrapped the cork in tinfoil to form a (largely ineffective) seal and inserted it into the hole. The can went back on the hob at a very low setting (again, outdoors would have been better), and I inserted the other end of the tube into a clean bottle containing a little vodka. Within minutes a few wisps of smoke appeared in the bottle – I ran the process for four or five minutes, closed the bottle and shook it to collect the condensed smoke from the sides.
It's possible to buy smoking chips ready-made and mixed with various flavourings, but more fun to make them. I cut up some oak from the shed for the sawdust – it shouldn't be bone dry so I stirred in a tiny amount of water and left it for an hour to soak. You can mix in all sorts of flavourings. I experimented with cloves, cinnamon, vanilla, allspice, juniper berries and chopped orange peel – the last being my favourite, and the cheapest. The oak smoke tends to dominate so at least one third of the mix should come from the other flavourings.
Other ideas for home smoking vodka include cloves, cinnamon, vanilla, allspice, juniper berries and chopped orange peel. Photograph: John Wright
A single measure of smoked vodka will contain a tiny fraction of the smoky residue you'd ingest at a summer barbecue or from standing next to a bonfire, but there is a cloud of health concerns hanging over smoked foods in general. For this reason, as well as the potential for burning or cutting yourself in the ramshackle DIY method I devised, I'd advise people seriously interested in home-smoking drinks to invest in a smoke gun; for the sake of 50 quid you'll be able to have a go at smoking just about anything.
There's plenty of reason to do so – the flavour of smoked vodka is quite a revelation – it tastes like one of the smoky malt whiskies; not a very refined one I admit, but a smoky malt whisky nevertheless (if it ends up too smoky it's easily remedied by adding more vodka). There's no reason why you cannot try this with other drinks – sloe gin with juniper berries in the mix, cider brandy using applewood sawdust, elderberry wine using elder sawdust. Maybe they'll taste awful but we won't know unless we try. |
President Trump yesterday signed a presidential memorandum re-instating the Global Gag Rule. For most of the day yesterday the actual text of the memorandum was not released so much of the media coverage — including our own — was based on the understanding that Donald Trump simply re-instated the same policy that existed during the George W. Bush-era administration. In fact, he did not. Rather, after the text became public last night it became clear that Trump dramatically expanded the scope of the Global Gag Rule to include all global health assistance provided by the US government.
Previously, the restrictions embedded in the Global Gag Rule were limited exclusively to NGOs that receive US government assistance for family planning and reproductive health, like contraception. These restrictions include prohibiting that NGO from counseling women that abortion is an option or lobbying foreign governments to liberalize their abortion laws. Even if the funding sources for abortion counseling come from another source, that NGO must either cease that counseling or relinquish its US funding for, say, condom distribution or obstetric surgeries. That's how it worked in the Bush administration–to disastrous effect.
But the Trump memo takes this a huge step further. Rather than applying the Global Gag Rule exclusively to US assistance for family planning in the developing world, which amounts to about $575 million per year, the Trump memo applies it to “global health assistance furnished by all department or agencies.” In other words, NGOs that distribute bed nets for malaria, provide childhood vaccines, support early childhood nutrition and brain development, run HIV programs, fight ebola or Zika, and much more, must now certify their compliance with the Global Gag Rule or risk losing US funds. According to analysis from PAI, a global health NGO, this impacts over $9 billion of US funds, or about 15 times more than the previous iteration of the Global Gag Rule which only impacted reproductive health assistance.
We already know from research that the Bush-era Global Gag Rule resulted in a sharp decline in the availability of contraceptives in some of the poorest places on earth, which in turn hindered the fight against maternal death and increased abortion rates. Now, this one change may profoundly undermine not only progress on maternal health worldwide but also the US government’s ability to fight HIV/AIDS, big childhood killers like malaria, and prevent infectious diseases like Zika and ebola from reaching US shores.
This is a profound expansion of what was already a harmful policy.
Discussion
comments... |
import { Align, HorizontalAlign, VerticalAlign } from '../../shared/Types';
import IBaseElement from './IBaseElement';
/**
 * Layout contract for elements that can be positioned within their parent.
 * Extends IBaseElement with alignment, edge offsets and stacking order.
 */
export default interface IPositionElement extends IBaseElement {
    /** Combined alignment value (see the Align type for its semantics). */
    align: Align;
    /** Horizontal alignment within the parent. */
    alignHorizontal: HorizontalAlign;
    /** Vertical alignment within the parent. */
    alignVertical: VerticalAlign;
    /** Offset from the top edge; units depend on the rendering layer -- TODO confirm. */
    top: number;
    /** Offset from the right edge. */
    right: number;
    /** Offset from the bottom edge. */
    bottom: number;
    /** Offset from the left edge. */
    left: number;
    /** Stacking order relative to sibling elements. */
    zIndex: number;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.