content
stringlengths 10
4.9M
|
---|
#include <stdio.h>

/*
 * Subset-sum: read n values, mark every achievable subset sum in s[],
 * then answer q membership queries with "yes"/"no".
 *
 * MAX is 2001, not 2000: achievable sums go up to 2000 inclusive, and the
 * original bound caused an out-of-bounds write at s[j + b] == s[2000].
 */
#define MAX 2001

int main(void) {
    static int s[MAX] = {0}; /* s[v] != 0  <=>  some subset sums to v */
    int n, q, i, j;

    scanf("%d", &n);
    for (i = 0; i < n; i++) {
        int b;
        scanf("%d", &b);
        /* Ignore values that cannot index s[] safely (also guards s[b] below). */
        if (b <= 0 || b >= MAX) {
            continue;
        }
        /* Classic 0/1-knapsack sweep, descending so each value is used at most once. */
        for (j = MAX - 1 - b; j > 0; j--) {
            if (s[j]) {
                s[j + b] = 1;
            }
        }
        s[b] = 1;
    }

    scanf("%d", &q);
    for (i = 0; i < q; i++) {
        int c;
        scanf("%d", &c);
        /* Out-of-range targets are unreachable by construction: answer "no". */
        if (c >= 0 && c < MAX && s[c]) {
            printf("yes\n");
        } else {
            printf("no\n");
        }
    }
    return 0;
}
|
Scientific Study of Hybrid Testing Framework and Procedure
This paper discusses in detail the meaning of a hybrid framework, the hybrid model, the testing life cycle, and its different procedures. The word hybrid itself denotes a mixture of two approaches. This paper explains how hybrid model techniques are used to test applications. A few stages need to be followed carefully to achieve accurate test results. The hybrid approach is of two types; both are explained briefly in this paper. |
<reponame>mickmister/mattermost-plugin-stonks
import {ActionType} from './action_types';
// Action creator: signals the options modal should open.
export const openOptionsModal = () => ({
    type: ActionType.OPEN_OPTIONS_MODAL,
});
// Action creator: signals the options modal should close.
export const closeOptionsModal = () => ({
    type: ActionType.CLOSE_OPTIONS_MODAL,
});
// Action creator: flips the options modal open/closed state.
export const toggleOptionsModal = () => ({
    type: ActionType.TOGGLE_OPTIONS_MODAL,
});
// Fetches the share-price chart endpoint for a symbol and resolves to the response body text.
// NOTE(review): query parameters are interpolated unescaped — confirm callers pass safe values.
export const fetchSharePricePictureURL = (symbol: string, duration: string, frequency: string) => {
    const endpoint = `/plugins/stonks/etrade?symbol=${symbol}&duration=${duration}&frequency=${frequency}`;
    return fetch(endpoint).then((response) => response.text());
};
|
<filename>drama/run_tools.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .signal_synthesis import *
import numpy as np
def synt_event(i_sig, n_ftrs, x=None, n_inlier=2000, n_outlier=100,
               sigma=0.2, n1=0.02, n2=0.01, n3=0.02, n4=0.01,
               mu=(0, 1), amp=(0.3, 0.4), sig=(0.08, 0.1)):
    """Simulate a dataset of inlier signals plus outliers carrying an injected event.

    Signals are produced by ``signal`` and events overlaid by ``event_sig``
    (both from ``signal_synthesis``).  Returns ``(X, y)`` with label 0 for
    inliers and 1 for event-bearing outliers.

    NOTE: ``mu``/``amp``/``sig`` defaults were mutable lists, a Python
    anti-pattern (one shared instance across calls); tuples are safe and
    behave identically as long as ``event_sig`` only reads them.
    """
    if x is None:
        x = np.linspace(0, 1, n_ftrs)
    X = []
    y = []
    # Inliers: plain signals of class i_sig.
    for _ in range(n_inlier):
        X.append(signal(i_sig, x, sigma, n1, n2, n3, n4))
        y.append(0)
    # Outliers: the same base signal with a synthetic event overlaid.
    for _ in range(n_outlier):
        Xp = signal(i_sig, x, sigma, n1, n2, n3, n4)
        X.append(event_sig(Xp, mu=mu, amp=amp, sig=sig))
        y.append(1)
    return np.array(X), np.array(y)
def synt_mix(i_sig, n_ftrs, x=None, n_sig=11, n_inlier=1000, n_outlier=5, sigma=0.2, n1=0.02, n2=0.01, n3=0.02, n4=0.01):
    """Simulate a mixture dataset: many signals of class ``i_sig`` plus a few
    samples from every other class in ``1..n_sig-1``.

    Returns ``(X, y)`` where each label is the generating class id.
    """
    if x is None:
        x = np.linspace(0, 1, n_ftrs)
    samples = []
    labels = []
    # Dominant class: n_inlier realizations of the chosen signal.
    for _ in range(n_inlier):
        samples.append(signal(i_sig, x, sigma, n1, n2, n3, n4))
        labels.append(i_sig)
    # Minority classes: n_outlier realizations of every other signal id.
    for sig_id in range(1, n_sig):
        if sig_id == i_sig:
            continue
        for _ in range(n_outlier):
            samples.append(signal(sig_id, x, sigma, n1, n2, n3, n4))
            labels.append(sig_id)
    return np.array(samples), np.array(labels)
def synt_unbalanced(train_data=None, test_data=None,
                    sigma=0.1, n1=0.005, n2=0.005, n3=0.005, n4=0.005, n_ftrs=100):
    """Simulate an unbalanced train/test split of synthetic signals.

    ``train_data``/``test_data`` map class id -> number of samples; the test
    split additionally contains classes 7-10 unseen in training.  Returns
    ``(X_train, y_train, X_test, y_test)``.

    NOTE: the dict defaults were mutable default arguments (shared across
    calls); the ``None`` sentinel pattern is the safe equivalent.
    """
    if train_data is None:
        train_data = {1: 1000, 2: 1000, 3: 1000, 4: 1000, 5: 50, 6: 50}
    if test_data is None:
        test_data = {1: 1000, 2: 1000, 3: 1000, 4: 1000, 5: 50, 6: 50, 7: 50, 8: 50, 9: 50, 10: 50}
    x = np.linspace(0, 1, n_ftrs)
    X = []
    y = []
    for key, value in train_data.items():
        for _ in range(value):
            X.append(signal(key, x, sigma, n1, n2, n3, n4))
            y.append(key)
    X_train = np.array(X)
    y_train = np.array(y)
    X = []
    y = []
    for key, value in test_data.items():
        for _ in range(value):
            X.append(signal(key, x, sigma, n1, n2, n3, n4))
            y.append(key)
    X_test = np.array(X)
    y_test = np.array(y)
    return X_train, y_train, X_test, y_test
def simulate_shapes(numbers=None,
                    sigma=0.1, n1=0.005, n2=0.005, n3=0.005, n4=0.005, n_ftrs=100):
    """Simulate a dataset of signal shapes, ``numbers`` mapping class id to count.

    Returns ``(X, y)`` where each label is the generating class id.

    NOTE: the dict default was a mutable default argument (shared across
    calls); the ``None`` sentinel pattern is the safe equivalent.
    """
    if numbers is None:
        numbers = {1: 10, 2: 10, 3: 10, 4: 10, 5: 10, 6: 10, 7: 10, 8: 10, 9: 10, 10: 10}
    x = np.linspace(0, 1, n_ftrs)
    X = []
    y = []
    for key, value in numbers.items():
        for _ in range(int(value)):
            X.append(signal(key, x, sigma, n1, n2, n3, n4))
            y.append(key)
    return np.array(X), np.array(y)
#def job(X_train,X,name,n_t):
# out = {}
# n_ftrs = X.shape[1]
# dim_rs ={'none':'none','AE':'AE','VAE':'VAE',
# 'PCA':PCA(n_components=2),
# 'NMF':NMF(n_components=2),
# 'FastICA':FastICA(n_components=2, max_iter=1000)}
# for dim_r, value in dim_rs.items():
# print '------------------- '+dim_r+' --------------------'
# splitter = mce.Splitter(X_train, value, clustering)
# outliers = [None for i in range(7)]
# for i in range(2):
# splitter.split(1,verbose=0,training_epochs=20)
# outliers[i] = mce.outliers(X,splitter,metrics)
# for i,nn in enumerate([5,10,35]):
# lof = neighbors.LocalOutlierFactor(n_neighbors=nn)
# lof.fit(X)
# outliers[3+i] = -lof.negative_outlier_factor_
# isof = IsolationForest(max_samples='auto')
# isof.fit(X_train)
# scores_pred = isof.decision_function(X)
# outliers[6] = scores_pred.max()-scores_pred
# out[dim_r] = outliers
# with open('./res/'+name+str(n_t)+'.pkl', 'wb') as f:
# pickle.dump(out, f)
|
/**
 * The default {@link ContentViewClient} implementation for the chrome layer embedders.
 */
public class ChromeContentViewClient extends ContentViewClient {
@Override
// Reports whether JavaScript is enabled by delegating to the user-controlled
// preference instead of hard-coding a value.
public boolean isJavascriptEnabled() {
return PrefServiceBridge.getInstance().javaScriptEnabled();
}
} |
def validate_card(DEBIT_CARD=True):
    """Return a placeholder debit-card number, or False for non-debit cards."""
    if DEBIT_CARD:
        return "1234-5678-9999-000"
    return False
package node
import (
"context"
"github.com/filecoin-project/go-filecoin/core"
"github.com/filecoin-project/go-filecoin/net/pubsub"
"github.com/filecoin-project/go-filecoin/types"
"github.com/pkg/errors"
)
// defaultMessagePublisher publishes messages to a pubsub topic and adds them to a message pool.
// This is wiring for message publication from the outbox.
type defaultMessagePublisher struct {
network *pubsub.Publisher // transport used to broadcast encoded messages
topic string // pubsub topic name messages are published under
pool *core.MessagePool // local pool that also receives every published message
}
// newDefaultMessagePublisher wires a publisher from its three collaborators.
func newDefaultMessagePublisher(pubsub *pubsub.Publisher, topic string, pool *core.MessagePool) *defaultMessagePublisher {
	return &defaultMessagePublisher{
		network: pubsub,
		topic:   topic,
		pool:    pool,
	}
}
// Publish marshals message, adds it to the local message pool, and then
// broadcasts the encoded bytes on the configured pubsub topic.
// NOTE(review): the message is added to the pool before the network publish,
// so a failed publish leaves the message sitting in the pool — confirm intended.
// NOTE(review): the height parameter is not used by this implementation.
func (p *defaultMessagePublisher) Publish(ctx context.Context, message *types.SignedMessage, height uint64) error {
encoded, err := message.Marshal()
if err != nil {
return errors.Wrap(err, "failed to marshal message")
}
if _, err := p.pool.Add(ctx, message); err != nil {
return errors.Wrap(err, "failed to add message to message pool")
}
if err = p.network.Publish(p.topic, encoded); err != nil {
return errors.Wrap(err, "failed to publish message to network")
}
return nil
}
|
package soot.util;
/*-
* #%L
* Soot - a J*va Optimization Framework
* %%
* Copyright (C) 2002 Ondrej Lhotak
* %%
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as
* published by the Free Software Foundation, either version 2.1 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Lesser Public License for more details.
*
* You should have received a copy of the GNU General Lesser Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/lgpl-2.1.html>.
* #L%
*/
import java.util.Iterator;
import java.util.NoSuchElementException;
/**
 * A java.util.Map with Numberable objects as the keys. This one is designed for maps close to the size of the universe. For
 * smaller maps, use SmallNumberedMap.
 *
 * @author Ondrej Lhotak
 */
public final class LargeNumberedMap<K extends Numberable, V> implements INumberedMap<K, V> {
  /** Numberer providing the key universe; key numbers index directly into {@link #values}. */
  private final IterableNumberer<K> universe;
  /** Dense value store indexed by key number; number 0 denotes an uninitialized key. */
  private V[] values;

  public LargeNumberedMap(IterableNumberer<K> universe) {
    this.universe = universe;
    int size = universe.size();
    // Start with at least 8 slots to avoid repeated early growth.
    this.values = newArray(size < 8 ? 8 : size);
  }

  @SuppressWarnings("unchecked")
  private static <T> T[] newArray(int size) {
    return (T[]) new Object[size];
  }

  /**
   * Associates value with key.
   *
   * @return true if the stored reference changed (reference comparison, not equals()).
   */
  @Override
  public boolean put(K key, V value) {
    int number = key.getNumber();
    if (number == 0) {
      throw new RuntimeException(String.format("oops, forgot to initialize. Object is of type %s, and looks like this: %s",
          key.getClass().getName(), key.toString()));
    }
    if (number >= values.length) {
      // Grow to at least double the universe (plus slack) so growth is amortized.
      Object[] oldValues = values;
      values = newArray(Math.max(universe.size() * 2, number) + 5);
      System.arraycopy(oldValues, 0, values, 0, oldValues.length);
    }
    boolean ret = (values[number] != value);
    values[number] = value;
    return ret;
  }

  /** Returns the value mapped to key, or null if absent (or never grown that far). */
  @Override
  public V get(K key) {
    int i = key.getNumber();
    if (i >= values.length) {
      return null;
    }
    return values[i];
  }

  @Override
  public void remove(K key) {
    int i = key.getNumber();
    if (i < values.length) {
      values[i] = null;
    }
  }

  /** Iterates the mapped keys in ascending key-number order. */
  @Override
  public Iterator<K> keyIterator() {
    return new Iterator<K>() {
      int cur = 0;
      int last = -1; // index of the key most recently returned by next(); -1 if none or removed

      // Skip empty slots up to the next mapped key.
      private void advance() {
        while (cur < values.length && values[cur] == null) {
          cur++;
        }
      }

      @Override
      public boolean hasNext() {
        advance();
        return cur < values.length;
      }

      @Override
      public K next() {
        if (!hasNext()) {
          throw new NoSuchElementException();
        }
        last = cur;
        return universe.get(cur++);
      }

      @Override
      public void remove() {
        // BUGFIX: the old code cleared values[cur - 1], which points at the wrong
        // slot when hasNext() has advanced cur past null slots since the last
        // next() call. Track the returned index explicitly instead.
        if (last < 0) {
          throw new IllegalStateException("next() has not been called");
        }
        values[last] = null;
        last = -1;
      }
    };
  }
}
|
Systematic review of innovative ablative therapies for the treatment of locally advanced pancreatic cancer
Locally advanced pancreatic cancer (LAPC) is associated with a very poor prognosis. Current palliative (radio)chemotherapy provides only a marginal survival benefit of 2–3 months. Several innovative local ablative therapies have been explored as new treatment options. This systematic review aims to provide an overview of the clinical outcomes of these ablative therapies. |
/**
* Write to multiple OutputStreams
*
* @version $Revision$
*/
public class MultiplexOutputStream extends OutputStream {
List streams = new CopyOnWriteArrayList();
/**
* Add an Output Stream
* @param os
*/
public void add(OutputStream os) {
streams.add(os);
}
/**
* Remove an OutputStream
* @param os
*/
public void remove(OutputStream os) {
streams.remove(os);
}
/**
* write a byte
* @param b
* @throws IOException
*/
public synchronized void write(int b) throws IOException {
for (Iterator i = streams.iterator(); i.hasNext();) {
OutputStream s = (OutputStream) i.next();
s.write(b);
}
}
/**
* write an array
* @param b
* @param off
* @param len
* @throws IOException
*/
public synchronized void write(byte b[], int off, int len) throws IOException {
for (Iterator i = streams.iterator(); i.hasNext();) {
OutputStream s = (OutputStream) i.next();
s.write(b, off, len);
}
}
/**
* flush
* @throws IOException
*/
public void flush() throws IOException {
for (Iterator i = streams.iterator(); i.hasNext();) {
OutputStream s = (OutputStream) i.next();
s.flush();
}
}
/**
* close
* @throws IOException
*/
public void close() throws IOException {
for (Iterator i = streams.iterator(); i.hasNext();) {
OutputStream s = (OutputStream) i.next();
s.close();
}
streams.clear();
}
} |
package com.tuyo.namoroapp.repositories;
import com.tuyo.namoroapp.entities.ContaUsuario;
import com.tuyo.namoroapp.entities.Interesse;
import org.springframework.data.jpa.repository.JpaRepository;
/** Spring Data JPA repository exposing standard CRUD operations for {@link Interesse} entities (Integer primary key). */
public interface InteresseRepo extends JpaRepository<Interesse, Integer> {
}
|
import sys
def mmul(a, b, m):
    """Return (a * b) mod m, reducing each factor first to keep intermediates small."""
    reduced_a = a % m
    reduced_b = b % m
    return (reduced_a * reduced_b) % m
# Read n (number of values) and k (size of the selected block), then the values.
[n, k] = [int(i) for i in sys.stdin.readline().split()]
p = [int(j) for j in sys.stdin.readline().split()]

m = 998244353  # prime modulus commonly used in competitive programming

# Maximum achievable total: the k largest "slots" n, n-1, ..., n-(k-1).
# Closed form of the original decrementing loop: n + (n-1) + ... + (n-k+1).
ans = k * n - k * (k - 1) // 2

# check is the smallest value that belongs to the top-k block.
check = n - (k - 1)
ways = 1
ctr = -2  # sentinel: no top-k element encountered yet
for g in range(n):
    if p[g] >= check:
        if ctr < 0:
            # First top-k element seen; start counting gap length.
            ctr = 1
        else:
            # Multiply in the number of positions accumulated since the
            # previous top-k element (modular arithmetic via mmul).
            ways = mmul(ways, ctr, m)
            ctr = 1
    else:
        if ctr > 0:
            ctr += 1
print(ans, ways)
|
Orlando City SC's three teams finished this week with two losses, as Orlando City B had a bye week.
(Photo by Victor Tan / New Day Review)
ORLANDO, Fla. – Regarding results, Orlando City SC had itself a poor week. Both its Major League Soccer and National Women’s Soccer League sides fell in disappointing fashion while Orlando City B didn’t play a game. Both losing sides will have a chance at redemption against teams they lost to this week. The Lions will first travel to face Atlanta United FC on Saturday, and the Pride will host the Chicago Red Stars on Aug. 5. OCB will play its next game on Saturday in an away game against the No. 1 Charleston Battery.
Orlando City SC (MLS)
Record: 8-8-5, 29 points
No. Team GP Overall (W-L-T) GF GA GD Pts 1. Toronto 21 11-3-7 37 22 15 40 2. Chicago 20 11-4-5 38 21 17 38 3. NYCFC 21 11-6-4 40 27 13 37 4. Atlanta 20 10-7-3 40 27 13 33 5. NYRB 20 10-8-2 28 26 2 32 6. Columbus 21 10-10-1 31 32 -1 31 7. Orlando City 21 8-8-5 22 30 -8 29 8. Montreal 19 6-7-6 30 32 -2 24 9. Philadelphia 20 6-9-5 26 24 2 23 10. New England 20 6-9-5 33 34 -1 23 11. D.C. 21 5-13-3 18 38 -20 18
Héctor Villalba’s Late Golazo Leads Atlanta Over Orlando City
Orlando City lost its first-ever matchup with Atlanta on Friday behind an 86th-minute goal by Héctor Villalba. City center-back Jonathan Spector had two on-goal headers, one in each half, that were saved by diving efforts by Atlanta goalkeeper Brad Guzan, who was making his first MLS appearance since 2008.
Click here for the full recap.
–
Jason Kreis: Orlando City-Atlanta United Matchup Isn’t Rivalry Without Meaningful Matches
Ahead of City’s first-ever matchup with Atlanta, head coach Jason Kreis wouldn’t call the two teams’ relationship a rivalry yet. For the second-year City head coach, rivalries are born from meaningful matches. Kreis highlighted his time as a coach with Real Salt Lake and as a player with FC Dallas when he referenced two of his favorite rivalries: RSL-Colorado Rapids and Dallas-Chicago Fire.
Click here for the full story.
–
Will Johnson: Orlando City-Atlanta United Rivalry Has Begun
For midfielder Will Johnson, a rivalry between his side and Atlanta has begun. Johnson had a near one-on-one chance and a chance to equalize in the second half of a 1-0 loss to Atlanta on Friday. Kreis, on the other hand, still didn’t call it a rivalry after the loss.
Click here for the full story.
–
Training-availability Transcripts
Click here for the full transcript of media availability with Kreis, Spector and midfielder Antonio Nocerino from July 19.
Orlando Pride (NWSL)
Record: 5-6-4, 19 points
No. Team GP Overall (W-L-T) GF GA GD Pts 1. Chicago 15 8-3-4 20 14 6 28 2. North Carolina 14 9-5-0 20 13 7 27 3. Portland 15 7-4-4 19 13 6 25 4. Seattle 15 6-3-6 30 22 8 24 5. Sky Blue FC 16 7-7-2 28 28 0 23 6. Houston 15 6-7-2 16 23 -7 20 7. Orlando 15 5-6-4 24 23 1 19 8. Boston 14 3-7-5 10 16 -6 14 9. Kansas City 14 3-7-4 14 22 -8 13 10. Washington 14 3-8-3 17 24 -7 12
Pride Lose to Red Stars, Again, as Alanna Kennedy Scores 1st-ever Goal Against Chicago
Midfielder Alanna Kennedy scored her third goal of the season, the first-ever Pride goal against the Red Stars, in stoppage time. It wasn’t enough to stop Christen Press’ brace and the Red Stars’ 2-1 win on Saturday, though. Orlando’s loss now pushes them deeper into the bottom of the standings, as the Dash have skipped Orlando into sixth place with 20 points.
Click here for the full recap.
–
Camila Martins Pereira, Chioma Ubogagu Find Freedom, Opportunities with Pride
Utility player Camila Martins Pereira and forward Chioma Ubogagu each had stints with the Houston Dash. Camila played for the Texas side in 2015, and Ubogagu was with the team last season. Now with Orlando, both players are so far having career seasons in their first years with the Pride.
Click here for the full story.
–
Training-availability Transcripts
Click here for the full transcript of media availability with Sermanni, Camila and Ubogagu from July 20.
For more on Orlando City and all three of its teams, follow Victor Tan on Twitter at @NDR_VictorTan. |
package com.esri.terraformer.core;
import java.util.Arrays;
import java.util.Collection;
public final class MultiPolygon extends Geometry<Polygon> {
    public static final String ERROR_PREFIX = "Error while parsing MultiPolygon: ";

    /**
     * A valid MultiPolygon contains 0 or more non-null {@link Polygon}'s.
     *
     * @param polygons polygons composing this multi-polygon
     */
    public MultiPolygon(Polygon... polygons) {
        addAll(Arrays.asList(polygons));
    }

    public MultiPolygon(int initialCapacity) {
        super(initialCapacity);
    }

    public MultiPolygon(Collection<Polygon> c) {
        super(c);
    }

    @Override
    public GeometryType getType() {
        return GeometryType.MULTIPOLYGON;
    }

    /** A MultiPolygon is valid iff every contained polygon is non-null and itself valid. */
    @Override
    public boolean isValid() {
        for (Polygon polygon : this) {
            if (polygon == null || !polygon.isValid()) {
                return false;
            }
        }
        return true;
    }

    @Override
    public boolean isEquivalentTo(BaseGeometry<?> obj) {
        Boolean shortCircuit = naiveEquals(this, obj);
        if (shortCircuit != null) {
            return shortCircuit;
        }

        MultiPolygon other;
        try {
            other = (MultiPolygon) obj;
        } catch (ClassCastException e) {
            return false;
        }

        // Containment must hold both ways to account for duplicates present on only one side.
        return multiPolygonContainsOther(this, other) && multiPolygonContainsOther(other, this);
    }

    /** Returns true when every non-null polygon of mpg1 has an equivalent polygon in mpg2. */
    static boolean multiPolygonContainsOther(MultiPolygon mpg1, MultiPolygon mpg2) {
        outer:
        for (Polygon candidate : mpg1) {
            if (candidate == null) {
                continue;
            }
            for (Polygon other : mpg2) {
                if (candidate.isEquivalentTo(other)) {
                    continue outer;
                }
            }
            return false;
        }
        return true;
    }
}
|
/**
*
* @author Jon Hoppesch
*/
public class HL7AckTransforms {
private static final Logger LOG = Logger.getLogger(HL7AckTransforms.class);
public static final String ACK_DETAIL_TYPE_CODE_ERROR = "E";
public static final String ACK_DETAIL_TYPE_CODE_INFO = "I";
public static final String ACK_TYPE_CODE_ACCEPT = "CA";
public static final String ACK_TYPE_CODE_ERROR = "CE";
/**
* Create acknowledgment accept message from patient discovery request.
*
* @param request
* @param ackMsgText
* @return ackMsg
*/
public static MCCIIN000002UV01 createAckFrom201305(PRPAIN201305UV02 request, String ackMsgText) {
MCCIIN000002UV01 ack = new MCCIIN000002UV01();
II msgId = new II();
if (request != null) {
// Extract the message id
if (request.getId() != null) {
msgId = request.getId();
}
// Set the sender OID to the receiver OID from the original message
String senderOID = getMCCIMT000100UV01RepresentedOrganizationRootOID(request.getReceiver());
// Set the receiver OID to the sender OID from the original message
String receiverOID = getMCCIMT000100UV01RepresentedOrganizationRootOID(request.getSender());
// Create the ack message
ack = HL7AckTransforms.createAckMessage(null, msgId, ACK_TYPE_CODE_ACCEPT, ackMsgText, senderOID,
receiverOID);
}
return ack;
}
/**
* Create acknowledgment error message from patient discovery request.
*
* @param request
* @param ackMsgText
* @return ackMsg
*/
public static MCCIIN000002UV01 createAckErrorFrom201305(PRPAIN201305UV02 request, String ackMsgText) {
MCCIIN000002UV01 ack = new MCCIIN000002UV01();
II msgId = new II();
if (request != null) {
// Extract the message id
if (request.getId() != null) {
msgId = request.getId();
}
// Set the sender OID to the receiver OID from the original message
String senderOID = getMCCIMT000100UV01RepresentedOrganizationRootOID(request.getSender());
// Set the receiver OID to the sender OID from the original message
String receiverOID = senderOID;
// Create the ack message
ack = HL7AckTransforms.createAckMessage(null, msgId, ACK_TYPE_CODE_ERROR, ackMsgText, senderOID,
receiverOID);
}
return ack;
}
/**
* Create acknowledgment accept message from patient discovery response.
*
* @param request
* @param ackMsgText
* @return ackMsg
*/
public static MCCIIN000002UV01 createAckFrom201306(PRPAIN201306UV02 request, String ackMsgText) {
MCCIIN000002UV01 ack = new MCCIIN000002UV01();
II msgId = new II();
if (request != null) {
// Extract the message id
if (request.getId() != null) {
msgId = request.getId();
}
// Set the sender OID to the receiver OID from the original message
String senderOID = getMCCIMT000300UV01RepresentedOrganizationRootOID(request.getReceiver());
// Set the receiver OID to the sender OID from the original message
String receiverOID = getMCCIMT000300UV01RepresentedOrganizationRootOID(request.getSender());
// Create the ack message
ack = HL7AckTransforms.createAckMessage(null, msgId, ACK_TYPE_CODE_ACCEPT, ackMsgText, senderOID,
receiverOID);
}
return ack;
}
/**
* Create acknowledgment accept message from patient discovery response.
*
* @param request
* @param ackMsgText
* @return ackMsg
*/
public static MCCIIN000002UV01 createAckErrorFrom201306(PRPAIN201306UV02 request, String ackMsgText) {
MCCIIN000002UV01 ack = new MCCIIN000002UV01();
II msgId = new II();
if (request != null) {
// Extract the message id
if (request.getId() != null) {
msgId = request.getId();
}
// Set the sender OID to the receiver OID from the original message
String senderOID = getMCCIMT000300UV01RepresentedOrganizationRootOID(request.getReceiver());
// Set the receiver OID to the sender OID from the original message
String receiverOID = getMCCIMT000300UV01RepresentedOrganizationRootOID(request.getSender());
// Create the ack message
ack = HL7AckTransforms.createAckMessage(null, msgId, ACK_TYPE_CODE_ERROR, ackMsgText, senderOID,
receiverOID);
}
return ack;
}
/**
* Create acknowledgment message based on specific data values.
*
* @param localDeviceId
* @param origMsgId
* @param msgText
* @param senderOID
* @param receiverOID
* @return ackMsg
*/
public static MCCIIN000002UV01 createAckMessage(String localDeviceId, II origMsgId, String ackTypeCode,
String msgText, String senderOID, String receiverOID) {
MCCIIN000002UV01 ackMsg = new MCCIIN000002UV01();
// Create the Ack message header fields
ackMsg.setITSVersion(HL7Constants.ITS_VERSION);
ackMsg.setId(HL7MessageIdGenerator.GenerateHL7MessageId(localDeviceId));
ackMsg.setCreationTime(HL7DataTransformHelper.CreationTimeFactory());
ackMsg.setInteractionId(HL7DataTransformHelper.IIFactory(HL7Constants.INTERACTION_ID_ROOT, "MCCIIN000002UV01"));
ackMsg.setProcessingCode(HL7DataTransformHelper.CSFactory("T"));
ackMsg.setProcessingModeCode(HL7DataTransformHelper.CSFactory("T"));
ackMsg.setAcceptAckCode(HL7DataTransformHelper.CSFactory("NE"));
// Create the Sender
ackMsg.setSender(HL7SenderTransforms.createMCCIMT000200UV01Sender(senderOID));
// Create the Receiver
ackMsg.getReceiver().add(HL7ReceiverTransforms.createMCCIMT000200UV01Receiver(receiverOID));
// Create Acknowledgment section if an original message id or message text was specified
if (NullChecker.isNotNullish(msgText)
|| (origMsgId != null && NullChecker.isNotNullish(origMsgId.getRoot()) && NullChecker
.isNotNullish(origMsgId.getExtension()))) {
LOG.debug("Adding Acknowledgement Section");
ackMsg.getAcknowledgement().add(createAcknowledgement(origMsgId, ackTypeCode, msgText));
}
return ackMsg;
}
/**
* Create acknowledgment element based on specific data values.
*
* @param msgId
* @param msgText
* @return ack
*/
public static MCCIMT000200UV01Acknowledgement createAcknowledgement(II msgId, String ackTypeCode, String msgText) {
MCCIMT000200UV01Acknowledgement ack = new MCCIMT000200UV01Acknowledgement();
ack.setTypeCode(HL7DataTransformHelper.CSFactory(ackTypeCode));
if (msgId != null) {
ack.setTargetMessage(createTargetMessage(msgId));
}
if (msgText != null) {
ack.getAcknowledgementDetail().add(createAckDetail(ackTypeCode, msgText));
}
return ack;
}
/**
* Create targetMessage element based on specific data values.
*
* @param msgId
* @return targetMsg
*/
public static MCCIMT000200UV01TargetMessage createTargetMessage(II msgId) {
MCCIMT000200UV01TargetMessage targetMsg = new MCCIMT000200UV01TargetMessage();
if (msgId != null) {
LOG.debug("Setting original message id, root: " + msgId.getRoot() + ", extension: " + msgId.getExtension());
targetMsg.setId(msgId);
}
return targetMsg;
}
/**
* Create acknowledgementDetail element based on specific data values.
*
* @param msgText
* @return ackDetail
*/
public static MCCIMT000200UV01AcknowledgementDetail createAckDetail(String ackTypeCode, String msgText) {
MCCIMT000200UV01AcknowledgementDetail ackDetail = new MCCIMT000200UV01AcknowledgementDetail();
if (ackTypeCode.equals(ACK_TYPE_CODE_ERROR)) {
// Set the acknowledge detail type code as an error
ackDetail.setTypeCode(AcknowledgementDetailType.fromValue(ACK_DETAIL_TYPE_CODE_ERROR));
// Set the acknowledge detail code as an internal error
CE ceCode = new CE();
ceCode.setCode("INTERR");
ceCode.setCodeSystem("2.16.840.1.113883.5.1100");
ceCode.setCodeSystemName("AcknowledgementDetailCode");
ceCode.setDisplayName("Internal error");
ackDetail.setCode(ceCode);
} else {
// Set the acknowledge detail type code as an info
ackDetail.setTypeCode(AcknowledgementDetailType.fromValue(ACK_DETAIL_TYPE_CODE_INFO));
}
if (NullChecker.isNotNullish(msgText)) {
// Set the acknowledge message text
EDExplicit msg = new EDExplicit();
LOG.debug("Setting ack message text: " + msgText);
msg.getContent().add(msgText);
ackDetail.setText(msg);
}
return ackDetail;
}
private static String getMCCIMT000100UV01RepresentedOrganizationRootOID(List<MCCIMT000100UV01Receiver> receiverList) {
String root = null;
if (NullChecker.isNotNullish(receiverList)
&& receiverList.get(0) != null
&& receiverList.get(0).getDevice() != null
&& receiverList.get(0).getDevice().getAsAgent() != null
&& receiverList.get(0).getDevice().getAsAgent().getValue() != null
&& receiverList.get(0).getDevice().getAsAgent().getValue().getRepresentedOrganization() != null
&& receiverList.get(0).getDevice().getAsAgent().getValue().getRepresentedOrganization().getValue() != null
&& NullChecker.isNotNullish(receiverList.get(0).getDevice().getAsAgent().getValue()
.getRepresentedOrganization().getValue().getId())
&& receiverList.get(0).getDevice().getAsAgent().getValue().getRepresentedOrganization().getValue()
.getId().get(0) != null
&& NullChecker.isNotNullish(receiverList.get(0).getDevice().getAsAgent().getValue()
.getRepresentedOrganization().getValue().getId().get(0).getRoot())) {
root = receiverList.get(0).getDevice().getAsAgent().getValue().getRepresentedOrganization().getValue()
.getId().get(0).getRoot();
}
return root;
}
private static String getMCCIMT000100UV01RepresentedOrganizationRootOID(MCCIMT000100UV01Sender sender) {
String root = null;
if (sender != null
&& sender.getDevice() != null
&& sender.getDevice().getAsAgent() != null
&& sender.getDevice().getAsAgent().getValue() != null
&& sender.getDevice().getAsAgent().getValue().getRepresentedOrganization() != null
&& sender.getDevice().getAsAgent().getValue().getRepresentedOrganization().getValue() != null
&& NullChecker.isNotNullish(sender.getDevice().getAsAgent().getValue().getRepresentedOrganization()
.getValue().getId())
&& sender.getDevice().getAsAgent().getValue().getRepresentedOrganization().getValue().getId().get(0) != null
&& NullChecker.isNotNullish(sender.getDevice().getAsAgent().getValue().getRepresentedOrganization()
.getValue().getId().get(0).getRoot())) {
root = sender.getDevice().getAsAgent().getValue().getRepresentedOrganization().getValue().getId().get(0)
.getRoot();
}
return root;
}
private static String getMCCIMT000300UV01RepresentedOrganizationRootOID(List<MCCIMT000300UV01Receiver> receiverList) {
String root = null;
if (NullChecker.isNotNullish(receiverList)
&& receiverList.get(0) != null
&& receiverList.get(0).getDevice() != null
&& receiverList.get(0).getDevice().getAsAgent() != null
&& receiverList.get(0).getDevice().getAsAgent().getValue() != null
&& receiverList.get(0).getDevice().getAsAgent().getValue().getRepresentedOrganization() != null
&& receiverList.get(0).getDevice().getAsAgent().getValue().getRepresentedOrganization().getValue() != null
&& NullChecker.isNotNullish(receiverList.get(0).getDevice().getAsAgent().getValue()
.getRepresentedOrganization().getValue().getId())
&& receiverList.get(0).getDevice().getAsAgent().getValue().getRepresentedOrganization().getValue()
.getId().get(0) != null
&& NullChecker.isNotNullish(receiverList.get(0).getDevice().getAsAgent().getValue()
.getRepresentedOrganization().getValue().getId().get(0).getRoot())) {
root = receiverList.get(0).getDevice().getAsAgent().getValue().getRepresentedOrganization().getValue()
.getId().get(0).getRoot();
}
return root;
}
private static String getMCCIMT000300UV01RepresentedOrganizationRootOID(MCCIMT000300UV01Sender sender) {
String root = null;
if (sender != null
&& sender.getDevice() != null
&& sender.getDevice().getAsAgent() != null
&& sender.getDevice().getAsAgent().getValue() != null
&& sender.getDevice().getAsAgent().getValue().getRepresentedOrganization() != null
&& sender.getDevice().getAsAgent().getValue().getRepresentedOrganization().getValue() != null
&& NullChecker.isNotNullish(sender.getDevice().getAsAgent().getValue().getRepresentedOrganization()
.getValue().getId())
&& sender.getDevice().getAsAgent().getValue().getRepresentedOrganization().getValue()
.getId().get(0) != null
&& NullChecker.isNotNullish(sender.getDevice().getAsAgent().getValue().getRepresentedOrganization()
.getValue().getId().get(0).getRoot())) {
root = sender.getDevice().getAsAgent().getValue().getRepresentedOrganization().getValue().getId().get(0)
.getRoot();
}
return root;
}
} |
/// Receive message head from buffer. Returns following message kind and length.
pub fn recv_message_head(
&mut self,
rdsrc: &mut impl std::io::Read,
) -> Result<(message::BgpMessageType, usize), BgpError> {
let mut buf = [0 as u8; 19];
rdsrc.read_exact(&mut buf)?;
self.decode_message_head(&buf)
} |
<reponame>zainkai/LeetCodeChallenges<gh_stars>1-10
import "fmt"
// MaxStack is an int stack that can also report and remove its current maximum.
type MaxStack struct {
data []int // stack values, in push order
maxData []int // maxData[i] holds the max of data[0..i]; kept in lockstep with data
}
/** initialize your data structure here. */
// Constructor returns an empty MaxStack; zero-value slices are ready for append.
func Constructor() MaxStack {
return MaxStack{}
}
// Push appends x to the stack and records the running maximum alongside it.
func (this *MaxStack) Push(x int) {
	this.data = append(this.data, x)
	if len(this.maxData) == 0 {
		this.maxData = append(this.maxData, x)
		return
	}
	runningMax := x
	if prev := this.maxData[len(this.maxData)-1]; prev > runningMax {
		runningMax = prev
	}
	this.maxData = append(this.maxData, runningMax)
}
// Pop removes and returns the top value, keeping maxData in lockstep.
func (this *MaxStack) Pop() int {
	last := len(this.data) - 1
	value := this.data[last]
	this.data = this.data[:last]
	this.maxData = this.maxData[:len(this.maxData)-1]
	return value
}
// Top returns the most recently pushed value without removing it.
// Callers must ensure the stack is non-empty (panics otherwise).
func (this *MaxStack) Top() int {
return this.data[len(this.data)-1]
}
// PeekMax returns the current maximum without removing it.
// NOTE(review): the fmt.Println looks like leftover debug output; removing it
// would also orphan the file's "fmt" import, so it is kept — confirm intent.
func (this *MaxStack) PeekMax() int {
fmt.Println(this.maxData[len(this.maxData)-1])
return this.maxData[len(this.maxData)-1]
}
// PopMax removes and returns the current maximum: values above it are popped
// into a scratch buffer, the max is discarded, and the rest are pushed back.
func (this *MaxStack) PopMax() int {
	max := this.PeekMax()
	var scratch []int
	for this.Top() != max {
		scratch = append(scratch, this.Pop())
	}
	this.Pop() // discard the maximum itself
	for len(scratch) > 0 {
		this.Push(scratch[len(scratch)-1])
		scratch = scratch[:len(scratch)-1]
	}
	return max
}
/**
* Your MaxStack object will be instantiated and called as such:
* obj := Constructor();
* obj.Push(x);
* param_2 := obj.Pop();
* param_3 := obj.Top();
* param_4 := obj.PeekMax();
* param_5 := obj.PopMax();
*/ |
def add_noise(
    self,
    simulation_df: pd.DataFrame,
    noise_scaling_factor: float = 1,
    **kwargs
) -> pd.DataFrame:
    """Return a copy of ``simulation_df`` whose measurement column has noise added.

    Each row's measurement is replaced by ``sample_noise`` applied to that row,
    scaled by ``noise_scaling_factor``; extra kwargs are forwarded to it.
    """
    noisy_df = simulation_df.copy()
    noisy_measurements = []
    for _, row in noisy_df.iterrows():
        noisy_measurements.append(
            sample_noise(
                self.petab_problem,
                row,
                row[petab.C.MEASUREMENT],
                self.noise_formulas,
                self.rng,
                noise_scaling_factor,
                **kwargs,
            )
        )
    noisy_df[petab.C.MEASUREMENT] = noisy_measurements
    return noisy_df
When singer-songwriter Mac DeMarco found out that his new album "This Old Dog" had leaked three weeks before its official release, he did something unusual. Instead of complaining, he actively encouraged fans to download a free copy from The Pirate Bay, Soulseek, or even long defunct pirate classics such as Napster, Limewire, and Kazaa.
“Piracy is killing the music industry” is a phrase we’ve been hearing from industry execs for many years now.
So in that regard, it can be quite refreshing to hear a different perspective from someone whose livelihood depends on music.
This is exactly what happened at the Coachella Valley Music and Arts Festival last Friday.
During his set, singer-songwriter Mac DeMarco told the crowd that his latest album “This Old Dog” had leaked online. That’s not insignificant, as it’s nearly three weeks before the official May 5 release date.
However, instead of begging fans to wait for the official release to come out, DeMarco said that he didn’t give a shit and encouraged them to download it from pirate sites.
“We’re going to play a song we’ve only played twice before. It’s a new song, came out a couple of days ago. But you know what? The album leaked yesterday, so I don’t give a shit anymore.”
“Download it. Pirate Bay, Torrents.to, Soulseek, Napster, Limewire, Kazaa. Just get it, just get it,” DeMarco added.
Pirate Bay, Torrents.to, Soulseek, Napster, Limewire, Kazaa…
The comments are noteworthy since artists don’t regularly encourage fans to get their work on The Pirate Bay, for free. However, the sites and services that the singer-songwriter mentioned are also worth highlighting.
It appears that Mac DeMarco hasn’t been actively participating in the piracy scene recently as the references are a bit dated, to say the least.
The original Napster application ceased to operate in 2001, when DeMarco was 11 years old, and Kazaa and Limewire followed a few years later. Even Torrents.to is no longer operational from its original domain name.
The only two options that remain are The Pirate Bay and Soulseek, which are both icons in the file-sharing world. Perhaps it’s time for this old dog to learn some new tricks?
Despite the active “promo,” thus far interest in the leaked album is rather modest. The torrent on The Pirate Bay has roughly 100 people sharing it at the time of writing, and that’s the most popular one we’ve seen.
Or could it be that some fans just gave up after they tried to get outdated and malware infested copies of Kazaa and Limewire up and running?
Download… |
<reponame>xsjames/vsf
/*****************************************************************************
* Copyright(C)2009-2019 by VSF Team *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* *
****************************************************************************/
/*============================ INCLUDES ======================================*/
#define __VSF_DISP_CLASS_INHERIT
#include "vsf.h"
#if VSF_USE_UI == ENABLED && VSF_USE_TINY_GUI == ENABLED
#include "./images/demo_images.h"
#include "./fonts/demo_font.h"
/*============================ MACROS ========================================*/
/*============================ MACROFIED FUNCTIONS ===========================*/
/*============================ TYPES =========================================*/
/*============================ GLOBAL VARIABLES ==============================*/
/*============================ LOCAL VARIABLES ===============================*/
extern void vsf_tgui_demo_on_ready(void);
/*============================ PROTOTYPES ====================================*/
/*============================ IMPLEMENTATION ================================*/
/* Dummy implementation for this template port: there is no readable frame
 * buffer, so every pixel reads back as VSF_TGUI_COLOR_BLACK. */
static vsf_tgui_color_t vsf_tgui_get_pixel(vsf_tgui_location_t* ptLocation)
{
    vsf_tgui_sv_color_t tBackground = VSF_TGUI_COLOR_BLACK;
    return tBackground.tColor;
}
/* Dummy implementation for this template port: pixel writes are discarded
 * because no real display buffer is attached. Both arguments are unused. */
static void vsf_tgui_set_pixel(vsf_tgui_location_t* ptLocation, vsf_tgui_color_t tColor)
{
}
/* Dummy font rasterizer: returns a constant coverage value (0x55) for every
 * glyph pixel instead of sampling real font data. ptFont and wChar are
 * unused; only ptLocation is validated. */
static uint8_t vsf_tgui_font_get_pixel_color(void* ptFont, uint32_t wChar, vsf_tgui_location_t* ptLocation)
{
    VSF_TGUI_ASSERT(ptLocation != NULL);
    return 0x55;
}
/* Dummy metrics hook: reports a fixed advance width (55 pixels) for every
 * character. ptFont and wChar are unused in this template port. */
uint8_t vsf_tgui_proportional_font_get_char_width(const vsf_tgui_font_t* ptFont, uint32_t wChar)
{
    return 55;
}
/********************************************************************************/
/* Fill a rectangle by alpha-blending tRectColor (weighted by its own alpha
 * channel) over the existing frame-buffer contents, one pixel at a time.
 * The whole rectangle must lie inside the screen, which is enforced by the
 * asserts below. */
void vsf_tgui_draw_rect(vsf_tgui_location_t* ptLocation, vsf_tgui_size_t* ptSize, vsf_tgui_color_t tRectColor)
{
    VSF_TGUI_ASSERT(ptLocation != NULL);
    VSF_TGUI_ASSERT((0 <= ptLocation->iX) && (ptLocation->iX < VSF_TGUI_HOR_MAX));  // x_start point in screen
    VSF_TGUI_ASSERT((0 <= ptLocation->iY) && (ptLocation->iY < VSF_TGUI_VER_MAX));  // y_start point in screen
    VSF_TGUI_ASSERT(0 <= (ptLocation->iX + ptSize->iWidth));                        // x_end point in screen
    VSF_TGUI_ASSERT((ptLocation->iX + ptSize->iWidth) <= VSF_TGUI_HOR_MAX);
    VSF_TGUI_ASSERT(0 <= (ptLocation->iY + ptSize->iHeight));                       // y_end point in screen
    VSF_TGUI_ASSERT((ptLocation->iY + ptSize->iHeight) <= VSF_TGUI_VER_MAX);

    for (uint16_t iRow = 0; iRow < ptSize->iHeight; iRow++) {
        for (uint16_t iCol = 0; iCol < ptSize->iWidth; iCol++) {
            vsf_tgui_location_t tPoint = {
                .iX = ptLocation->iX + iCol,
                .iY = ptLocation->iY + iRow,
            };
            vsf_tgui_color_t tBlended = vsf_tgui_color_mix(tRectColor,
                                                           vsf_tgui_get_pixel(&tPoint),
                                                           tRectColor.tChannel.chA);
            vsf_tgui_set_pixel(&tPoint, tBlended);
        }
    }
}
/* Blit (part of) a root tile's bitmap to the screen.
 *
 * ptLocation      target position on screen
 * ptTileLocation  source position inside the tile bitmap
 * ptSize          requested size; clipped below against the tile's own size
 * ptTile          must be a root tile, i.e. it owns the pixel data
 *
 * The bitmap stores pixels as packed R,G,B or R,G,B,A bytes, selected by
 * Attribute.u3ColorSize. ARGB_8888 pixels are alpha-blended over the
 * existing frame-buffer contents; plain RGB pixels are written opaque.
 */
void vsf_tgui_draw_root_tile(vsf_tgui_location_t* ptLocation,
                             vsf_tgui_location_t* ptTileLocation,
                             vsf_tgui_size_t* ptSize,
                             const vsf_tgui_tile_t* ptTile)
{
    VSF_TGUI_ASSERT(ptLocation != NULL);
    VSF_TGUI_ASSERT(ptTileLocation != NULL);
    VSF_TGUI_ASSERT(ptSize != NULL);
    VSF_TGUI_ASSERT(ptTile != NULL);
    VSF_TGUI_ASSERT((0 <= ptLocation->iX) && (ptLocation->iX < VSF_TGUI_HOR_MAX));  // x_start point in screen
    VSF_TGUI_ASSERT((0 <= ptLocation->iY) && (ptLocation->iY < VSF_TGUI_VER_MAX));  // y_start point in screen
    VSF_TGUI_ASSERT(0 <= (ptLocation->iX + ptSize->iWidth));                        // x_end point in screen
    VSF_TGUI_ASSERT((ptLocation->iX + ptSize->iWidth) <= VSF_TGUI_HOR_MAX);
    VSF_TGUI_ASSERT(0 <= (ptLocation->iY + ptSize->iHeight));                       // y_end point in screen
    VSF_TGUI_ASSERT((ptLocation->iY + ptSize->iHeight) <= VSF_TGUI_VER_MAX);
    VSF_TGUI_ASSERT(vsf_tgui_tile_is_root(ptTile));

    vsf_tgui_size_t tTileSize = vsf_tgui_root_tile_get_size(ptTile);
    VSF_TGUI_ASSERT(ptTileLocation->iX < tTileSize.iWidth);
    VSF_TGUI_ASSERT(ptTileLocation->iY < tTileSize.iHeight);

    const vsf_tgui_tile_buf_root_t* ptBufTile = &ptTile->tBufRoot;
    const vsf_tgui_tile_core_t* ptCoreTile = &ptBufTile->use_as__vsf_tgui_tile_core_t;
    /* Bytes per source pixel: 4 for ARGB_8888, otherwise 3 (packed RGB). */
    uint32_t u32_size = (ptCoreTile->Attribute.u3ColorSize == VSF_TGUI_COLOR_ARGB_8888) ? 4 : 3;

    /* Clip the requested region so it never reads past the tile's bitmap. */
    vsf_tgui_region_t tDisplay;
    tDisplay.tLocation = *ptLocation;
    tDisplay.tSize.iWidth = min(ptSize->iWidth, tTileSize.iWidth - ptTileLocation->iX);
    tDisplay.tSize.iHeight = min(ptSize->iHeight, tTileSize.iHeight - ptTileLocation->iY);
    if (tDisplay.tSize.iHeight <= 0 || tDisplay.tSize.iWidth <= 0) {
        return ;
    }

    for (uint16_t i = 0; i < tDisplay.tSize.iHeight; i++) {
        /* Byte offset of the first source pixel of this bitmap row. */
        uint32_t u32_offset = u32_size * ((ptTileLocation->iY + i) * tTileSize.iWidth + ptTileLocation->iX);
        const char* pchData = (const char *)ptBufTile->ptBitmap;
        pchData += u32_offset;
        for (uint16_t j = 0; j < tDisplay.tSize.iWidth; j++) {
            vsf_tgui_location_t tPixelLocation = { .iX = tDisplay.tLocation.iX + j, .iY = tDisplay.tLocation.iY + i };
            vsf_tgui_color_t tTileColor;
            /* Channel byte order in the bitmap is R, G, B [, A]. */
            tTileColor.tChannel.chR = *pchData++;
            tTileColor.tChannel.chG = *pchData++;
            tTileColor.tChannel.chB = *pchData++;
            if (ptCoreTile->Attribute.u3ColorSize == VSF_TGUI_COLOR_ARGB_8888) {
                tTileColor.tChannel.chA = *pchData++;
                /* Blend over the current frame-buffer pixel using the
                 * tile's own alpha. */
                vsf_tgui_color_t tPixelColor = vsf_tgui_get_pixel(&tPixelLocation);
                tPixelColor = vsf_tgui_color_mix(tTileColor, tPixelColor, tTileColor.tChannel.chA);
                vsf_tgui_set_pixel(&tPixelLocation, tPixelColor);
            } else if (ptCoreTile->Attribute.u3ColorSize == VSF_TGUI_COLOR_RGB8_USER_TEMPLATE) {
                /* No alpha channel in the bitmap: write fully opaque. */
                tTileColor.tChannel.chA = 0xFF;
                vsf_tgui_set_pixel(&tPixelLocation, tTileColor);
            }
        }
    }
}
/* Render one glyph: for each pixel of the character cell, fetch the font
 * coverage value at the matching glyph position and alpha-blend tCharColor
 * onto the frame buffer with that coverage as the mix weight. */
void vsf_tgui_draw_char(vsf_tgui_location_t* ptLocation, vsf_tgui_location_t* ptFontLocation, vsf_tgui_size_t* ptSize, uint32_t wChar, vsf_tgui_color_t tCharColor)
{
    VSF_TGUI_ASSERT(ptLocation != NULL);
    VSF_TGUI_ASSERT(ptFontLocation != NULL);
    VSF_TGUI_ASSERT(ptSize != NULL);

    for (uint16_t iRow = 0; iRow < ptSize->iHeight; iRow++) {
        for (uint16_t iCol = 0; iCol < ptSize->iWidth; iCol++) {
            vsf_tgui_location_t tGlyphPoint = {
                .iX = ptFontLocation->iX + iCol,
                .iY = ptFontLocation->iY + iRow,
            };
            vsf_tgui_location_t tScreenPoint = {
                .iX = ptLocation->iX + iCol,
                .iY = ptLocation->iY + iRow,
            };
            uint8_t chCoverage = vsf_tgui_font_get_pixel_color(NULL, wChar, &tGlyphPoint);
            vsf_tgui_color_t tBlended = vsf_tgui_color_mix(tCharColor,
                                                           vsf_tgui_get_pixel(&tScreenPoint),
                                                           chCoverage);
            vsf_tgui_set_pixel(&tScreenPoint, tBlended);
        }
    }
}
const vsf_tgui_sv_container_corner_tiles_t g_tContainerCornerTiles = {
.tTopLeft = {
.tChild = {
.parent_ptr = (vsf_tgui_tile_core_t *)&bg1_RGB,
.tSize = {.iWidth = 12, .iHeight = 12, },
.tLocation = {.iX = 0, .iY = 0},
},
},
.tTopRight = {
.tChild = {
.tSize = {.iWidth = 12, .iHeight = 12, },
.parent_ptr = (vsf_tgui_tile_core_t *)&bg1_RGB,
.tLocation = {.iX = 200 - 12, .iY = 0},
},
},
.tBottomLeft = {
.tChild = {
.tSize = {.iWidth = 12, .iHeight = 12, },
.parent_ptr = (vsf_tgui_tile_core_t *)&bg1_RGB,
.tLocation = {.iX = 0, .iY = 200 - 12},
},
},
.tBottomRight = {
.tChild = {
.tSize = {.iWidth = 12, .iHeight = 12, },
.parent_ptr = (vsf_tgui_tile_core_t *)&bg1_RGB,
.tLocation = {.iX = 200-12, .iY = 200-12},
},
},
};
/* Left/right 16x32 end-cap tiles for label backgrounds, cut side by side
 * from the bg3_RGB source image (left half at x=0, right half at x=16). */
const vsf_tgui_sv_label_tiles_t c_tLabelAdditionalTiles = {
    .tLeft = {
        .tChild = {
            .tSize = {.iWidth = 16, .iHeight = 32, },
            .parent_ptr = (vsf_tgui_tile_core_t *)&bg3_RGB,
            .tLocation = {.iX = 0, .iY = 0},
        },
    },
    .tRight = {
        .tChild = {
            .tSize = {.iWidth = 16, .iHeight = 32, },
            .parent_ptr = (vsf_tgui_tile_core_t *)&bg3_RGB,
            .tLocation = {.iX = 16, .iY = 0},
        },
    },
};
/**********************************************************************************/
/*! \brief begin a refresh loop
*! \param gui_ptr the tgui object address
*! \param ptPlannedRefreshRegion the planned refresh region
*! \retval NULL No need to refresh (or rendering service is not ready)
*! \retval !NULL The actual refresh region
*/
vsf_tgui_region_t *vsf_tgui_v_refresh_loop_begin(
                    vsf_tgui_t *gui_ptr,
                    const vsf_tgui_region_t *ptPlannedRefreshRegion)
{
    /* Template port: accept the planned region unmodified. The cast drops
     * const because the v-layer interface returns a mutable region. */
    return (vsf_tgui_region_t *)ptPlannedRefreshRegion;
}
/* True while the display is idle and a new frame may be pushed; cleared when
 * a refresh is started and set again from the display-ready callback. */
volatile static bool s_bIsReadyToRefresh = true;

bool vsf_tgui_v_refresh_loop_end(vsf_tgui_t* gui_ptr)
{
    /* Full-screen area a real port would flush to the display.
     * NOTE(review): currently unused -- the actual refresh is still the
     * "todo" below. */
    vk_disp_area_t area = {
        .pos = {.x = 0, .y = 0},
        .size = {.x = VSF_TGUI_HOR_MAX, .y = VSF_TGUI_VER_MAX},
    };
    /* Claim the "ready" flag atomically with respect to the scheduler. */
    __vsf_sched_safe(
        if (s_bIsReadyToRefresh) {
            s_bIsReadyToRefresh = false;
            // todo: refresh
        }
    )
    /* false: no further refresh pass is requested from this call. */
    return false;
}
/* Display-driver completion callback: marks the port ready for the next
 * refresh and forwards the event to the demo application. The disp argument
 * is unused here. */
static void vsf_tgui_on_ready(vk_disp_t* disp)
{
    __vsf_sched_safe(
        if (!s_bIsReadyToRefresh) {
            s_bIsReadyToRefresh = true;
        }
        vsf_tgui_demo_on_ready();
    )
}
/* Polled by the tgui service: true when the previous frame has completed and
 * a new refresh pass may start. */
bool vsf_tgui_port_is_ready_to_refresh(void)
{
    return s_bIsReadyToRefresh;
}
#endif
/* EOF */
|
def StartVim(self, args=None):
    """Start the Vim server process unless it is already running.

    Args:
        args: optional list of command-line arguments passed to Vim.
            Defaults to an empty list. (Fixed: the original used a mutable
            default argument ``args=[]``, which is shared across calls.)

    Raises:
        VimgdbError: if the Vim server has already been started.
    """
    if args is None:
        args = []
    if not self.vim.IsRunning():
        self.vim.Start(args)
    else:
        raise VimgdbError("Vim server already started.")
Saosin - Along The Shadow Vinyl LP Hot Topic Exclusive is rated 5.0 out of 5 by 3 .
Rated 5 out of 5 by Ohitsjustjohnny_ from Love this album. I’m hella bummed Hot Topic doesn’t advertise their vinyl exclusives because I love this variant so much. I’ve bought this record three times now, just collecting all the different pressings, but I probably would have only bought it once if someone told me the HT Exclusive was a gorgeous purple & clear smash. They could really kill the vinyl game if HT advertised it better. Nerds like me eat these limited exclusives up.
Rated 5 out of 5 by Sims88 from Great album Anything with Anthony Green is gold, and getting back with Saosin to drop a full-length album is amazing. I hope they release more in the future
<reponame>manbuyun/starrocks<gh_stars>1-10
// This file is licensed under the Elastic License 2.0. Copyright 2021-present, StarRocks Limited.
#pragma once
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <vector>
#include "storage/olap_common.h"
#include "storage/rowset/rowset.h"
#include "storage/tablet.h"
#include "util/threadpool.h"
namespace starrocks {
class DataDir;
class CompactionTask;
class CompactionCandidate;
// Schedules tablets to run compaction tasks.
class CompactionScheduler {
public:
    CompactionScheduler();
    ~CompactionScheduler() = default;

    // Entry point of the scheduling loop (implementation in the .cpp).
    void schedule();
    // Wakes a scheduler blocked in _wait_to_run() via _cv.
    void notify();

private:
    // wait until current running tasks are below max_concurrent_num
    void _wait_to_run();
    // Whether another task may be scheduled right now.
    bool _can_schedule_next();

    // Picks the next candidate tablet and builds a runnable task from it,
    // or returns nullptr when nothing is schedulable.
    std::shared_ptr<CompactionTask> _try_get_next_compaction_task();

    // Checks whether |candidate| can compact now; |need_reschedule| and
    // |compaction_task| are out parameters filled by the implementation.
    bool _can_do_compaction(const CompactionCandidate& candidate, bool* need_reschedule,
                            std::shared_ptr<CompactionTask>* compaction_task);
    // if check fails, should not reschedule the tablet
    bool _check_precondition(const CompactionCandidate& candidate);
    bool _can_do_compaction_task(Tablet* tablet, CompactionTask* compaction_task);

private:
    // Thread pool that executes the compaction tasks.
    std::unique_ptr<ThreadPool> _compaction_pool;
    // _mutex/_cv implement the wait in _wait_to_run(), signalled by notify().
    std::mutex _mutex;
    std::condition_variable _cv;
    // Scheduling round counter -- presumably used for round-robin ordering;
    // TODO confirm in the .cpp.
    uint64_t _round;
};
} // namespace starrocks
|
// GetAddr returns ethernet address of the tap device whose name is 'name'
func GetAddr(name string) ([]byte, error) {
addr, err := SIOCGIFHWADDR(name)
if err != nil {
return nil, err
}
return addr, nil
} |
Israeli Jewish settlers from the illegal settlements of Salit and Miskiot in the Jordan Valley occupied wide swathes of Palestinian lands, Arab48.com reported yesterday.
Mutaz Bsharat, responsible for assessing settler activity in the West Bank city of Tubas, said that the settlers from Salit and Miskiot “seized more than 300 dunams [about 75 acres] from Himat Khilla area during the past two days,” noting that they laid down foundations for barracks and settlement units in addition to installing water pipes.
On Wednesday, Israeli Prime Minister Benjamin Netanyahu announced that his government will join a project carried out by El-Ad to search for Jewish remains in the rubble extracted from under the Al-Aqsa Mosque 17 years ago.
Netanyahu’s announcement was made during a speech he delivered at the inauguration ceremony of the Antiquities Village in Jerusalem, near the Museum of Israel.
Meanwhile, Netanyahu attacked UNESCO’s decision last week which denied Jewish links to the Al-Aqsa Mosque and its Western Wall.
He claimed that Israel is the sole place where Mosques and Churches are not being demolished by extremists, ignoring the systematic destruction of hundreds of mosques, the partition of the Al-Ibrahimi Mosque in Hebron and closing it and other mosques from time to time. |
ST. LOUIS, MI -- One day in 1974, Jane-Ann Nyerges got home from school and found her mother sitting at the kitchen table, crying. The family lived on a farm near Remus where they raised livestock and Nyerges had thought it odd the chicken coop was empty as she walked up the driveway.
Inside, she learned why.
"Some men came and killed them all with baseball bats because they were sick," her mother said. "And we're sick, too."
That was 42 years ago. In the decades since, Nyerges suffered 10 miscarriages and multiple ectopic pregnancies that she blames on exposure to polybrominated biphenyl, or PBB, an insidious poison she and 9 million other Michiganders ingested in 1973 and 1974, when a distribution snafu at a Gratiot County chemical plant contaminated the state food supply.
That colossal screw-up -- accidentally switching a ton of Nutrimaster, a cattle feed supplement that boosted a cow's milk supply, with Firemaster, a toxic flame retardant -- caused one of the largest chemical poisonings in the western world. Researchers say that many in Michigan still have elevated blood PBB levels from consuming contaminated eggs, milk, butter, cheese and meat.
Six in 10 people tested for PBB in Michigan today -- including some born after the disaster -- have levels above the national average, according to researchers at Emory University in Georgia. Because PBB lives for decades in body fat and mimics the effects of estrogen, people directly or indirectly exposed have become prone to reproductive health issues and thyroid problems.
"Among the people in Michigan, a vast majority are above the national average," said Michele Marcus, an Emory public health professor who has lead the research into the long-term effects of PBB for the past 15 years.
Without stable funding, Michigan Superfund cleanups creep along EPA doles out money to the "worst first."
Because PBB stores in fat, exposed mothers unwittingly passed it to children in breast milk. Emory researchers say daughters of women exposed after 1973 generally experienced their first menstruation a year earlier than normal, ended up shorter in stature as adults and have suffered increased pregnancy risks.
The rate of miscarriages among daughters of mothers exposed to high PBB levels was as high as 35 percent, Marcus said.
In male children, researchers found more genital abnormalities and other urinary and genital system problems. Exposed boys also matured more slowly.
Epigenetic researchers are studying whether PBB effects were also passed down from fathers exposed to the chemical and how the widespread contamination has affected the grandchildren of Michiganders exposed in the 1970s.
The long-term health consequences are poorly understood by doctors, which Marcus said has probably resulted in misdiagnosed illness causes.
"If these affects are being transmitted to future generations, it changes everything," she said. "It's a whole different equation."
"It's very important to understand if that's really happening."
Emory University graphic showing elevated PBB blood levels in Michigan.
Where did this stuff come from?
The PBBs came from Michigan Chemical Corp., a subsidiary of Velsicol Chemical, which operated a 52-acre plant on a bend in the Pine River in St. Louis; a town near the state's geographic center once known for its pure mineral spring water.
Today, St. Louis is known mostly for its toxic legacy -- a chemical past that scars the very land where the plant once stood. The factory was leveled after Velsicol closed up following investigation into the PBB incident, but the property has since become one of the largest and costliest Superfund sites in the country.
Under the grass, beneath a clay cap behind a chain link fence, are huge amounts of hazardous chemicals in concentrations that could pose a major health risk to humans and wildlife. Total cleanup is estimated to cost between $300 million and half a billion, paid for almost entirely by taxpayers.
"It's essentially going to be there forever," said Jane Keon, a local resident and founding member of the Pine River Superfund Citizen Task Force, which has advocated for cleanup and research on behalf of those exposed to Velsicol's PBB.
Although Velsicol exported its infamous contamination to nearly every refrigerator in Michigan, the company also did a number on the community that supplied its workforce. Production of PBB caused so much dust pollution that Velsicol moved its research lab to Alma College to find clean space to work. On the ecological end, natural topography meant the Pine River received the brunt of toxic drainage and erosion from the factory.
An aerial photo of the former Velsicol Chemical Co. factory along the Pine River in St. Louis
"It was a dangerous, dirty place to work and Velsicol was not interested in addressing any of those concerns," said Marcus Cheatham, officer at the Mid-Michigan Health Department.
After the strange livestock plague was traced to Velsicol PBBs, the company "cleaned up" by dumping waste in a pit across the river and the county landfill; both of which are now their own separate Superfund sites.
When the factory closed in 1978, workers had trouble landing new jobs because employers were leery of their exposure to hazardous chemicals. Velsicol, the main employer in St. Louis, destroyed not only the town's health and environment, but its economy and reputation as well.
Years of silent spring in St. Louis
While the company was going bankrupt in 1982, Velsicol and the U.S. Environmental Protection Agency signed an agreement that provided $20 million for cleanup, a pittance of the eventual cost. Nonetheless, a slurry wall was built around the perimeter and a cap was placed over the site. But there was nothing underneath to keep the buried chemicals out of the local water.
The encapsulation proved to be an utter failure. The slurry wall leaks in major areas and the EPA had to install a groundwater trench around the site that collects 20,000 gallons a week, which is shipped to Detroit for treatment.
In 1999, a 10-year, $100 million project began to remove 750,000 cubic yards of contaminated sediment from 36-acres of the riverbed across from the site.
Last summer, the EPA began digging up yards and replacing soil in the 9-block neighborhood bordering the site. Crews hauled away 50,000 tons of soil contaminated mostly with dichloro-diphenyl-trichloroethane, or DDT, a pesticide once widely used to control malaria, typhus, and the other insect-borne diseases.
Today, DDT is classified as a probable carcinogen and its use was banned after Rachel Carson's book "Silent Spring" exposed the hazard it posed to birds.
In 2014, a field study by a Michigan State University toxicologist determined that years of unexplained local bird deaths were being caused by eating worms and insects poisoned by soil filled with DDT, which was discovered in some of the highest tissue concentrations ever recorded in wild birds.
Bridgett Davis, whose North Street front door faces the fence, has seen many dead birds throughout her years in the neighborhood, including one in April. Last year, the EPA replaced her soil. Now, she says the buried plastic liner causes water to pool in neighborhood yards when it rains.
Her husband's grandfather worked at Velsicol. The young couple has two kids, an infant girl and a two-year-old boy. The neighborhood is inexpensive, she said.
Despite the contamination, "it's nice not having people across the street."
Among the older residents of St. Louis, the toxic legacy has become a fact of life. Many of their bodies are contaminated. Some are angered by it. Others shrug.
Everyone is happy to finally be drinking clean water, though. In October, St. Louis and nearby townships began tapping Alma for water after completing a $40 million project sparked by the 2005 discovery that St. Louis water was contaminated with para-chlorobenzene sulfonic acid, (p-CBSA) a DDT byproduct.
Redevelopment a distant hope
Taxpayers are funding the work now. Velsicol shed its liability for St. Louis long ago. The company is now owned by Arsenal Capital, a private equity firm. Chemtura Corp. now owns the patent on Firemaster, which is made with a slightly different chemical composition.
More St. Louis yard dig-ups are scheduled this year. The EPA plans to start in-situ thermal treatment (sticking electrified rods into the ground to boil off chemicals) at some point. Work on that could happen this year if EPA higher-ups allocate money for construction.
The city hopes to redevelop the site someday. A 12-year-old reuse plan on the city website includes designs for a park, amphitheater, sport fields and a commemorative area. Whether any of that comes to pass depends on time and money. The plans include the inevitable water treatment plant, a perpetual multi-million dollar annual expense.
The Pine River is there, but it's also not there. People fish, but throw their catch back. Signs between the Alma dam and the confluence with the Chippewa River 36 miles downstream warn about toxic fish. At Penny Park, directly across the river from the plant site, "No Swimming" signs warn people about the water.
Toxicologists are still studying exactly how much Velsicol contamination is getting into fish and animals downstream of St. Louis.
In the 1980s, instead of a typical Superfund sign, a judge ordered the property feature a marker that would last 300 years. Since then, carved in granite on a tombstone near the entrance were the words "Warning, Do Not Enter." After years of coaxing, the government allowed St. Louis to move the tombstone to the local historical society in 2013. Today, instead of a symbol of death, a granite bench declares the city's intent to someday reclaim the land for public use.
"We're so anxious for the plant site to start getting cleaned up," said Keon.
"We know it's going to take at least 20 years and some of us aren't spring chickens anymore."
Garret Ellison covers government, environment & the Great Lakes for MLive.com. Email him at [email protected] or follow on Twitter & Instagram |
// Copyright 2021 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "shill/vpn/vpn_connection.h"
#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include <base/callback.h>
#include <base/location.h>
#include <base/logging.h>
#include <base/strings/string_util.h>
#include <base/task/post_task.h>
namespace shill {
namespace {
// Returns a human-readable name for |state|, used in log messages.
std::string StateToString(VPNConnection::State state) {
  switch (state) {
    case VPNConnection::State::kIdle:
      return "Idle";
    case VPNConnection::State::kConnecting:
      return "Connecting";
    case VPNConnection::State::kConnected:
      return "Connected";
    case VPNConnection::State::kDisconnecting:
      return "Disconnecting";
    case VPNConnection::State::kStopped:
      return "Stopped";
    default:
      // Fix: the original fell off the end of this non-void function after
      // NOTREACHED() (which may be a no-op in release builds) -- undefined
      // behavior. Return a sentinel instead.
      NOTREACHED();
      return "Unknown";
  }
}
// Checks if |current_state| is in |allowed_states|, if not, crashes (in the
// debug environment) or leaves a log.
void CheckCallWithState(const std::string& call,
VPNConnection::State current_state,
std::set<VPNConnection::State> allowed_states) {
if (allowed_states.find(current_state) != allowed_states.end()) {
return;
}
std::vector<std::string> state_names;
for (const auto state : allowed_states) {
state_names.push_back(StateToString(state));
}
LOG(DFATAL) << call << " should only be called if the state is in {"
<< base::JoinString(state_names, ",")
<< "}, but current state is " << current_state;
}
} // namespace
// Stream-insertion support so VPNConnection::State values can be logged
// directly; delegates to StateToString().
std::ostream& operator<<(std::ostream& stream,
                         const VPNConnection::State& state) {
  return stream << StateToString(state);
}
// Takes ownership of |callbacks|; |dispatcher| is used to post every
// state-change notification asynchronously. Starts in the kIdle state.
VPNConnection::VPNConnection(std::unique_ptr<Callbacks> callbacks,
                             EventDispatcher* dispatcher)
    : callbacks_(std::move(callbacks)),
      state_(State::kIdle),
      dispatcher_(dispatcher) {}
// Starts the connection attempt. Only valid from kIdle. The state switches
// to kConnecting synchronously, while the subclass OnConnect() hook is
// posted to the event loop so it runs outside the caller's stack frame.
void VPNConnection::Connect() {
  CheckCallWithState(__func__, state_, {State::kIdle});
  state_ = State::kConnecting;
  dispatcher_->PostTask(FROM_HERE, base::BindOnce(&VPNConnection::OnConnect,
                                                  weak_factory_.GetWeakPtr()));
}
// Initiates teardown. Valid while connecting or connected; the state moves
// to kDisconnecting synchronously and the subclass OnDisconnect() hook is
// posted asynchronously.
void VPNConnection::Disconnect() {
  CheckCallWithState(__func__, state_, {State::kConnecting, State::kConnected});
  state_ = State::kDisconnecting;
  dispatcher_->PostTask(FROM_HERE, base::BindOnce(&VPNConnection::OnDisconnect,
                                                  weak_factory_.GetWeakPtr()));
}
// Replaces the callback set used for all subsequent notifications.
void VPNConnection::ResetCallbacks(std::unique_ptr<Callbacks> callbacks) {
  callbacks_ = std::move(callbacks);
}
// True while the connection is active (being established or established).
bool VPNConnection::IsConnectingOrConnected() const {
  return state_ == State::kConnecting || state_ == State::kConnected;
}
// Called by the subclass when the tunnel is up. Must be in kConnecting.
// Switches to kConnected and forwards the link parameters to the registered
// on_connected callback asynchronously.
void VPNConnection::NotifyConnected(const std::string& link_name,
                                    int interface_index,
                                    const IPConfig::Properties& ip_properties) {
  CheckCallWithState(__func__, state_, {State::kConnecting});
  state_ = State::kConnected;
  dispatcher_->PostTask(FROM_HERE,
                        base::BindOnce(callbacks_->on_connected_cb, link_name,
                                       interface_index, ip_properties));
}
// Called by the subclass on failure from any active state. Logs the failure,
// fires the on_failure callback (moved, so it is one-shot), and then posts
// OnDisconnect() to clean up; the state becomes kDisconnecting.
void VPNConnection::NotifyFailure(Service::ConnectFailure reason,
                                  const std::string& detail) {
  CheckCallWithState(
      __func__, state_,
      {State::kConnecting, State::kConnected, State::kDisconnecting});
  LOG(ERROR) << "VPN connection failed, current state: " << state_
             << ", reason: " << Service::ConnectFailureToString(reason)
             << ", detail: " << detail;
  state_ = State::kDisconnecting;
  dispatcher_->PostTask(
      FROM_HERE, base::BindOnce(std::move(callbacks_->on_failure_cb), reason));
  dispatcher_->PostTask(FROM_HERE, base::BindOnce(&VPNConnection::OnDisconnect,
                                                  weak_factory_.GetWeakPtr()));
}
// Called by the subclass once teardown has finished; fires the one-shot
// on_stopped callback and parks the object in the terminal kStopped state.
void VPNConnection::NotifyStopped() {
  CheckCallWithState(__func__, state_, {State::kDisconnecting});
  state_ = State::kStopped;
  dispatcher_->PostTask(FROM_HERE, std::move(callbacks_->on_stopped_cb));
}
} // namespace shill
|
import math
import numpy as np
def hand_distance(a, b, h, m):
    """Return the distance between the tips of the clock hands.

    a: length of the hour hand, b: length of the minute hand,
    h, m: the time (h hours, m minutes).
    """
    # Angles are measured from 12 o'clock; only the angle *between* the
    # hands matters, so the absolute orientation used by the original
    # coordinate version cancels out.
    hour_angle = 2 * math.pi * (h / 12 + m / 720)
    minute_angle = 2 * math.pi * (m / 60)
    # Law of cosines on the triangle (center, hour tip, minute tip).
    return math.sqrt(a * a + b * b - 2 * a * b * math.cos(hour_angle - minute_angle))


if __name__ == "__main__":
    # Fix: the computation is now a testable function (numpy was only used
    # for a scalar 2-vector norm) and the I/O is behind a main guard.
    A, B, H, M = (int(i) for i in input().split())
    print(hand_distance(A, B, H, M))
|
/**
 * Setup wm hardware state. See page 225 of Volume 2
 *
 * Builds a brw_wm_unit_state packet from the precomputed |key| and uploads
 * it, together with its relocation list, to the state cache; the resulting
 * buffer is returned in |bo_out|.
 */
static enum pipe_error
wm_unit_create_from_key(struct brw_context *brw, struct brw_wm_unit_key *key,
                        struct brw_winsys_reloc *reloc,
                        unsigned nr_reloc,
                        struct brw_winsys_buffer **bo_out)
{
   struct brw_wm_unit_state wm;
   enum pipe_error ret;

   memset(&wm, 0, sizeof(wm));

   /* GRF registers are allocated in blocks of 16, encoded minus one. */
   wm.thread0.grf_reg_count = align(key->total_grf, 16) / 16 - 1;
   /* Zero here; presumably patched through the relocation list -- TODO
    * confirm against the reloc entries built by the caller. */
   wm.thread0.kernel_start_pointer = 0;
   wm.thread1.depth_coef_urb_read_offset = 1;
   wm.thread1.floating_point_mode = BRW_FLOATING_POINT_NON_IEEE_754;

   /* IGDNG is special-cased to zero here (as for sampler_count below) --
    * NOTE(review): rationale not visible in this file; confirm upstream. */
   if (BRW_IS_IGDNG(brw))
      wm.thread1.binding_table_entry_count = 0;
   else
      wm.thread1.binding_table_entry_count = key->nr_surfaces;

   /* Per-thread scratch space is encoded in 1KB units, minus one. */
   if (key->total_scratch != 0) {
      wm.thread2.scratch_space_base_pointer = 0;
      wm.thread2.per_thread_scratch_space = key->total_scratch / 1024 - 1;
   } else {
      wm.thread2.scratch_space_base_pointer = 0;
      wm.thread2.per_thread_scratch_space = 0;
   }

   wm.thread3.dispatch_grf_start_reg = key->dispatch_grf_start_reg;
   wm.thread3.urb_entry_read_length = key->urb_entry_read_length;
   wm.thread3.urb_entry_read_offset = 0;
   wm.thread3.const_urb_entry_read_length = key->curb_entry_read_length;
   /* CURB offsets are counted in pairs of registers. */
   wm.thread3.const_urb_entry_read_offset = key->curbe_offset * 2;

   if (BRW_IS_IGDNG(brw))
      wm.wm4.sampler_count = 0;
   else
      /* Sampler count field is encoded in groups of four samplers. */
      wm.wm4.sampler_count = (key->sampler_count + 1) / 4;

   wm.wm4.sampler_state_pointer = 0;

   wm.wm5.program_uses_depth = key->uses_depth;
   wm.wm5.program_computes_depth = key->computes_depth;
   wm.wm5.program_uses_killpixel = key->uses_kill;

   /* Programs with flow control use the 8-pixel dispatch; otherwise the
    * 16-pixel dispatch is enabled. */
   if (key->has_flow_control)
      wm.wm5.enable_8_pix = 1;
   else
      wm.wm5.enable_16_pix = 1;

   /* max_threads is encoded minus one. */
   wm.wm5.max_threads = key->max_threads - 1;
   wm.wm5.thread_dispatch_enable = 1;
   wm.wm5.legacy_line_rast = 0;
   wm.wm5.legacy_global_depth_bias = 0;
   wm.wm5.early_depth_test = 1;
   wm.wm5.line_aa_region_width = 0;
   wm.wm5.line_endcap_aa_region_width = 1;

   wm.wm5.polygon_stipple = key->polygon_stipple;

   /* NOTE(review): the constant term is doubled here -- presumably a
    * GL-units to hardware-units conversion; confirm against the PRM. */
   if (key->offset_enable) {
      wm.wm5.depth_offset = 1;
      wm.global_depth_offset_constant = key->offset_units * 2;
      wm.global_depth_offset_scale = key->offset_factor;
   }

   wm.wm5.line_stipple = key->line_stipple;

   if ((BRW_DEBUG & DEBUG_STATS) || key->stats_wm)
      wm.wm4.stats_enable = 1;

   /* Upload the packet and its relocations to the state cache. */
   ret = brw_upload_cache(&brw->cache, BRW_WM_UNIT,
                          key, sizeof(*key),
                          reloc, nr_reloc,
                          &wm, sizeof(wm),
                          NULL, NULL,
                          bo_out);
   if (ret)
      return ret;

   return PIPE_OK;
}
/**
 * Verifies that the bind processor generates and compiles binding code for
 * the full set of Bean81* test beans, including the enum type.
 *
 * @throws IOException Signals that an I/O exception has occurred.
 * @throws InstantiationException the instantiation exception
 * @throws IllegalAccessException the illegal access exception
 */
@Test
public void testCompile() throws IOException, InstantiationException, IllegalAccessException {
    buildBindProcessorTest(Bean81G.class, Bean81H.class, Bean81I.class, Bean81L.class, Bean81M.class, Bean81N.class, Bean81O.class, Bean81P.class,
            Bean81U.class, Bean81R.class, Bean81S.class, Bean81T.class,
            Bean81Enum.class);
}
def is_full_integration():
    """Report whether full-integration mode is active.

    Always returns ``None`` (falsy) in this build; the bare ``return`` of
    the original is made explicit here.
    """
    return None
/**
 * Trust-region refinement of the random-effects vector uhat.
 *
 * Repeatedly solves the damped Newton system (bHess + wght*I) * step = -grad
 * with a banded Choleski factorization, adapting the damping weight wght and
 * its increment delta according to whether the inner objective improves, and
 * returns the best point found.
 *
 * \param _uhat  starting random-effects vector (cast to non-const internally)
 * \param pfmin  function minimizer used to evaluate the inner objective
 * \return the best random-effects vector found (uhat_best)
 */
dvector laplace_approximation_calculator::
  banded_calculations_trust_region_approach(const dvector& _uhat,
  function_minimizer * pfmin)
{
  dvector& uhat=(dvector&) _uhat;  // cast away const (ADMB convention)
  dvector uhat_old(uhat.indexmin(),uhat.indexmax());
  dvector uhat_new(uhat.indexmin(),uhat.indexmax());
  dvector uhat_best(uhat.indexmin(),uhat.indexmax());
  double wght=0.0;     // damping weight added to the Hessian diagonal
  double delta=5.e-5;  // current increment applied to wght each pass
  dvector values(1,300);
  double oldfbest=pmin->lapprox->fmc1.fbest;
  // NOTE(review): newfbest starts at 0.0, so a candidate is only recorded
  // once newval drops below 0 -- confirm this is intended for the objective
  // being minimized here.
  double newfbest = 0.0;
  int have_value=0;    // iteration index at which uhat_best was last updated
  int jj=1;
  double lastval=oldfbest;
  do
  {
    jj++;
    wght+=delta;
    // Fix: the original contained the garbled, uncompilable token sequence
    // "uble wght=0.0;" here; read as "double wght=0.0;" it would shadow and
    // zero the damping weight accumulated above, so it has been removed.
    double newval=0.0;
    if (wght<0.0)
      break;
    int mmin=bHess->indexmin();
    int mmax=bHess->indexmax();
    // Damped copy of the banded Hessian: tmp = bHess + wght*I.
    banded_symmetric_dmatrix tmp(mmin,mmax,bHess->bandwidth());
    tmp=*bHess;
    uhat_old=uhat;
    int ierr=0;
    for (int i=mmin;i<=mmax;i++)
    {
      tmp(i,i)+=wght;
    }
    banded_lower_triangular_dmatrix bltd=choleski_decomp(tmp,ierr);
    if (!ierr)
    {
      // Newton step via two triangular solves: (L L^T) step = -grad.
      dvector v=solve(bltd,grad);
      step=-solve_trans(bltd,v);
      uhat_old=uhat;
      uhat+=step;
      evaluate_function(newval,uhat,pfmin);
      // Once a best point exists, stop as soon as the value worsens again.
      if (have_value && newval>newfbest)
      {
        break;
      }
      if (jj>1)
      {
        if (newval<lastval)
        {
          delta*=2;   // improving: damp more aggressively next time
        }
        if (newval>lastval && !have_value)
        {
          wght-=delta;  // worsening before any success: back off
          delta/=16;
        }
      }
      lastval=newval;
      if (newval<newfbest)
      {
        newfbest=newval;
        uhat_best=uhat;
        have_value=jj;
      }
      uhat_new=uhat;
      uhat=uhat_old;  // always restart the next pass from the old point
    }
    else
    {
      // Choleski failed (damped Hessian not positive definite): grow the
      // damping increment and retry.
      delta*=2;
    }
  }
  while(jj<10);
  if (!have_value)
  {
    cerr << "can't improve function value in trust region calculations"
         << endl;
  }
  return uhat_best;
  /**
  @todo Unreachable code, check above return statement.
  initial_params::set_active_only_random_effects();
  if (!inner_lmnflag)
  {
    if (!ADqd_flag)
    {
      uhat=get_uhat_quasi_newton(uhat_new,pfmin);
      double maxg=fabs(fmc1.gmax);
      double f_from_1=fmc1.fbest;
    }
    else
    {
      uhat=get_uhat_quasi_newton_qd(uhat_new,pfmin);
    }
  }
  else
  {
    uhat=get_uhat_lm_newton(uhat_new,pfmin);
  }
  return uhat;
  */
}
/**
* @zm-api-response-description the response contains activities
* as described in GetActivityStreamResponse
*/
@XmlAccessorType(XmlAccessType.NONE)
@XmlRootElement(name=OctopusXmlConstants.E_GET_NOTIFICATIONS_RESPONSE)
@XmlType(propOrder = {"operations", "users", "activities"})
public class GetNotificationsResponse {
/**
* @zm-api-field-tag last-seen
* @zm-api-field-description Timestamp of when the notifications were last seen
*/
@XmlAttribute(name=OctopusXmlConstants.A_LASTSEEN /* lastSeen */, required=true)
private long lastSeen;
/**
* @zm-api-field-description Operations
*/
@XmlElement(name=OctopusXmlConstants.E_OPERATION /* op */, required=false)
private final List<NamedElement> operations = Lists.newArrayList();
/**
* @zm-api-field-description Users
*/
@XmlElement(name=MailConstants.A_USER /* user */, required=false)
private final List<IdEmailName> users = Lists.newArrayList();
/**
* @zm-api-field-description Activities
*/
@XmlElement(name=MailConstants.E_A /* a */, required=false)
private final List<ActivityInfo> activities = Lists.newArrayList();
public GetNotificationsResponse() {
}
public void setlastSeen(long lastSeen) {
this.lastSeen = lastSeen;
}
public void setOperations(Iterable <NamedElement> operations) {
this.operations.clear();
if (operations != null) {
Iterables.addAll(this.operations,operations);
}
}
public void addOperation(NamedElement operation) {
this.operations.add(operation);
}
public void setUsers(Iterable <IdEmailName> users) {
this.users.clear();
if (users != null) {
Iterables.addAll(this.users,users);
}
}
public void addUser(IdEmailName user) {
this.users.add(user);
}
public void setActivities(Iterable <ActivityInfo> activities) {
this.activities.clear();
if (activities != null) {
Iterables.addAll(this.activities,activities);
}
}
public void addActivity(ActivityInfo activity) {
this.activities.add(activity);
}
public long getLastSeen() {
return lastSeen;
}
public List<NamedElement> getOperations() {
return Collections.unmodifiableList(operations);
}
public List<IdEmailName> getUsers() {
return Collections.unmodifiableList(users);
}
public List<ActivityInfo> getActivities() {
return Collections.unmodifiableList(activities);
}
public Objects.ToStringHelper addToStringInfo(Objects.ToStringHelper helper) {
return helper
.add("lastSeen", lastSeen)
.add("operations", operations)
.add("users", users)
.add("activities", activities);
}
@Override
public String toString() {
return addToStringInfo(Objects.toStringHelper(this)).toString();
}
} |
/**
 * This class discovers information about
 * the computer where it is running.
 *
 * Discovery is best-effort: values that cannot be determined fall back to
 * the system's "unknown" message (or the literal "unknown").
 *
 * @author scott
 *
 */
public class ComputerInfoDiscovery {
  // Label returned for Unix-like systems that match "nux" but not "linux".
  // NOTE(review): the spelling "Unux" looks deliberate but unusual — confirm.
  private static final String UNUX = "Unux";
  public static final String LINUX = "Linux";
  public static final String WINDOWS = "Windows";
  public static final String MAC = "Mac";

  /**
   * Discovers the CPU model name and clock speed by running an OS-specific
   * external command.
   *
   * @param sys abstraction over the system (process execution, messages).
   * @param os one of MAC, WINDOWS; anything else is treated as having /proc.
   * @return a two-element array {cpu model, speed}; {"unknown","unknown"}
   *         when the external command throws.
   */
  public static String[] getCpuInfo(I_FabSystem sys, String os) {
    try {
      if (MAC.equals(os)) {
        I_Executor exe = sys.getExecutor();
        // 'sysctl -a' dumps all kernel state; the CPU brand is one entry of it.
        I_ExecutionResult er = exe.executeProcess( FabricationMemoryConstants.EMPTY_ENV,
            ".", "sysctl", "-a");
        String cpu = er.getOutput();
        int index = cpu.indexOf(".cpu.brand_string");
        if (index != -1) {
          // Skip past the 17-char key and its separator to the value text.
          index = index + 18;
          return parseCpuInfo(cpu, index);
        }
        I_FabricateConstants constants = sys.getConstants();
        I_SystemMessages sysMessages = constants.getSystemMessages();
        // Key not found: return the raw output plus an "unknown" speed.
        return new String[] {cpu, sysMessages.getUnknown()};
      } else if (WINDOWS.equals(os)){
        I_Executor exe = sys.getExecutor();
        I_ExecutionResult er = exe.executeProcess(FabricationMemoryConstants.EMPTY_ENV,
            ".", "wmic", "cpu", "get", "name");
        String cpu = er.getOutput();
        // wmic prints a "Name" header line before the actual value.
        cpu = cpu.replaceFirst("Name", "").trim();
        return parseCpuInfo(cpu, 0);
      } else {
        // Anything that is not Mac or Windows is assumed to expose /proc.
        I_Executor exe = sys.getExecutor();
        I_ExecutionResult er = exe.executeProcess(FabricationMemoryConstants.EMPTY_ENV,
            ".", "cat", "/proc/cpuinfo");
        String cpu = er.getOutput();
        int index = cpu.indexOf("model name\t: ");
        if (index != -1) {
          // "model name\t: " is 13 characters; skip straight to the value.
          index = index + 13;
          return parseCpuInfo(cpu, index);
        }
        I_FabricateConstants constants = sys.getConstants();
        I_SystemMessages sysMessages = constants.getSystemMessages();
        return new String[] {cpu, sysMessages.getUnknown()};
      }
    } catch (Exception x) {
      //do nothing — discovery is best-effort; fall through to "unknown"
    }
    return new String[] {"unknown","unknown"};
  }

  /**
   * Splits a CPU description such as "Intel ... CPU @ 2.60GHz" into model
   * and speed, collapsing runs of whitespace along the way.
   *
   * @param cpu raw text containing the CPU description.
   * @param index position in cpu where the description starts.
   * @return {model, speed}, each truncated to at most 40 characters.
   */
  public static String[] parseCpuInfo(String cpu, int index) {
    char[] cpuChars = cpu.toCharArray();
    StringBuilder sbCpu = new StringBuilder();
    StringBuilder sbSpeed = new StringBuilder();
    StringBuilder sb = sbCpu;
    boolean lastWasSpace = false;
    for (int i = index; i < cpuChars.length; i++) {
      char c = cpuChars[i];
      if (c == '@') {
        // '@' separates model from speed; start filling the speed buffer.
        lastWasSpace = false;
        sb = sbSpeed;
      } else if (c == 'z') {
        // 'z' assumed to end the unit suffix (e.g. GHz/MHz); stop scanning.
        lastWasSpace = false;
        sb.append(c);
        break;
      } else if (lastWasSpace) {
        // Collapse consecutive whitespace: only the first space was kept.
        if (!Character.isWhitespace(c)) {
          lastWasSpace = false;
          sb.append(c);
        }
      } else {
        if (Character.isWhitespace(c)) {
          lastWasSpace = true;
          sb.append(" ");
        } else {
          lastWasSpace = false;
          sb.append(c);
        }
      }
    }
    String cpuResult = sbCpu.toString().trim();
    String speed = sbSpeed.toString().trim();
    // Cap both fields at 40 characters.
    if (cpuResult.length() >= 40) {
      cpuResult = cpuResult.substring(0, 40);
    }
    if (speed.length() >= 40) {
      speed = speed.substring(0, 40);
    }
    return new String[] {cpuResult,speed};
  }

  /**
   * Reads the CPU speed from the "cpu.brand_string" system property,
   * returning the part after '@' when present, otherwise the whole value.
   *
   * @param sys abstraction over the system properties/messages.
   * @return speed text, or the property value, or the "unknown" message.
   */
  public static String getCpuSpeed(I_FabSystem sys) {
    I_FabricateConstants constants = sys.getConstants();
    I_SystemMessages sysMessages = constants.getSystemMessages();
    String cpu = sys.getProperty("cpu.brand_string",sysMessages.getUnknown());
    int atIdx = cpu.indexOf("@");
    if (atIdx != -1) {
      if (atIdx != cpu.length() - 1) {
        return cpu.substring(atIdx + 1, cpu.length());
      }
    }
    return cpu;
  }

  /**
   * Normalizes the "os.name" property to one of the class constants
   * (MAC, WINDOWS, LINUX, UNUX) or returns the lower-cased raw name.
   *
   * @param sys abstraction over the system properties/messages.
   * @return normalized operating system name.
   */
  public static String getOperatingSystem(I_FabSystem sys) {
    I_FabricateConstants constants = sys.getConstants();
    I_SystemMessages sysMessages = constants.getSystemMessages();
    String os = sys.getProperty("os.name", sysMessages.getUnknown());
    os = os.toLowerCase();
    if ((os.indexOf("mac") >= 0) || (os.indexOf("darwin") >= 0)) {
      return MAC;
    } else if (os.indexOf("win") >= 0) {
      return WINDOWS;
    } else if (os.indexOf("linux") >= 0) {
      return LINUX;
    } else if (os.indexOf("nux") >= 0) {
      return UNUX;
    } else {
      return os;
    }
  }

  /**
   * @param sys abstraction over the system properties/messages.
   * @return the "java.version" property, or the "unknown" message.
   */
  public static String getJavaVersion(I_FabSystem sys) {
    I_FabricateConstants constants = sys.getConstants();
    I_SystemMessages sysMessages = constants.getSystemMessages();
    return sys.getProperty("java.version", sysMessages.getUnknown());
  }

  /**
   * Determines the OS version as a dotted-digit string, using an
   * OS-specific source (sw_vers on Mac, os.name on Windows, os.version on
   * Linux); other systems get the "unknown" message.
   *
   * @param sys abstraction over the system (process execution, properties).
   * @param os normalized OS name, see getOperatingSystem(I_FabSystem).
   * @return version string containing only digits and dots, or "unknown".
   */
  public static String getOperatingSystemVersion(I_FabSystem sys, String os) {
    if (MAC.equals(os)) {
      try {
        I_Executor exe = sys.getExecutor();
        I_ExecutionResult er = exe.executeProcess(FabricationMemoryConstants.EMPTY_ENV,
            ".", "sw_vers", "-productVersion");
        String ver = er.getOutput();
        if (ver != null) {
          StringBuilder sb = getVersionNumbersAndDots(ver);
          ver = sb.toString();
          return ver;
        }
      } catch (Exception x) {
        //do nothing — fall through to the "unknown" message below
      }
    } else if (WINDOWS.equals(os)) {
      I_FabricateConstants constants = sys.getConstants();
      I_SystemMessages sysMessages = constants.getSystemMessages();
      String osName = sys.getProperty("os.name", sysMessages.getUnknown());
      StringBuilder sb = getVersionNumbersAndDots(osName);
      return sb.toString();
    } else if (LINUX.equals(os)) {
      I_FabricateConstants constants = sys.getConstants();
      I_SystemMessages sysMessages = constants.getSystemMessages();
      String osName = sys.getProperty("os.version", sysMessages.getUnknown());
      StringBuilder sb = getVersionNumbersAndDots(osName);
      return sb.toString();
    }
    I_FabricateConstants constants = sys.getConstants();
    I_SystemMessages sysMessages = constants.getSystemMessages();
    return sysMessages.getUnknown();
  }

  /**
   * Filters a string down to its digits and dots (e.g. "Windows 8.1" to
   * "8.1").
   *
   * @param ver text to filter; must not be null.
   * @return builder containing only the digit and '.' characters of ver.
   */
  public static StringBuilder getVersionNumbersAndDots(String ver) {
    StringBuilder sb = new StringBuilder();
    char [] chars = ver.toCharArray();
    for (int i = 0; i < chars.length; i++) {
      char c = chars[i];
      if (Character.isDigit(c) || '.' == c) {
        sb.append(c);
      }
    }
    return sb;
  }

  /**
   * Determines the local host name, first via the InetAddress lookup, then
   * via the COMPUTERNAME (Windows) and HOSTNAME (Unix) environment
   * variables.
   *
   * @param sys abstraction over the system (network, environment).
   * @return host name; may be null when no source provides one.
   */
  public static String getHostname(I_FabSystem sys) {
    try {
      String result = sys.getInetAddressHostname();
      if (!StringUtils.isEmpty( result))
        return result;
    } catch (UnknownHostException e) {
      // failed; try alternate means.
    }
    String host = sys.getenv("COMPUTERNAME");
    if (host != null) {
      return host;
    }
    host = sys.getenv("HOSTNAME");
    return host;
  }
}
import formencode
class ConnForm(formencode.Schema):
    """Validation schema for an IRC-style connection form.

    Validates the server/nickname/channel triple; undeclared form fields
    are accepted during validation but stripped from the result.
    """

    # Accept fields that are not declared below instead of raising...
    allow_extra_fields = True
    # ...but drop those extra fields from the validated output.
    filter_extra_fields = True
    # All three connection parameters are required, non-empty strings.
    server = formencode.validators.String(not_empty=True)
    nickname = formencode.validators.String(not_empty=True)
    channel = formencode.validators.String(not_empty=True)
|
/**
* Helper for recursing on a block.
* @param node The BlockNode to recurse on.
*/
private void visitBlockHelper(BlockNode node) {
if (node.needsEnvFrameDuringInterp() != Boolean.FALSE /*true or unknown*/) {
env.push(Maps.<String, SoyValue>newHashMap());
visitChildren(node);
env.pop();
} else {
visitChildren(node);
}
} |
def GetImageFromResource(t,imageName,large):
    """Return an image loaded from an application resource.

    Stub: the body is not implemented and currently returns None.

    NOTE(review): presumably ``t`` is a resource container, ``imageName``
    selects the resource, and ``large`` chooses between two icon sizes —
    confirm against callers before implementing.
    """
    pass
// Fulfilled sets the value of the 'fulfilled' attribute to the given value.
//
//
func (b *AddOnRequirementStatusBuilder) Fulfilled(value bool) *AddOnRequirementStatusBuilder {
b.fulfilled = value
b.bitmap_ |= 2
return b
} |
// resizeCells for AddColumn and AddColumns
func (dt *DataTable) resizeCells() {
for i, r := range dt.Rows {
r.Cells = append(r.Cells, Cell{
ColumnIndex: len(dt.Columns) - 1,
RowIndex: i,
Value: nil})
}
} |
Improvement of the air-sea flux parameterization scheme in the ocean circulation model
The COARE (Coupled Ocean-Atmosphere Response Experiment) algorithm is an advanced algorithm for calculating the air-sea flux, and COARE 3.0 is its latest version. In this paper, the COARE 3.0 bulk sea-air flux parameterization scheme is introduced into the SBPOM model. The sea surface temperature (SST) simulated by the SBPOM ocean model with and without COARE 3.0 in the areas near the equator is compared, and the reasons for the differences in the simulated SST are analysed. The results show that the COARE 3.0 bulk air-sea flux parameterization scheme performs well in the SBPOM ocean model. The simulation accuracy of the SBPOM ocean model in the areas near the equator improved after the COARE 3.0 bulk sea-air flux parameterization scheme was introduced, and the simulation error in most areas was within 1°C, mainly because the COARE 3.0 bulk sea-air flux parameterization scheme improved the accuracy of the momentum flux calculation.
Introduction
Due to the influence of nature and mankind, the weather and climate of the earth are gradually changing in the 21st century, such as "global warming". These weather and climate anomalies affect the energy transport between the ocean and the atmosphere, such as momentum flux and heat flux between the sea and the air. They not only affect the distribution of temperature-salinity and ocean circulation in the ocean, but also play a driving role in the numerical simulation .
At present, there are four main parameterization schemes for obtaining air-sea flux in the world, namely bulk parameteration scheme, flux profile scheme, inertial dissipation scheme and vortex correlation scheme. Each scheme has its merits or defects . The advantages of the bulk parameteration scheme are that the algorithm structure is clear, the exchange coefficient can be obtained by iteration, and the operation speed is fast. It is widely used in various ocean models. The main disadvantage of this scheme is that the parameteration scheme is difficult to determine, and each parameterization scheme has its own unique exchange coefficient . Besides, exchange coefficient needs to be supported by a large amount of observations and can be only used in areas with enough data. The final disadvantage is that the scheme has higher accuracy of air-sea flux calculated at a certain wind speed (middle-low wind speed) and poor applicability under high wind speed conditions . The advantage of the flux profile scheme is that the requirements of the measuring instrument are low, and REES2019 IOP Conf. Series: Earth and Environmental Science 300 (2019) 032093 IOP Publishing doi:10.1088/1755-1315/300/3/032093 2 the low-frequency measuring instrument can complete the measurement of the average variables profile of the atmospheric boundary layer. The disadvantages are that the atmospheric boundary layer variable profile is affected by many factors, and the use of a large number of empirical formulas makes the calculated air-sea flux error larger . The advantages of the inertial dissipation scheme are that the required air-sea flux data can be directly measured by the instrument, and the result is intuitive and easy to operate. Since this scheme is based on the similarity theory, one disadvantage is that the calculated air-sea flux is the absolute value of the flux, and other data is needed to determine the sign of the flux. 
The advantage of the vortex correlation scheme is that the calculation method is direct, the air-sea flux obtained is highly accurate, and the flux at different heights can also be obtained; the disadvantage is the high requirement of the measuring devices and high cost in equipment purchase and maintenance, and it is not suitable for large-scale use. In addition, it has higher requirements for hydro-meteorological conditions during observation, and the error of data obtained is large when the observation environment is poor. Through the discussion of the advantages and disadvantages of these four kinds of air-sea flux parameterization schemes, this paper chooses the bulk parameteration scheme to improve the model airsea flux. Because the bulk parameteration scheme is simple and only needs several elements such as wind speed, temperature and specific humidity, furthermore the calculation speed is fast and the calculation accuracy is high, and it has been applied in other models .
The earliest bulk algorithm Liu-Katsaros-Businger (LKB) was developed in 1979, and this algorithm works well in calculating sea surface air-sea flux. In the early 1990s, based on COARE experimental mapping data as well as other experimental data of voyage and the development of air-sea interaction theory, the LKB bulk algorithm empirical formula and the air-sea exchange coefficient in the formula were further improved. The improved LKB algorithm was renamed to the COARE 1.0 bulk algorithm. After the generation of COARE 1.0 algorithm, with the increase of the amount of observed data and the deepening of theoretical research, the COARE algorithm is gradually improving, and there are improved versions such as COARE 2.0 and COARE 2.5. The main difference between these versions is the improvement of the empirical exchange coefficient and the expansion of the application of the model. Based on the analysis of the early versions of COARE and the combination of new theoretical research results, the COARE 3.0 bulk algorithm was in use in 2003. Some Studies show that the current COARE 3.0 bulk algorithm is an internationally recognized optimal parameterization scheme for calculating momentum flux and heat flux between sea and air.
The POM is a three-dimensional bar clinic ocean circulation model based on the original equations developed at Princeton University . The Stony Brook Parallel Ocean Model (SBPOM) used in this paper is a parallel version based on the POM model developed by Antoni and Wang . The SBPOM model does not contain a parameterization scheme for calculating air-sea flux, and its air-sea flux needs to be provided by reanalysis data. In order to realize the coupling operation between SBPOM model and atmospheric model, the parameterization scheme of air-sea flux calculation is needed to be integrated into SBPOM model. This paper improved the SBPOM ocean model based on COARE 3.0 bulk parameterization scheme. The improved SBPOM ocean model is used to simulate the sea surface temperature (SST) in the sea near the equator. Then, the simulated SST of improved SBPOM ocean model was compared with the ECMWF data, and the differences between the air-sea flux calculated by the COARE 3.0 bulk parameterization scheme and the NCEP reanalysis data was analysed. And the main reasons for the changes in the simulated SST before and after the improvement.
Scheme
The COARE 3.0 bulk parameterization scheme is based on the MOST similarity theory, which expresses the air-sea flux as a function of the mean value of the atmospheric or oceanological elements near the sea surface : Where x is the wind speed component of the direction u , v , is the potential temperature, q is the atmospheric or oceanological elements near the sea surface; x c is the corresponding bulk exchange is the total bulk experience exchange coefficient; X is the difference between the sea surface variable and the mean value of the sea surface atmospheric variable; S represents the relative average wind speed of the sea surface, which is the vector sum of the average wind speed of the sea surface and the gust ( g U ) of the sea surface. The profile of X and S is: The empirical exchange coefficient in the MOST similarity theory is related to the sea surface stability of the air-sea interface: Where n means that the profile is applicable to neutral stability conditions( =0 Where T represents the temperature, g is the acceleration of gravity, w z represents the component of the different variables along the streamline direction. The COARE 3.0 bulk parameterization scheme integrates a variety of modules such as sea surface roughness and skin cooling of the warm water layer, and the influence of various factors on the calculation of air-sea flux was taken into account. This makes the calculated air-sea flux more accurate and can meet the needs of model simulation.
Materials
NECP reanalysis data contains ocean and sea surface atmospheric data, using a Gaussian grid with coverage of 88°S-88°N, 0°-360°, with the spatial resolution of 1.9° 1.875° and the time resolution of 6h . The SODA data is a monthly average reanalysis data provided by the Global Ocean Data Assimilation Analysis System with a spatial resolution of 0.5° and coordinate . The spatial REES2019 IOP Conf. Series: Earth and Environmental Science 300 (2019) 032093 IOP Publishing doi:10.1088/1755-1315/300/3/032093 4 resolution of the ETOPO5 topographic data is 5' 5'. The ECMWF reanalysis data is the reanalysis data provided by the European Medium-Range Weather Forecast Centre . It contains information on global atmosphere, ocean and air-sea flux. It used T55 grid and coordinate. This data assimilate a large number of high-precision observatory data and satellite data. This paper uses this data as verification data for simulated results. The Argo buoy data was derived from the international Argo project proposed in 1998, and 181 Argo were used as the observed data to compare with the simulated results .
Models and settings
Compared with the POM model, the Computational efficiency of the SBPOM is greatly improved. The simulated area is 5°S~15°N, 0°~360°, with the spatial resolution of 0.5°, and divided into 40 layers with coordinate. The terrain data is provided by ETOPO5 the initial field and boundary are provided by the SODA data. The initial SBPOM model was drived by the momentum flux and heat flux from NECP. The improved SBPOM model was drived by the momentum flux and heat flux calculated by the COARE 3.0 bulk parameterization scheme. The simulated time of the model is January 2014. Fig. 1shows that the difference of the SST simulated by the improved SBPOM model and the ECMWF reanalysis data is mostly in the range of ±1.5°C, and the simulated error is large in the local area. The areas with large error are mainly distributed along the equator and on the west coast of Mexico and North Africa, and the simulated error in the coast of the Somali Peninsula is largest, reaching 3°C. Combined with Fig. 2 and Fig. 3, it can be seen that the improved SBPOM model simulated results have a large improvement, and the simulation error is in the range of ±1°C, especially in the area where the simulated error is large in Fig. 1, and the simulation accuracy is greatly improved.
Result
In order to further compare the difference between the simulated SST of the improved SBPOM model and the measured SST, the simulated results are interpolated to the Argo buoy points, and the scatter plot of the simulated results against the Argo measured data is shown in Fig. 4. Comparing the fitted line with the line $y = x$, Fig. 4(A) shows that the red (fitted) line lies below the black line, indicating that the SST of the initial SBPOM model is generally high and the error is large. It can be seen from
Discussion
The simulated SST after the introduction of the COARE 3.0 bulk parameterization scheme causes the improvement of the simulation compared to the simulated SST with NCEP reanalysis data .According to the setting of the mode, the heat flux and the momentum flux are respectively divided into the sensible heat flux term, the other heat flux term, the zonal momentum flux term, and the meridional momentum flux term to discuss the reason . Fig. 5 and Fig. 6, it can be seen that the sensible heat flux in the near-equatorial sea is small, and the distribution of the sensible heat flux of the NECP reanalysis data is almost the same with the distribution of that calculated by COARE 3.0 bulk parameterization scheme. Combined with Fig. 7, the sensible heat flux calculated by the COARE 3.0 bulk parameterization scheme is slightly higher than that of the NECP reanalysis data, and the difference is about 20 W/m 2 .
Other heat flux terms (Q) are composed of long-wave radiation ( L Q ), latent heat flux ( E Q ) and shortwave radiation ( S Q ). The profile is: Fig. 8 and Fig. 9 show that high value of the other heat flux was distributed in the area between the Indian Ocean and the Pacific Ocean and the mid-western Pacific Ocean, with a maximum of about 900 W/m 2 ; in the Atlantic and the mid-eastern Pacific, the value is lower than 400 W/m 2 . Fig. 10 shows that in most areas, the calculated result of the CORAE 3.0 bulk parameterization scheme is slightly higher than that of the NCEP reanalysis data. The absolute value of difference is greater than 50 W/m 2 in the central Indian Ocean, the Gulf of Thailand, the west coast of Mexico, and the eastern Atlantic. In the sea near the equator, the wind speed is small. Fig. 11, Fig. 12, Fig. 14 and Fig. 15 show that the momentum flux is relatively small, the distribution of momentum flux of NCEP and that of the momentum flux calculated by CORAE 3.0 bulk parameterization scheme are basically the same, and the difference is in the range of -0.08 N/m 2 -0.02 N/m 2 . Compared with the NCEP reanalysis data, for the zonal momentum flux, the calculated result of the CORAE 3.0 bulk parameterization scheme is small in the north of the equator in the Pacific and Atlantic, and is large in the south of the equator in Indian Ocean and the eastern part of the Pacific and Atlantic; for the meridional momentum flux, the calculated result of the CORAE 3.0 bulk parameterization scheme is small in the western Indian Ocean, the South China Sea, the Gulf of Mexico, and the north of the equator in the Pacific and Atlantic; while in the eastern Pacific and the southern Atlantic, that is large.
By analysing the distribution of heat flux and momentum flux of NCEP and that calculated by CORAE 3.0 bulk parameterization scheme and simulated SST improvement portion, it is suspected that the main reason for the difference of simulated SST between the initial SBPOM and the improved SBPOM is the improvements in momentum flux due to CORAE 3.0 bulk parameterization scheme. In order to verify this hypothesis, only the heat flux term is improved for the SBPOM, and the momentum flux term is not changed. The simulated result was compared with the ECMWF. Comparing the Fig. 1 Fig. 17, it is found that the improvement of heat flux has little effect on the SST simulation. We can conclude that the improvement of the momentum flux by the CORAE 3.0 bulk parameterization scheme is the main reason for the improvement in SST.
Conclusion
The CORAE 3.0 bulk parameterization scheme can be applied not only in other models, but also be good in the SBPOM model. The introduction of the CORAE 3.0 bulk parameterization scheme simplifies the model preparation process, and is helpful to the coupling between SBPOM ocean model and the atmosphere model in the future. In the numerical simulation of SST in the areas near equator, compared with the use of NCEP reanalysis data, the CORAE 3.0 bulk parameterization scheme can improve the SST simulation accuracy. And the simulation error of most areas was within ±1°C, The main reason is that the accuracy of the calculation of momentum flux improved by COARE 3.0 bulk sea-air flux parameterization scheme. |
''' Batched Room-to-Room navigation environment '''
import sys
sys.path.append('buildpy36')
import MatterSim
import csv
import numpy as np
import math
import base64
import utils
import json
import os
import random
import networkx as nx
from param import args
from speaker import Speaker
import pdb
from utils import load_datasets, load_nav_graphs, Tokenizer
csv.field_size_limit(sys.maxsize)
class EnvBatch():
    ''' A simple wrapper for a batch of MatterSim environments,
        using discretized viewpoints and pretrained features '''

    def __init__(self, feature_store=None, batch_size=100):
        """
        1. Load pretrained image features (if provided).
        2. Init one simulator per batch entry.

        :param feature_store: dict mapping "<scanId>_<viewpointId>" keys to
            precomputed feature arrays; None (or anything non-dict) disables
            features.
        :param batch_size: number of simulators to create.
        """
        # Camera parameters are the same with or without features.
        self.image_w = 640
        self.image_h = 480
        self.vfov = 60
        if isinstance(feature_store, dict) and feature_store:
            # A silly way to avoid multiple reading: the caller passes the
            # already-loaded dict.
            self.features = feature_store
            self.feature_size = next(iter(self.features.values())).shape[-1]
            print('The feature size is %d' % self.feature_size)
        else:
            if feature_store:
                # Bug fix: a truthy non-dict feature_store previously left
                # self.features unset and crashed further below.
                print('Unsupported feature store type: %s' % type(feature_store))
            print('Image features not provided')
            self.features = None
        # Bug fix: the original unconditionally called self.features.keys(),
        # which raised AttributeError whenever features were not provided.
        if self.features:
            self.featurized_scans = set(key.split("_")[0] for key in self.features)
        else:
            self.featurized_scans = set()
        self.sims = []
        for i in range(batch_size):
            sim = MatterSim.Simulator()
            sim.setRenderingEnabled(False)
            sim.setDiscretizedViewingAngles(True)   # 30-degree increments (otherwise radians)
            sim.setCameraResolution(self.image_w, self.image_h)
            sim.setCameraVFOV(math.radians(self.vfov))
            sim.init()
            self.sims.append(sim)

    def _make_id(self, scanId, viewpointId):
        """Key used to look up precomputed features for one view."""
        return scanId + '_' + viewpointId

    def newEpisodes(self, scanIds, viewpointIds, headings):
        """Start a new episode in every simulator (elevation reset to 0)."""
        for i, (scanId, viewpointId, heading) in enumerate(zip(scanIds, viewpointIds, headings)):
            self.sims[i].newEpisode(scanId, viewpointId, heading, 0)

    def getStates(self):
        """
        Get list of states augmented with precomputed image features. rgb field will be empty.

        Agent's current view [0-35] (set only when viewing angles are discretized)
        [0-11] looking down, [12-23] looking at horizon, [24-35] looking up

        :return: [ (feature or None, sim_state) ] * batch_size
        """
        feature_states = []
        for sim in self.sims:
            state = sim.getState()
            long_id = self._make_id(state.scanId, state.location.viewpointId)
            if self.features:
                feature_states.append((self.features[long_id], state))
            else:
                feature_states.append((None, state))
        return feature_states

    def makeActions(self, actions):
        ''' Take an action using the full state dependent action interface (with batched input).
            Every action element should be an (index, heading, elevation) tuple. '''
        for i, (index, heading, elevation) in enumerate(actions):
            self.sims[i].makeAction(index, heading, elevation)
class TestBatch():
    ''' Implements the Room to Room navigation task, using discretized viewpoints and pretrained features '''

    def __init__(self, feature_store, batch_size=100, seed=10, splits=('test',), tokenizer=None,
                 name=None):
        """
        Build synthetic trajectory items: for every start point in the given
        splits, each reachable endpoint becomes one item (instructions to be
        filled in later, e.g. by a speaker model).

        :param feature_store: passed through to EnvBatch.
        :param batch_size: number of simulators in the wrapped EnvBatch.
        :param seed: kept for interface compatibility (not used here).
        :param splits: dataset split names to load (default: the test split).
        :param tokenizer: optional tokenizer, stored as self.tok when given.
        :param name: unused, kept for interface compatibility.
        """
        self.env = EnvBatch(feature_store=feature_store, batch_size=batch_size)
        if feature_store:
            self.feature_size = self.env.feature_size
        self.data = []
        if tokenizer:
            self.tok = tokenizer
        # Load every split exactly once and reuse the items; the original
        # code read the dataset files twice (once for scan collection and
        # once for path generation).
        items = []
        for split in splits:
            items.extend(load_datasets([split]))
        self.scans = set(item['scan'] for item in items)
        self._load_nav_graphs()
        # Now we have graphs and distances.
        self.seed = seed  # retained for API compatibility; unused below
        pathidx = 0
        for item in items:
            current_scan = item['scan']
            current_startpoint = item['path'][0]
            candidate_paths = self.paths[current_scan][current_startpoint]
            for possible_endpoint, path in candidate_paths.items():
                new_item = dict(item)
                new_item['path'] = path
                try:
                    new_item['distance'] = self.distances[current_scan][current_startpoint][possible_endpoint]
                except KeyError:
                    continue
                if new_item['distance'] <= 0:  # skip obstructed / zero-length paths
                    continue
                new_item['path_id'] = pathidx
                pathidx += 1
                self.data.append(new_item)
        # Now it is pretty much like the augmentation data without instructions.
        # Next call speaker to fill in the instruction part!

    def dumpdata(self, output_dir):
        """Write the generated trajectory items to *output_dir* as pretty JSON."""
        with open(output_dir, 'w') as f:
            json.dump(self.data, f, indent=4, sort_keys=True)

    def _load_nav_graphs(self):
        """
        Load the connectivity graph for each scan in self.scans.

        Stores the graphs in self.graphs {scan_id: graph}, all shortest
        paths in self.paths {scan_id: {view_x: {view_y: [path]}}}, and the
        corresponding lengths in self.distances (same structure).

        :return: None
        """
        print('Loading navigation graphs for %d scans' % len(self.scans))
        self.graphs = load_nav_graphs(self.scans)
        self.paths = {}
        self.distances = {}
        for scan, G in self.graphs.items():  # compute all shortest paths
            self.paths[scan] = dict(nx.all_pairs_dijkstra_path(G))
            self.distances[scan] = dict(nx.all_pairs_dijkstra_path_length(G))
|
/**
* Datart
*
* Copyright 2021
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { IJsonModel } from 'flexlayout-react';
/**
 * Identifiers of the React components that can be mounted into the
 * flex-layout tabs below. The string values are the component names the
 * layout factory resolves; CONTROL is declared but not mounted in the
 * default model below.
 */
export enum LayoutComponentType {
  CONTROL = 'ChartPresentControllerPanel',
  PRESENT = 'ChartPresentWrapper',
  VIEW = 'ChartDataViewPanel',
  CONFIG = 'ChartConfigPanel',
}

/**
 * Default flexlayout-react model: a data-view panel (left, 256px wide), a
 * config panel (middle, 360px wide) and the chart presentation area taking
 * the remaining width.
 */
const layoutConfig: IJsonModel = {
  global: {
    // Tabs may be popped out into floating windows.
    tabEnableFloat: true,
    // Tabs cannot be closed by the user.
    tabEnableClose: false,
    // Hide the tab strip: each tabset hosts exactly one fixed tab.
    tabSetEnableTabStrip: false,
    splitterSize: 2,
  },
  layout: {
    type: 'row',
    id: 'container',
    children: [
      // Left: chart data view (field drag source).
      {
        type: 'tabset',
        id: 'model-dragbar',
        width: 256,
        children: [
          {
            type: 'tab',
            id: 'model-dragbar-component',
            component: LayoutComponentType.VIEW,
          },
        ],
      },
      // Middle: chart configuration panel.
      {
        type: 'tabset',
        id: 'config',
        width: 360,
        children: [
          {
            type: 'tab',
            id: 'config-component',
            component: LayoutComponentType.CONFIG,
          },
        ],
      },
      // Right: chart presentation area (no fixed width; fills the rest).
      {
        type: 'tabset',
        id: 'present',
        children: [
          {
            type: 'tab',
            id: 'present-wrapper',
            component: LayoutComponentType.PRESENT,
          },
        ],
      },
    ],
  },
};

export default layoutConfig;
|
<reponame>Tsonewa/to-do-bulgaria
package com.example.todobulgaria.models.entities;
import com.example.todobulgaria.models.BaseEntity;
import com.example.todobulgaria.models.enums.CategoryEnum;
import javax.persistence.Entity;
import javax.persistence.EnumType;
import javax.persistence.Enumerated;
import javax.persistence.Table;
import java.io.Serializable;
/**
 * JPA entity for a to-do category, persisted in the "categories" table.
 * The category name is one of the {@link CategoryEnum} values.
 */
@Entity
@Table(name = "categories")
public class CategoryEntity extends BaseEntity implements Serializable {

    // The class is Serializable: pin an explicit serialVersionUID so
    // recompilation does not break deserialization of existing data.
    private static final long serialVersionUID = 1L;

    // Stored as the enum constant's name (STRING) rather than its ordinal,
    // so reordering CategoryEnum cannot corrupt persisted rows.
    @Enumerated(value = EnumType.STRING)
    private CategoryEnum name;

    /** Default constructor required by JPA. */
    public CategoryEntity() {
    }

    public CategoryEnum getName() {
        return name;
    }

    public void setName(CategoryEnum name) {
        this.name = name;
    }
}
|
// deletableBy is the underlying operation used by DeletableBy controller
//
// Deletion rules implemented below:
//   - closed domains: while the grace period has not finished, only the
//     domain admin may delete; afterwards anyone may.
//   - open domains: nobody may delete until the grace period has finished.
//
// NOTE(review): a non-nil error from gracePeriodFinished() is treated here
// as "grace period not yet finished" — confirm against its contract.
func (c *Domain) deletableBy(addr sdk.AccAddress) error {
	// Calling this on a missing domain is a programming error, not a
	// user-facing failure, hence the panic.
	if err := c.requireDomain(); err != nil {
		panic("validation check not allowed on a non existing domain")
	}
	switch c.domain.Type {
	case types.ClosedDomain:
		// Grace period still running: restrict deletion to the admin.
		if err := c.gracePeriodFinished(); err != nil {
			if err := c.isAdmin(addr); err != nil {
				return sdkerrors.Wrap(types.ErrUnauthorized, "only admin delete domain before grace period is finished")
			}
		}
	case types.OpenDomain:
		// Open domains can never be deleted before the grace period ends.
		if err := c.gracePeriodFinished(); err != nil {
			return sdkerrors.Wrap(types.ErrDomainGracePeriodNotFinished, "cannot delete open domain before grace period is finished")
		}
	}
	return nil
}
def fetchone(self):
    """Fetch the next row of the result set.

    Unlike the base psycopg2 cursor, which signals exhaustion by
    returning None, this raises NoDataError so callers never have to
    None-check each row.
    """
    row = psycopg2.extensions.cursor.fetchone(self)
    if row is not None:
        return row
    raise NoDataError("no more data")
/**
 * Attempts to resolve a name or ID into a thing from multiple potential
 * matches. Either a name or ID must be provided, but not both.
 *
 * @param name name for a thing
 * @param id ID for a thing
 * @param type expected type of thing to resolve
 * @param ids IDs of candidate things
 * @param context thing providing context for resolution
 * @return resolved thing (empty if unresolved)
 */
private <T extends Thing> Optional<T> resolveAmong(String name,
    UUID id, Class<T> type, Iterable<UUID> ids, Thing context) {
  for (UUID iid : ids) {
    Optional<Thing> thingOpt = Universe.getCurrent().getThing(iid);
    if (thingOpt.isEmpty()) {
      // Stale reference: the candidate no longer exists in the universe.
      continue;
    }
    Thing thing = thingOpt.get();
    if (!type.isAssignableFrom(thing.getClass())) {
      // Candidate is not of the expected type.
      continue;
    }
    // Non-GOD players may only resolve extensions when at least one of
    // their roles is in the extension-permitted role set.
    if (context instanceof Player && thing instanceof Extension &&
        !(Player.GOD.equals(context)) &&
        !Universe.getCurrent().getRoles((Player) context).stream()
            .anyMatch(r -> Extension.PERMITTED_ROLES.contains(r))) {
      continue;
    }
    // Match by case-insensitive name (when a name was given) or by exact ID.
    if ((name != null && thing.getName().equalsIgnoreCase(name)) ||
        thing.getId().equals(id)) {
      return Optional.of(type.cast(thing));
    }
  }
  return Optional.empty();
}
/**
* Convert the specified value given in base unit to this unit.
*
* @param baseValue Base value to convert.
* @return Value converted to this unit.
*/
public double fromBase(double baseValue)
{
double value = (b_ - d_ * baseValue) / (c_ * baseValue - a_);
return value;
} |
"It often does more harm than good to force definitions on things we don't understand. Besides, only in logic and mathematics do definitions ever capture concepts perfectly. The things we deal with in practical life are usually too complicated to be represented by neat, compact expressions. Especially when it comes to understanding minds, we still know so little that we can't be sure our ideas about psychology are even aimed in the right directions. In any case, one must not mistake defining things for knowing what they are (Minsky, 1985).
This dictionary of cognitive science terms was initiated by Dr. Michael Dawson, and introduced as a class project. The project was designed to give students the opportunity to learn more about the basic concepts of cognitive science, and also to learn about the delivery of information via the world wide web.
Beginning October, 2009 this site is being reworked -- existing definitions are being edited and revised, and new terms are being added.
The changes will be advertised as tweets: search Twitter for #cogsci #wotd |
#include<cstdio>
#include<cstring>
#include<algorithm>
#include<vector>
using namespace std;
const int maxn=800000+10;
int n,m;                 // n = number of variables, m = number of clauses
int c[maxn];             // c[i] != 0: the two clauses holding variable i must get
                         // equal states; c[i] == 0: opposite states (see main)
int a[maxn],b[maxn];     // a[i], b[i] = the (up to) two clauses variable i occurs in
namespace TwoSat{
    // Trial-assignment 2-SAT solver. Clause i is represented by the node pair
    // (i<<1) and (i<<1|1); node u and node u^1 are the two opposite states of
    // the same clause, and an edge u -> v means "u chosen implies v chosen".
    struct Edge{
        int next,v;      // next = previous edge out of the same node; v = target
        Edge(){}
        Edge(int next,int v): next(next),v(v){}
    }e[maxn<<1];
    int head[maxn<<1],cnt;   // forward-star adjacency (head[] == 0 is the sentinel,
                             // valid because edge indices start at 1)
    int st[maxn<<1],top;     // stack of literals marked during the current trial,
                             // used to roll back a failed trial
    bool mark[maxn<<1];      // mark[u] = literal u is currently assigned true
    inline void init()
    {
        memset(head,0,sizeof(head));
        cnt=0;
    }
    inline void AddEdge(int u,int v)
    {
        e[++cnt]=Edge(head[u],v);
        head[u]=cnt;
    }
    // Tentatively assert literal u and everything reachable from it.
    // Returns false when some literal and its negation would both hold.
    bool dfs(int u)
    {
        if (mark[u^1]) return false;
        if (mark[u]) return true;
        mark[u]=true;
        st[++top]=u;
        for (int i=head[u];i;i=e[i].next)
            if (!dfs(e[i].v))
                return false;
        return true;
    }
    // Classic trial-assignment 2-SAT: for each still-unassigned clause try one
    // polarity; on contradiction roll the marks back and try the other.
    // Unsatisfiable iff both polarities fail for some clause.
    inline bool solve()
    {
        memset(mark,0,sizeof(mark));
        for (int i=2,_=m<<1;i<=_;i+=2)
            if (!mark[i]&&!mark[i+1])
            {
                top=0;
                if (!dfs(i))
                {
                    while (top) mark[st[top--]]=0;
                    if (!dfs(i+1))
                        return false;
                }
            }
        return true;
    }
}
int main()
{
    // Redirect I/O to files only when the judge supplies them.
    if (fopen("D.in","r")!=NULL)
    {
        freopen("D.in","r",stdin);
        freopen("D.out","w",stdout);
    }
    scanf("%d%d",&n,&m);
    for (int i=1;i<=n;i++)
        scanf("%d",&c[i]);
    // Record, for every variable, the (at most two) clauses it belongs to.
    for (int i=1;i<=m;i++)
    {
        int k;
        scanf("%d",&k);
        for (int j=1;j<=k;j++)
        {
            int x;
            scanf("%d",&x);
            if (!a[x]) a[x]=i;
            else b[x]=i;
        }
    }
    for (int i=1;i<=n;i++)
    {
        if (c[i])
        {
            // c[i] != 0: both clauses containing i must take equal states,
            // so each state of one implies the same state of the other.
            TwoSat::AddEdge(b[i]<<1,a[i]<<1);
            TwoSat::AddEdge(a[i]<<1,b[i]<<1);
            TwoSat::AddEdge(b[i]<<1|1,a[i]<<1|1);
            TwoSat::AddEdge(a[i]<<1|1,b[i]<<1|1);
        }
        else
        {
            // c[i] == 0: the two clauses must take opposite states.
            TwoSat::AddEdge(a[i]<<1,b[i]<<1|1);
            TwoSat::AddEdge(b[i]<<1,a[i]<<1|1);
            TwoSat::AddEdge(a[i]<<1|1,b[i]<<1);
            TwoSat::AddEdge(b[i]<<1|1,a[i]<<1);
        }
    }
    printf(TwoSat::solve() ? "YES":"NO");
    return 0;
}
|
Combined effects of exercise training and high doses of anabolic steroids on cardiac autonomic modulation and ventricular repolarization properties in rats.
Several studies have reported that high doses of synthetic anabolic androgenic steroids (AAS) can have serious negative effects on health, including the cardiovascular system. The aim of this study was to evaluate the combined effects of AAS and exercise training on ventricular repolarization and cardiac autonomic modulation in rats. Male Wistar rats were allocated into four groups: CON-S: sedentary treated with vehicle, ND-S: sedentary treated with nandrolone decanoate, CON-T: swimming trained treated with vehicle, ND-T: swimming trained treated with nandrolone decanoate. Ventricular repolarization was evaluated by electrocardiographic analysis of QT interval and QT dispersion. Cardiac autonomic modulation was assessed by heart rate variability. Our results show that AAS increased QT interval and QT dispersion in sedentary rats (ND-S) as compared to sedentary rats treated with vehicle (CON-S), indicating AAS-induced ventricular repolarization abnormalities. When rats treated with nandrolone decanoate were subjected to concomitant exercise training (ND-T), ventricular repolarization was normalized. On the other hand, AAS-induced reduction in cardiac parasympathetic modulation was not prevented by exercise training. In conclusion, AAS produced cardiac autonomic dysfunction and ventricular repolarization disturbances in rats. Combining an exercise training protocol during the AAS treatment attenuated the ventricular repolarization abnormalities and did not prevent cardiac autonomic dysfunction. |
<reponame>positive-js/mosaic-builds
import { AfterViewInit, ElementRef, Renderer2 } from '@angular/core';
import { CanDisable, CanDisableCtor } from '@ptsecurity/mosaic/core';
import * as i0 from "@angular/core";
/** @docs-private */
export declare class McTabLabelWrapperBase {
}
/** Base class augmented with `disabled` input handling via the CanDisable mixin. */
export declare const McTabLabelWrapperMixinBase: CanDisableCtor & typeof McTabLabelWrapperBase;
/**
 * Used in the `mc-tab-group` view to display tab labels.
 * @docs-private
 */
export declare class McTabLabelWrapper extends McTabLabelWrapperMixinBase implements CanDisable, AfterViewInit {
    elementRef: ElementRef;
    private renderer;
    constructor(elementRef: ElementRef, renderer: Renderer2);
    ngAfterViewInit(): void;
    /** Sets focus on the wrapper element */
    focus(): void;
    /** Offset of the label wrapper element relative to its offset parent, in pixels. */
    getOffsetLeft(): number;
    /** Rendered width of the label wrapper element, in pixels. */
    getOffsetWidth(): number;
    private addClassModifierForIcons;
    static ɵfac: i0.ɵɵFactoryDeclaration<McTabLabelWrapper, never>;
    static ɵdir: i0.ɵɵDirectiveDeclaration<McTabLabelWrapper, "[mcTabLabelWrapper]", never, { "disabled": "disabled"; }, {}, never>;
}
|
/**
* Realtime bolt that can subscribe to any ChatAlytics {@link Serializable} object and publish it
* out to the socket clients.
*
* @author giannis
*/
@ClientEndpoint(encoders = { ChatAlyticsEventEncoder.class, ConnectionTypeEncoderDecoder.class })
public class RealtimeBolt extends ChatAlyticsBaseBolt {
private static final long serialVersionUID = -214311696491358951L;
private static final Logger LOG = LoggerFactory.getLogger(RealtimeBolt.class);
public static final String BOLT_ID = "RT_SOCKET_BOLT_ID";
private Session session;
@Override
public void prepare(ChatAlyticsConfig config, @SuppressWarnings("rawtypes") Map conf,
TopologyContext context, OutputCollector collector) {
WebSocketContainer webSocketContainer = getWebSocketContainer();
this.session = openRealtimeConnection(webSocketContainer,
config.computeConfig.rtComputePort);
}
@Override
public void execute(Tuple input) {
for (Object obj : input.getValues()) {
Serializable serObj;
if (obj instanceof Serializable) {
serObj = (Serializable) obj;
} else {
LOG.warn("Received a non-serializable object. Skipping...");
continue;
}
String type = UPPER_CAMEL.to(LOWER_UNDERSCORE, serObj.getClass().getSimpleName());
ChatAlyticsEvent event = new ChatAlyticsEvent(DateTime.now(DateTimeZone.UTC),
type,
serObj);
publishEvent(event);
}
}
private void publishEvent(ChatAlyticsEvent event) {
try {
session.getBasicRemote().sendObject(event);
} catch (IOException | EncodeException e) {
LOG.error("Can't publish event to realtime compute server. {}", event, e);
}
}
/**
* Opens a connection to the compute socket. This method will return an optional session. If the
* session is absent then this resource will reject user connections
*
* @param webSocketContainer
* The container
* @param config
* ChatAlytics config
* @return An optional session
*/
private Session openRealtimeConnection(WebSocketContainer webSocketContainer, int rtPort) {
URI rtURI = URI.create(String.format("ws://localhost:%d%s/%s",
rtPort,
RT_COMPUTE_ENDPOINT,
ConnectionType.PUBLISHER));
try {
LOG.info("Connecting to {}", rtURI);
return webSocketContainer.connectToServer(this, rtURI);
} catch (DeploymentException | IOException e) {
throw new RuntimeException("Unable to connect to RT compute server. Is it up?");
}
}
private WebSocketContainer getWebSocketContainer() {
return ContainerProvider.getWebSocketContainer();
}
@Override
public void declareOutputFields(OutputFieldsDeclarer fields) {
// no output
}
@Override
public void cleanup() {
LOG.debug("Cleaning up {}", this.getClass().getSimpleName());
try {
session.close();
} catch (IOException e) {
LOG.warn("Unable to close session. Reason: {}", e.getMessage());
}
}
} |
/**
 * <p>Used in testing; should be enhanced.</p>
 *
 * @author Abe White
 */
@Entity
@DiscriminatorValue("query2")
public class QueryTest2 extends QueryTest1 {

    // Self-referential one-to-one relation; all operations cascade.
    @OneToOne(cascade = { CascadeType.ALL })
    private QueryTest2 oneToOne = null;

    // Plain value collection persisted via OpenJPA's @PersistentCollection.
    @PersistentCollection
    private List<String> stringCollection = null;

    @OneToMany(cascade = { CascadeType.ALL })
    private List<QueryTest2> oneToMany = null;

    // String-to-string map; map key stored in the SMAP column.
    @PersistentMap
    @KeyColumn(name = "SMAP")
    private Map<String, String> stringMap = null;

    // String-keyed map of related entities; map key stored in the QT2 column.
    @OneToMany(cascade = { CascadeType.ALL })
    @KeyColumn(name = "QT2")
    private Map<String, QueryTest2> stringToManyMap = null;

    public QueryTest2() {
    }

    public QueryTest2(int id) {
        super(id);
    }

    public QueryTest2 getOneToOne() {
        return oneToOne;
    }

    public void setOneToOne(QueryTest2 val) {
        oneToOne = val;
    }

    // NOTE(review): accessors below intentionally(?) use raw Collection/Map
    // even though the fields are generic -- confirm before tightening, since
    // changing the return types would affect callers.
    public Collection getStringCollection() {
        return stringCollection;
    }

    public void setStringCollection(List<String> val) {
        stringCollection = val;
    }

    public Collection getOneToMany() {
        return oneToMany;
    }

    public void setOneToMany(List<QueryTest2> val) {
        oneToMany = val;
    }

    public Map getStringMap() {
        return stringMap;
    }

    public void setStringMap(Map val) {
        stringMap = val;
    }

    public Map getStringToManyMap() {
        return stringToManyMap;
    }

    public void setStringToManyMap(Map val) {
        stringToManyMap = val;
    }
}
/**
 * Responsible for validating confidential-client-specific authorization.
 * Methods which authenticate based only on the client are implemented here.
 * For each intercepted gRPC method the incoming request is rewritten so that
 * downstream services receive the tenant id (and, for most methods, the IAM
 * client id/secret) resolved from the caller's auth claim.
 */
@Component
public class ClientAuthInterceptorImpl extends MultiTenantAuthInterceptor {

    private static final Logger LOGGER = LoggerFactory.getLogger(ClientAuthInterceptorImpl.class);

    @Autowired
    public ClientAuthInterceptorImpl(CredentialStoreServiceClient credentialStoreServiceClient, TenantProfileClient tenantProfileClient, IdentityClient identityClient) {
        super(credentialStoreServiceClient, tenantProfileClient, identityClient);
    }

    /**
     * Authorizes the caller for the given method and returns the request with
     * claim-derived credentials/tenant attached. Requests for methods not
     * listed here are passed through unchanged.
     *
     * @throws NotAuthorizedException when the headers do not authorize the claim
     */
    @Override
    public <ReqT> ReqT intercept(String method, Metadata headers, ReqT reqT) {
        if (method.equals("deleteUserProfile")) {
            UserProfileRequest request = (UserProfileRequest) reqT;
            AuthClaim claim = authorize(headers, request.getClientId());

            if (claim == null) {
                throw new NotAuthorizedException("Request is not authorized", null);
            }
            String oauthId = claim.getIamAuthId();
            String oauthSec = claim.getIamAuthSecret();

            long tenantId = claim.getTenantId();

            return (ReqT) ((UserProfileRequest) reqT).toBuilder()
                    .setClientId(oauthId)
                    .setClientSecret(oauthSec)
                    .setTenantId(tenantId)
                    .build();

        } else if (method.equals("registerUser")) {
            RegisterUserRequest request = (RegisterUserRequest) reqT;
            AuthClaim claim = authorize(headers, request.getClientId());

            if (claim == null) {
                throw new NotAuthorizedException("Request is not authorized", null);
            }
            String oauthId = claim.getIamAuthId();
            String oauthSec = claim.getIamAuthSecret();

            long tenantId = claim.getTenantId();

            org.apache.custos.iam.service.RegisterUserRequest registerUserRequest =
                    ((RegisterUserRequest) reqT).toBuilder()
                            .setTenantId(tenantId)
                            .setClientId(oauthId)
                            .setClientSec(oauthSec)
                            .build();

            return (ReqT) registerUserRequest;

        } else if (method.equals("enableUser") || method.equals("disableUser") ||
                method.equals("isUserEnabled") || method.equals("isUsernameAvailable")) {
            UserSearchRequest request = (UserSearchRequest) reqT;
            AuthClaim claim = authorize(headers, request.getClientId());

            if (claim == null) {
                throw new NotAuthorizedException("Request is not authorized", null);
            }
            String oauthId = claim.getIamAuthId();
            String oauthSec = claim.getIamAuthSecret();

            long tenantId = claim.getTenantId();

            UserSearchRequest info = ((UserSearchRequest) reqT)
                    .toBuilder()
                    .setClientId(oauthId)
                    .setClientSec(oauthSec)
                    .setTenantId(tenantId)
                    .build();

            return (ReqT) info;

        } else if (method.equals("getUserProfile")) {
            UserProfileRequest req = (UserProfileRequest) reqT;
            AuthClaim claim = authorize(headers, req.getClientId());

            if (claim == null) {
                throw new NotAuthorizedException("Request is not authorized", null);
            }
            // NOTE(review): oauthId/oauthSec are resolved but not attached to the
            // request in this branch (only the tenant id is) -- confirm whether
            // that is intentional before deduplicating these branches.
            String oauthId = claim.getIamAuthId();
            String oauthSec = claim.getIamAuthSecret();

            long tenantId = claim.getTenantId();

            UserProfileRequest request = ((UserProfileRequest) reqT)
                    .toBuilder()
                    .setTenantId(tenantId).build();

            return (ReqT) request;

        } else if (method.equals("getAllUserProfilesInTenant")) {
            UserProfileRequest req = (UserProfileRequest) reqT;
            AuthClaim claim = authorize(headers, req.getClientId());

            if (claim == null) {
                throw new NotAuthorizedException("Request is not authorized", null);
            }
            // NOTE(review): oauthId/oauthSec unused here as well -- see above.
            String oauthId = claim.getIamAuthId();
            String oauthSec = claim.getIamAuthSecret();

            long tenantId = claim.getTenantId();

            UserProfileRequest request = ((UserProfileRequest) reqT)
                    .toBuilder().setTenantId(tenantId).build();

            return (ReqT) request;

        } else if (method.equals("getUserProfileAuditTrails")) {
            // This method authorizes on the headers alone (no client id in the request).
            AuthClaim claim = authorize(headers);

            if (claim == null) {
                throw new NotAuthorizedException("Request is not authorized", null);
            }
            String oauthId = claim.getIamAuthId();
            String oauthSec = claim.getIamAuthSecret();

            long tenantId = claim.getTenantId();

            GetUpdateAuditTrailRequest request = ((GetUpdateAuditTrailRequest) reqT)
                    .toBuilder()
                    .setTenantId(tenantId)
                    .build();

            return (ReqT) request;

        } else if (method.equals("resetPassword")) {
            ResetUserPassword req = (ResetUserPassword) reqT;
            AuthClaim claim = authorize(headers, req.getClientId());

            if (claim == null) {
                throw new NotAuthorizedException("Request is not authorized", null);
            }
            String oauthId = claim.getIamAuthId();
            String oauthSec = claim.getIamAuthSecret();

            long tenantId = claim.getTenantId();

            ResetUserPassword request = ((ResetUserPassword) reqT)
                    .toBuilder()
                    .setClientId(oauthId)
                    .setClientSec(oauthSec)
                    .setTenantId(tenantId)
                    .build();

            return (ReqT) request;

        } else if (method.equals("getUser")) {
            UserSearchRequest req = (UserSearchRequest) reqT;
            AuthClaim claim = authorize(headers, req.getClientId());

            if (claim == null) {
                throw new NotAuthorizedException("Request is not authorized", null);
            }
            String oauthId = claim.getIamAuthId();
            String oauthSec = claim.getIamAuthSecret();

            long tenantId = claim.getTenantId();

            UserSearchRequest request = ((UserSearchRequest) reqT)
                    .toBuilder()
                    .setClientId(oauthId)
                    .setTenantId(tenantId)
                    .setClientSec(oauthSec)
                    .build();

            return (ReqT) request;

        } else if (method.equals("findUsers")) {
            FindUsersRequest req = (FindUsersRequest) reqT;
            AuthClaim claim = authorize(headers, req.getClientId());

            if (claim == null) {
                throw new NotAuthorizedException("Request is not authorized", null);
            }
            String oauthId = claim.getIamAuthId();
            String oauthSec = claim.getIamAuthSecret();

            long tenantId = claim.getTenantId();

            FindUsersRequest request = ((FindUsersRequest) reqT)
                    .toBuilder()
                    .setClientId(oauthId)
                    .setClientSec(oauthSec)
                    .setTenantId(tenantId).build();

            return (ReqT) request;

        } else if (method.equals("updateUserProfile")) {
            UserProfileRequest userProfileRequest = (UserProfileRequest) reqT;
            AuthClaim claim = authorize(headers, userProfileRequest.getClientId());

            if (claim == null) {
                throw new NotAuthorizedException("Request is not authorized", null);
            }
            String oauthId = claim.getIamAuthId();
            String oauthSec = claim.getIamAuthSecret();

            long tenantId = claim.getTenantId();

            // updateUserProfile additionally needs a service-account access
            // token, performed on behalf of the SYSTEM user.
            AuthToken token = getSAToken(claim.getIamAuthId(), claim.getIamAuthSecret(), claim.getTenantId());
            if (token == null || token.getAccessToken() == null) {
                throw new NotAuthorizedException("Request is not authorized SA token is invalid", null);
            }

            return (ReqT) ((UserProfileRequest) reqT).toBuilder()
                    .setAccessToken(token.getAccessToken())
                    .setTenantId(tenantId)
                    .setClientId(oauthId)
                    .setClientSecret(oauthSec)
                    .setPerformedBy(Constants.SYSTEM)
                    .build();
        }

        return reqT;
    }
}
import React, { useState } from 'react';
import { useDispatch, useSelector } from 'react-redux';
import Table from '@material-ui/core/Table';
import TableBody from '@material-ui/core/TableBody';
import TableCell from '@material-ui/core/TableCell';
import TableContainer from '@material-ui/core/TableContainer';
import TableHead from '@material-ui/core/TableHead';
import TableRow from '@material-ui/core/TableRow';
import EditIcon from '@material-ui/icons/Edit';
import AddEditAssetDialog, { FormFields } from './AddEditAssetDialog';
import SettingsSection from '../../components/SettingsSection';
import IconButtonHeading from '../../components/IconButtonHeading';
import { changeAccountingCurrency, selectActiveAccountAccountingCurrency } from './accountsSlice';
/**
 * Settings panel for the active account: displays the accounting currency's
 * properties in a table and opens AddEditAssetDialog to change it.
 */
function AccountMainSettings() {
  // Controls visibility of the accounting-currency edit dialog.
  const [accountingCurrencyDialogOpen, setAccountingCurrencyDialogOpen] = useState(false);
  const dispatch = useDispatch();
  const accountingCurrency = useSelector(selectActiveAccountAccountingCurrency);

  // Dispatch the currency change and close the dialog on submit.
  const handleChangeAccountingCurrency = (fields: FormFields) => {
    dispatch(changeAccountingCurrency({currency: fields}));
    setAccountingCurrencyDialogOpen(false);
  };

  return (
    <React.Fragment>
      <SettingsSection>
        <IconButtonHeading
          variant="h6"
          gutterBottom
          title={'Accounting Currency'}
          icon={<EditIcon fontSize="small" />}
          onClick={() => setAccountingCurrencyDialogOpen(true)}
        />
        <TableContainer>
          <Table>
            <TableHead>
              <TableRow>
                <TableCell>Name</TableCell>
                <TableCell align="center">Ticker</TableCell>
                <TableCell align="center">Precision</TableCell>
                <TableCell align="center">Price Precision</TableCell>
                <TableCell align="center">Currency</TableCell>
                <TableCell align="center">Symbol</TableCell>
              </TableRow>
            </TableHead>
            <TableBody>
              <TableRow>
                <TableCell>{ accountingCurrency?.name }</TableCell>
                <TableCell align="center">{ accountingCurrency?.ticker }</TableCell>
                <TableCell align="center">{ accountingCurrency?.precision }</TableCell>
                <TableCell align="center">{ accountingCurrency?.pricePrecision }</TableCell>
                <TableCell align="center">{ accountingCurrency?.isCurrency ? 'Yes' : 'No' }</TableCell>
                <TableCell align="center">{ accountingCurrency?.symbol }</TableCell>
              </TableRow>
            </TableBody>
          </Table>
        </TableContainer>
      </SettingsSection>
      <AddEditAssetDialog
        open={accountingCurrencyDialogOpen}
        onDialogClose={() => setAccountingCurrencyDialogOpen(false)}
        asset={accountingCurrency}
        onSubmit={handleChangeAccountingCurrency}
      />
    </React.Fragment>
  );
}
export default AccountMainSettings;
def line2dict(inline):
    """Parse one todo.txt-style line into a dict of attributes.

    A line is considered completed when it begins with the todo.txt
    completion marker ``"x "``.

    :param inline: one raw line from the todo file
    :return: dict with key ``done`` (bool)
    """
    result = {}
    # Bug fix: the original also computed inline.split()[1] (presumably the
    # completion date) but never used it, and it raised IndexError on a bare
    # "x " line with no further tokens. The dead lookup is removed.
    result['done'] = inline[0:2] == 'x '
    return result
<reponame>fgrid/iso20022<filename>ClosingDate1Choice.go
package iso20022
// Choice between a date or a code.
type ClosingDate1Choice struct {

	// Closing date is defined as a choice between a date or a date and time format.
	Date *DateAndDateTimeChoice `xml:"Dt"`

	// Closing date is defined using a code or data source scheme.
	Code *Date2Choice `xml:"Cd"`
}

// AddDate initialises the Date branch of the choice and returns it so the
// caller can populate it.
func (c *ClosingDate1Choice) AddDate() *DateAndDateTimeChoice {
	c.Date = new(DateAndDateTimeChoice)
	return c.Date
}

// AddCode initialises the Code branch of the choice and returns it so the
// caller can populate it.
func (c *ClosingDate1Choice) AddCode() *Date2Choice {
	c.Code = new(Date2Choice)
	return c.Code
}
|
/**
 * Set the customerVirtualNetwork property: The name of virtual network to which Azure-SSIS integration runtime will
 * join.
 *
 * @param customerVirtualNetwork the customerVirtualNetwork value to set.
 * @return the ManagedIntegrationRuntimeTypeProperties object itself.
 */
public ManagedIntegrationRuntimeTypeProperties withCustomerVirtualNetwork(
    IntegrationRuntimeCustomerVirtualNetwork customerVirtualNetwork) {
    // Fluent setter: returns this so configuration calls can be chained.
    this.customerVirtualNetwork = customerVirtualNetwork;
    return this;
}
def prepare_update_properties(self, json_snippet):
    """Return the subset of this resource's properties that may be updated.

    Parses the 'Properties' section of *json_snippet* against the resource's
    schema and keeps only the entries whose schema marks them update-allowed.
    """
    parsed = Properties(self.properties_schema,
                        json_snippet.get('Properties', {}),
                        self._resolve_runtime_data,
                        self.name,
                        self.context)
    return {key: value for key, value in parsed.items()
            if parsed.props.get(key).schema.update_allowed}
/**
* Displays TCP details of a packet, assuming the packet is an TCP
*
* TCPPacket pkt - received packet
*/
void printTCPDetails(TCPPacket pkt) {
System.out.println("Protocol Info");
System.out.print(" Proto=TCP ");
System.out.print(" SrcIPAddr=" + pkt.src_ip.getHostAddress() + " ");
System.out.print(" DstIPAddr=" + pkt.dst_ip.getHostAddress() + " ");
System.out.print(" SrcPort=" + pkt.src_port + " ");
System.out.print(" DstPort=" + pkt.dst_port + " ");
System.out.println("");
} |
package errors
import "github.com/gofrs/uuid"
// Is reports whether err is an error value created by one of the
// constructors of this package and c has the same string representation
// (String method) and the same message (Message method) as err's code;
// otherwise it reports false.
func Is(err error, c Code) bool {
	e, ok := err.(derror)
	if !ok {
		return false
	}
	sameCode := e.c.String() == c.String()
	sameMessage := e.c.Message() == c.Message()
	return sameCode && sameMessage
}
// GetCode returns the Code of err and true; if err wasn't created by any of
// the constructors of this package, false is returned and the Code value can
// be ignored.
func GetCode(err error) (Code, bool) {
	if e, ok := err.(derror); ok {
		return e.c, true
	}
	return nil, false
}
// GetID returns the ID of err and true; if err wasn't created by any of the
// constructors of this package, false is returned and the ID value can be
// ignored.
func GetID(err error) (uuid.UUID, bool) {
	if e, ok := err.(derror); ok {
		return e.id, true
	}
	return uuid.UUID{}, false
}
|
class NetworkRecovery:
    """Generate a disaster and recovery object for storing simulation-related
    information and settings."""

    def __init__(self, network, sim_step):
        """
        :param network: infrastructure network object; a deep copy is kept so
            the simulated disruption never mutates the caller's network
        :param sim_step: simulation time-step size
        """
        self.base_network = network
        self.network = copy.deepcopy(self.base_network)
        self.sim_step = sim_step

    def schedule_recovery(self, repair_order):
        """Populate the event table from the network's disruptive events.

        :param repair_order: ordered components to repair; nothing is
            scheduled when it is empty
        """
        if len(repair_order) > 0:
            # Schedule component performance at the start and disruption of the simulation.
            self.initiate_schedule_tables()
            for _, row in self.network.disruptive_events.iterrows():
                component = row["component"]
                time_stamp = row["time_stamp"]
                fail_perc = row["fail_perc"]
                self.add_functional_state(component, 0)
                self.add_disrupted_state(component, time_stamp, fail_perc)
                if component.startswith("T_"):
                    self.fail_transpo_link(component)

            # Update transportation link flows and costs only if the event
            # actually touched the transportation network.
            disrupted_infra_dict = self.network.get_disrupted_infra_dict()
            if len(disrupted_infra_dict["transpo"]) > 0:
                self.update_traffic_model()
                self.transpo_updated_model_dict[
                    self.network.disruption_time
                ] = copy.deepcopy(self.network.tn)

    def _append_event(self, record):
        # Bug fix: DataFrame.append was removed in pandas 2.0; build a one-row
        # frame and concatenate instead (behaviour-equivalent, keeps
        # ignore_index semantics).
        self.event_table = pd.concat(
            [self.event_table, pd.DataFrame([record])], ignore_index=True
        )

    def add_functional_state(self, component, time_stamp):
        """Add a row to the event table for a component that is functional or completed repair."""
        # NOTE(review): the row key "components" does not match the
        # "component" column created in initiate_schedule_tables -- confirm
        # which name downstream consumers expect. Kept as-is to preserve
        # existing behaviour.
        self._append_event(
            {
                "time_stamp": time_stamp,
                "components": component,
                "perf_level": 100,
                "component_state": "Functional",
            }
        )

    def add_disrupted_state(self, component, time_stamp, impact_level):
        """Add a row to the event table for a component whose service is disrupted."""
        self._append_event(
            {
                "time_stamp": time_stamp,
                "components": component,
                "perf_level": 100 - impact_level,
                "component_state": "Service Disrupted",
            }
        )

    def fail_transpo_link(self, link_compon):
        """Fails the given transportation link by changing the free-flow travel time to a very large value.

        Args:
            link_compon (string): Name of the transportation link.
        """
        self.network.tn.link[link_compon].freeFlowTime = 9999

    def initiate_schedule_tables(self):
        """Create the (empty) long- and wide-format event tables."""
        columns_list = [
            "time_stamp",
            "component",
            "perf_level",
            "component_state",
            "crew_id",
        ]
        self.event_table = pd.DataFrame(columns=columns_list)

        column_list_et_short = [
            "component",
            "disrupt_level",
            "repair_start",
            "functional_start",
        ]
        self.event_table_wide = pd.DataFrame(columns=column_list_et_short)
Rayleigh–Taylor instability in strongly coupled plasma
Rayleigh–Taylor instability (RTI) is the prominent energy mixing mechanism when heavy fluid lies on top of light fluid under gravity. In this work, the RTI is studied in strongly coupled plasmas using two-dimensional molecular dynamics simulations. The motivation is to understand the evolution of the instability with the increasing correlation (Coulomb coupling) that happens when the average Coulombic potential energy becomes comparable to the average thermal energy. We report the suppression of the RTI due to a decrease in growth rate with increasing coupling strength. The caging effect is expected to be the physical mechanism for the growth suppression observed in both the exponential and the quadratic growth regimes. We also report that the increase in shielding due to background charges increases the growth rate of the instability. Moreover, the increase in the Atwood number, an entity to quantify the density gradient, shows the enhancement of the growth of the instability. The dispersion relation obtained from the molecular dynamics simulation of strongly coupled plasma shows a slight growth enhancement compared to the hydrodynamic viscous fluid. The RTI and its eventual impact on turbulent mixing can be significant in energy dumping mechanisms in inertial confinement fusion where, during the compressed phases, the coupling strength approaches unity.
Rayleigh-Taylor instability in strongly coupled plasma
Rauoof Wani 1,2 , Ajaz Mir 1,2 , Farida Batool 1 & Sanat Tiwari 1* Rayleigh-Taylor instability (RTI) is the prominent energy mixing mechanism when heavy fluid lies on top of light fluid under the gravity. In this work, the RTI is studied in strongly coupled plasmas using two-dimensional molecular dynamics simulations. The motivation is to understand the evolution of the instability with the increasing correlation (Coulomb coupling) that happens when the average Coulombic potential energy becomes comparable to the average thermal energy. We report the suppression of the RTI due to a decrease in growth rate with increasing coupling strength. The caging effect is expected a physical mechanism for the growth suppression observed in both the exponential and the quadratic growth regimes. We also report that the increase in shielding due to background charges increases the growth rate of the instability. Moreover, the increase in the Atwood number, an entity to quantify the density gradient, shows the enhancement of the growth of the instability. The dispersion relation obtained from the molecular dynamics simulation of strongly coupled plasma shows a slight growth enhancement compared to the hydrodynamic viscous fluid. The RTI and its eventual impact on turbulent mixing can be significant in energy dumping mechanisms in inertial confinement fusion where, during the compressed phases, the coupling strength approaches unity.
Rayleigh-Taylor instability (RTI) 1,2 occurs in a fluid system in which a heavier fluid (density, ρ h ) lies on top of a lighter fluid (density, ρ l ) under the effect of the gravity 3,4 . As it evolves, the modes at the fluid interface grow in amplitude, forming bubbles that rise due to buoyancy and spikes, which fall due to the gravity, eventually leading to turbulent mixing 5 . The instability is a primary mixing mechanism in supernovae explosions 6,7 , solar corona 8 , volcanic eruptions 9 , tokamaks 10 , Bose-Einstein condensate (BEC) 11,12 , paramagnetic fluids 13,14 , laser generated high-energy-density (HED) plasmas 15,16 , and inertial confinement fusion (ICF) 17,18 covering multiple orders of length scales. Usually, hydrodynamic models explain the RTI for fluids, whether neutral or charged, using the Navier-Stokes (NS) model without or with Maxwell's set of equations. This paper focuses on RTI growth and its nonlinear evolution in strongly coupled plasmas (SCP). Under strong inter-particle correlations, these plasmas reflect visco-elastic nature that can not appropriately be represented using the standard hydrodynamic model. Also, kinetic effects become significant enough to influence the continuum effects in such scenarios. We employ a classical two-dimensional (2D) molecular dynamics (MD) model to study the growth and mixing properties of RTI. The work highlights the impact of strong inter-particle correlations and includes contributions from all scales, including thermal fluctuations.
In the recent past, MD simulations have been carried out at a microscopic level to study several hydrodynamic instabilities such as Kelvin-Helmholtz instability (KHI) 19,20 , RTI 21-23 , Rayleigh-Bénard instability 24 , and bumpon-tail (BOT) instability 25 . Kadau et al. 21 first carried out a three-dimensional (3D) MD simulation for RTI in Lennard-Jones (LJ) fluids. Their results, in general, matched with linear stability analysis of the Navier-Stokes model and paved the way to explore mixing at microscopic scales. Further, Ding et al. 23 carried out RTI studies for Ar/He interfaces through LJ pairwise interactions. The work suggested the considerable difference in the formation and evolution of spikes at the microscopic level to the macroscopic scale. It also showed the detached droplet formation due to the thermal fluctuations. In both the works mentioned above, the focus was primarily on the role of microscopic fluctuations. Our focus is towards systems comprising a large number of charged particles, where dynamics is governed by the Coulomb force. As surrounding charges shield each charged particle, the effective pairwise potential takes the form of Yukawa/Debye-Hückel interaction potential given by 26 (1) Here r ij is the distance between the ith and jth particles, q is the charge on each particle and D is the Debye screening length. The Yukawa/Debye-Hückel fluids in nature include soft-matter systems e.g., charged colloids 27,28 and concentrated protein systems 29 , strongly coupled plasmas e.g., quark-gluon plasma 30,31 , and dusty plasma 32 , and many ionic-liquids 33,34 . Using the potential of the form Eq. (1), we have modelled RTI in SCPs. With known appropriate pairwise interaction, the MD provides the most fundamental and comprehensive picture of a system's micro-and macroscopic dynamical process. The advantage of MD is that it is based on the fundamental nature of the forces. 
Physical processes such as shear thinning 26 and negative entropy production 35 having their origin at fluctuations in natural fluids are missed in the most hydrodynamic models. The micro-scale fluctuations captured by MD allow us to probe the emergence of macroscopic hydrodynamic quantities as the averages of these micro-scale fluctuations inherently. Moreover, the purpose of carrying out MD is to overcome one of the limitations of hydrodynamic models in incorporating strong inter-particle correlation effects. Correlations let the viscous liquids reflect solid-like properties usually characterised as a family of visco-elastic fluids. Electrolytes, ionic liquids, and plasmas are the charged liquids that belong to this family, where individual particles interact via Yukawa/Debye-Hückel interaction potential. Especially plasmas, in extreme conditions (high charge on particles, extremely low temperatures, or at high density), reflect solid-like properties and also show the presence of transverse shear waves. Their solid-like reflection can be quantified through the coupling parameter, Ŵ as: The coupling strength is defined as the ratio of average Coulomb potential energy E p and average thermal kinetic energy E k . Here a = (πn) −1/2 is the average inter-particle separation or Wigner-Seitz radius, and n is the areal number density. T is the temperature of particles. The Yukawa nature is quantified by screening parameter κ = a/ D . The limit κ → 0 represents a pure Coulomb system, while the limit κ → ∞ represents the hard-sphere like interactions. Two dimensionless parameters characterize the thermodynamic and transport properties of Yukawa one-component plasmas; the Coulomb coupling strength Ŵ and inverse Debye screening length κ 36,37 . We found the enhanced correlations (i.e., the increase in Ŵ ) suppress the RTI in Yukawa fluids. This result is supported by the findings of Das et al. 38 and Avinash et al. 
39 that propose the reduced growth of instability with increasing coupling strength using a phenomenological generalized hydrodynamic (GHD) visco-elastic model. Their results from the GHD model suggest a decrease in growth rate as γ = gkA t − η/τ m k 2 . Here, η and τ m are the viscosity and relaxation time, respectively, and both depend on the coupling strength Ŵ . In weakly coupled plasma, the growth rate attains the standard incompressible hydrodynamic limit gkA t 38,39 . Also, g is the acceleration due to the gravity, A t = (ρ h − ρ l )/(ρ h + ρ l ) is the Atwood number to quantify the density gradient and k is the wave vector of the excited mode. The penetration depths of spikes H S (t) into the light fluid and bubbles H B (t) into the heavy fluid are usually governed quadratically in time H S,B = γ q S ,q B A t gt 2 using the inherent inviscid NS model. The quadratic growth rate of spike γ q S and bubble γ q B is found to be dependent and independent of the variation of the Atwood number A t respectively using the continuum NS model as well as the LJ atomistic simulations 40 . It isnoteworthythat the viscosity for suchstrongly correlated mediums acts to support elasticity rather than playing a viscous damping role. We have also studied the effect of potential shielding over the instability explicitly through parameter κ . The shielding was found to increase the diffusive nature of the medium and the growth rate of RTI. Both these observations may be seen as a development of RTI on an effectively lower value of coupling strength which may be approximated as Ŵ * = Ŵ exp (−κ) 41 .
While results in this paper are generalized and represent any fluid with pairwise Yukawa interactions, our terminologies and approach are inclined towards strongly coupled plasmas 42,43 . The SCPs include dusty plasma 44 , ultracold plasmas and dense plasmas 49 depending on charge, temperature and density as factors responsible for strong correlations. In all three forms of SCPs, the interaction in bulk plasma is represented by shielded Coulomb potential. Our results could interest the inertial confinement fusion community as RTI is an unavoidable mixing mechanism in the ICF process. During the ICF process, the plasma has been claimed to be reaching close to moderate coupling strengths 50,51 . In such coupling regimes, the RTI growth should be lower than that predicted by hydrodynamics models.
Results
RTI: natural growth through maximally growing mode. We let the equilibrated system evolve naturally after removing the partition between heavy (top) and light (bottom) fluids as in Fig. 1a. Initially, the instability grows from thermal fluctuations at the interface. The insets in Fig. 1b,c show the early stage growth of modes growing from fluctuations. Quickly, the maximally growing mode dominates due to the higher growth rate and becomes visible. At later stages of the evolution, the maximally growing mode (of the chosen system) typically corresponding to k ≈ 4k 0 is visible in subplots Fig. 1d,e at times t = 4000 and t = 5000 ω −1 pl respectively. Here k 0 = (2π)/L x is the fundamental mode at the interface in x-dimension. We observe the growth of mushroom clouds over the interface, a characteristic feature of RTI. The bubbles of lighter fluid (in blue) can be seen moving upwards against the gravity, and the spike of the heavier fluid (in green) penetrates, the lighter fluid downwards along the direction of gravity. A slight compression of the light fluid layer due to the early-stage free fall-like motion of the heavy fluid is visible during evolution from 0 to 500 ω −1 pl . But the same has no significant impact on characteristic RTI features. We have brought this effect to a negligible level in the rest of the simulations by adjusting the system dimension as L y = 10L x . We will remain confined to the same system configuration throughout the manuscript.
As the focus of present studies is to compare the growth rate dependence on various physical parameters of the strongly coupled plasma, RTI growing via natural modes is not suitable. Individual modes cannot be tackled precisely as they grow simultaneously in a naturally evolving system. As the prime motive is to observe the effect www.nature.com/scientificreports/ of Ŵ , κ and A t on the growth, it is useful to fix the initial perturbation on individual mode dominantly. It also helps in capturing the early exponential dynamical regime. The early exponential growth (in linear regime) is hard to analyse. Moreover, it takes longer to explicitly see the maximally growing mode if the system evolves naturally. Thus, we will artificially perturb the system at the interface for the rest of the paper.
RTI: growth through single-mode perturbation.
To observe single-mode evolution, we perturb the interface with the mode k 0 = (2π)/L x that fit the system along x-direction. The sinusoidal perturbation added to the velocity of particles in a region of a few average inter-particle thicknesses at the interface as shown in Fig. 8b ("Methods"). The form of the velocity perturbation is v y = v thermal y + ξ 0 cos (k x x) with ξ 0 = 1.5v thermal y and k x = k 0 . It takes about 10 ω −1 pl for velocity perturbation to reflect in particle positions and hence in the density profile. Figure 2 shows the excitation of single-mode through perturbation and its evolution due to the RTI. Subplot Fig. 2b clearly shows a growing sinusoidal perturbation with wavelength corresponding to system width L x . In time, as shown in subplots Fig. 2b-i, the sinusoidal perturbation grows with heavy fluid penetrating within the light fluid as a spike that eventually forms the mushroom structure at later stages. Simultaneously, the bubble of the light fluid grows upward into the heavy fluid. Though, the growth of the bubble is usually slow compared to the spike growth and attributed to the A t . For larger values of A t , the free-fall of the spike is expected, leading to higher growth of spike compared to the bubble. Figure 3A shows the spike penetration ( H s = H s (0) − H s (t) ) within the light fluid for the system at coupling strength Ŵ = 10 . Here H s (0) is the position of unperturbed interface i.e., L y /2 and H s (t) is the position of the tip of spike in time. We have recorded the spike amplitude evolution for 6000 ω −1 pl s as shown in blue line with square marker. It broadly passes through the three dynamical stages, (1) exponential growth (green-colored), (2) spike and bubble formation with secondary KHI (red-colored) and (3) the nonlinear saturation leading to the turbulent mixing (black-colored) of RTI. 
In the first region (inset plots from (a) to (d)), the amplitude of the spike grows exponentially as per the linear stability analysis. An incompressible, inviscid hydrodynamic model 3 suggests the RTI growth rate as $\gamma_e = \sqrt{g A_t k_x}$. Though in the present scenario, the viscosity and solid-like properties also play a significant role in deciding the growth rate. In the second region (inset plots (e) to (h)), the sinusoidal perturbation evolves nonlinearly into bubbles and spikes of lighter and heavier fluids, respectively. The secondary KHI that develops due to the shear velocity between two penetrating fluids gives rise to the formation of mushroom clouds. Finally, in the third region (inset plots (i) to (l)), the nonlinear saturation of the instability is observed as the spike amplitude almost stops growing. During this stage, turbulent mixing occurs, distributing the energy associated with the excited/perturbed mode to smaller scales up to kinetic levels.
We understand that being a kinetic simulation, there will be statistical fluctuations in the growth of RTI for different replicas of the same ensemble. It is computationally expensive to carry out multiple simulations with different initial particle configurations. Though, to provide an idea of the possible statistical error, we have attempted multiple simulations for a single case as in Fig. 3B,C. Each simulation starts with a different arrangement of position and velocity but the same Ŵ = 10 , κ = 0.1 and A t = 0.7 . They all follow the same trend in growth, suggesting statistical error to be very small and have the least impact on RTI growth. This suggests that the statistically different replicas of systems do not impact the growth rate of the RTI. We have also plotted the error bar over the average growth rate obtained in each replica and finally best fitted to get the growth rate in each regime. Further, kinetic simulations make it challenging to demarcate a clear separation of linear growth, quadratic growth, and full nonlinear evolution. Though our best fits are a good representation in giving the message that the growth rate decreases with the increase in coupling strength Ŵ without any ambiguity, as shown in Fig. 4. pl s, we observed the spike amplitude growth following the quadratic dependence over the time H S (t) = γ q A t gt 2 in Fig. 3C. The exponential regime has a higher growth rate due to the abundance of free energy available at the early evolution stage in the system. During the quadratic evolution stage, the growth rate decreases because the instability is heading towards the nonlinear saturation stage due to the exhaustion of free energy available in the system. Though we have not explicitly calculated the free energy changes in the simulation, we understand that due to complex nature of nonlinearity not all the available free energy might get exhausted at the saturation stage.
Effect of Coulomb coupling strength (Ŵ) on the growth of single-mode RTI. Here, we estimate the growth rate of RTI at three different values of coupling strengths Ŵ = 10 , 50 and Ŵ = 100 . Figure 4 shows the time evolution of instability in single-mode from subplots (a) to (i). Each row represents a different value of Ŵ increasing from top to bottom. The snapshots of spike amplitude at different times clearly show the decrease in the growth rate of RTI as Ŵ increases from 10 to 100. The exponential γ e and quadratic γ q growth rates for different coupling strengths have been calculated. We found the exponential growth rate γ e and the quadratic growth www.nature.com/scientificreports/ rate γ q both decreasing with the increase in coupling strength. Figure 5a shows the exponential growth rate for single-mode RTI at the early evolution stage that indicates a reduction in the growth rate with increasing Ŵ . Figure 5b shows the quadratic growth of single-mode RTI at a later evolution stage that also indicates a decrease in growth with an increase in Ŵ . The solid traits get prominent with increasing coupling strength Ŵ . Thus, the RTI growth rate reduces as the medium attains more and more solid-like properties. Figure 5c shows the RTI growth rate variations with coupling strength for single-mode perturbation. The exponential growth rate reduction with Ŵ is significant compared to the quadratic growth rate.
Effect of screening parameter (κ) on the growth of single-mode RTI. In plasmas and electrolytes, the Coulombic interaction between charges is shielded by surrounding charged particles leading to effective Debye-Hückel interaction among themselves. The shielding parameter κ reflects the effective range over which a single charge's electric field is felt. Hence, the change in κ also impacts the effective coupling strength of medium that has been quantified as Ŵ ⋆ = Ŵ exp (−κ) 41 and later was improvised to Ŵ ⋆ = Ŵ(1 + κ + κ 2 /2) exp (−κ) 52 .
In both interpretations of Ŵ ⋆ , the correlation gets weak with the increase in κ . We observe the increase in the growth rate of instability as κ increases in molecular dynamics simulation of RTI. Figure 6a shows the evolution of RTI for three different values of screening parameter κ = 0.1, 1 and κ = 2 . It is visible at early times, in the exponential growth regime, the growth rate γ e shows a slight enhancement with increasing κ . The differences in growth rate are small and well within the statistical error range. At later times, in the quadratic growth regime, the spike amplitude shows a significant increase in height ( H S ) with the increasing κ . Table 1(left) lists out the calculated values γ e and γ q for three κ values. As κ varies 0.1 → 2 , the growth rate significantly increase from 0.105 → 0.127 in the quadratic regime. We also observe the reduced and diffused mushroom-cloud formation at the tip of the spike with increasing shielding. This can be interpreted as the decrease in the effective coupling, leading to increased diffusivity.
Effect of Atwood number (A t ) on the growth of single-mode RTI. The Atwood number A t reflects
the density contrast of heavy and light fluids. In the standard hydrodynamics, linear analysis suggests squareroot dependence between the exponential regime growth rate and the Atwood number. Also, at higher values of A t , spike penetration is significantly larger compared to the bubble rise due to the free fall. For SCPs, Fig. 6b shows the RTI growth for three values of A t = 0.3, 0.5 and A t = 0.7 from top to bottom rows. Corresponding growth rates in exponential, γ e and quadratic regime, γ q are shown in Table 1(right). We found the growth rate increasing with A t in both growth regimes. We also observe that the mushroom-cloud vortices are prominent
Discussion
With the increase in coupling strength Ŵ , the plasma shows traits of solid. It remains in an intermediate state with coexisting features of fluid and solid before reaching a critical value Ŵ c at which the complete crystallization occurs. We have focused on this intermediate paradigm for a one-component shielded plasma, keeping the range of Ŵ between 10 and 100. The main result is the suppression of the RTI growth rate with increasing coupling strength Ŵ and may have many physical explanations. A possible physical mechanism is that the charged particles experience an increasing caging effect 53 . Now, any collective mode or dynamics has to invest more energy in taking particles out of their inertia under the confining potential. One may also expect the slow down against the increase in viscosity with coupling strength in the kinetic regime. We have validated our explanation based on this approach by calculating the hydrodynamic and kinetic viscosity domains' growth rate and observed a consistent growth reduction from Ŵ = 1 to Ŵ = 100 values. To further support our explanation indirectly is to look into the effect of κ on the growth rate. With increase in κ (i.e., with decrease in effective coupling Ŵ ⋆ ), the growth rate increase. Our results regarding the suppression of RTI growth rate are also supported by the GHD model 38,54 . The visco-elastic GHD fluid model and molecular dynamics are the two useful approaches to analyze the collective dynamics in the intermediate coupling regime. For SCPs, the GHD model is useful in explaining the 55,57 , coherent structures in strongly coupled plasmas 58 , and dynamic properties of SCPs 59 in the linear dynamical regime. In nonlinear regime the phenomenological GHD model has also predicted the recurrences of KHI 20,56 , elastic-turbulence 60,61 and cusp like structures 62 , which need a quantitative experimental and other simulation support. 
However, this model relies on MD simulations for transport coefficients for nonlinear dynamical studies. On the other hand, we attempt a realistic SCP model that includes all transport properties using a molecular dynamics simulation approach. MD also covers physics involvement through all possible scales from fluctuations to system size. The MD has been a realistic representative of SCPs 42-49 for providing a better insight into collective processes when computation power is no more a restriction. Our MD simulations suggest that the strongly coupled plasma under intermediate states supports the fluid instability. This indicates that a quantitative fluid model, different from hydrodynamics and inline to GHD models, may be developed. Our results support the findings of the GHD model on the suppression of RTI in the linear regime. We further found that the suppression of growth rate with Ŵ is also visible in the quadratic nonlinear dynamic regime. The MD results on suppression of RTI growth rate with Ŵ is qualitatively supported by the GHD model predictions in the linear growth regime as in Das et al. 38 and Avinash et al. 39 . We have not made any growth rate comparison in the nonlinear growth regime of dynamics.
We study the growth of modes at different scales in the system through the dispersion relation. Once we established a way to calculate growth for single-mode, we extended our studies up to six modes in the system. We have given six perturbations each individually with k x = nk 0 , where n = 1, 2, . . . , 6 is the mode number (see Supplementary 2 for k x = 2 and 3 modes). Each time, we have calculated the growth rate in the exponential and the quadratic regimes. Using the exponential growth rate data for different k values we draw the dispersion relation as shown in Fig. 7. We also plotted the dispersion relation of inviscid and viscous fluid for the comparison study. For the latter case, the value of viscosity is taken from MD simulation results in the literature 26 . As in our system configuration, the viscosity value is different from top to bottom region, and a mean value is used for fluid dispersion relation calculation. Figure 7 suggests that the hydrodynamic inviscid growth rate is higher compared to both the viscous as well as SCP. This is due to the lack of a damping mechanism in an inviscid fluid. However, the growth rate for the SCP is obtained to be higher than the pure viscous fluid. We do not have any definite understanding of the possible physical reason behind such difference. One possible reason can be thought of that some part of the viscous contribution is now being used towards the solid-like nature. We provide a probable qualitative explanation of why the RTI growth rate falls within the limits of inviscid and viscous fluids in the strong coupling. The explanation is based on the phenomenological GHD model. For the limit, τ m → 0 , the GHD model represents a viscous fluid. In the opposite limit of τ m → ∞ , the growth rate from Das et al. 38 and Avinash et al. 39 can be referred as γ = gk x A t − η/τ m k 2 x . We see the growth rate will lead towards the inviscid hydrodynamic limit as τ m → ∞ . 
Thus, we may consider the growth rate values within viscous and inviscid limits for any strong coupling intermediate parameter regime. A similar result has earlier been reported for KHI using the GHD model 63 . For a given set of Ŵ, κ , the growth dispersion relation for MD seems first to increase, reach an optimal value and then decrease. For Ŵ = 10, κ = 0.1 , this optimal value reaches at k x a ∼ 0.75 . The possible reason may be that the viscous effect is dominant over the growth of RTI compared to the low wavenumber regime for high wavenumbers. The GHD model does not quantitatively predict this optimal wavenumber. Though, qualitatively, the GHD model-based RTI growth rate γ = gk x A t − η/τ m k 2 x Figure 7. Comparison of dispersion relation of RTI for inviscid (blue stars), viscous (red squares) and Yukawa (black circles) fluids. Inviscid-hydrodynamic growth rate 3 is γ e = gA t k x and viscous-hydrodynamic growth rate (Eq. (8) in Ding et al. 23 ) is γ e = ν 2 k 4 x + gA t k x − νk 2 x , with shear viscosity ν taken from Donkó et al. 26 . Yukawa fluid growth rate is obtained from MD simulations for Ŵ = 10 , κ = 0.1 , and A t = 0.7. www.nature.com/scientificreports/ does predict an optimal growth rate as we increase the wavenumber. Brown et al. 50 and Lyon et al. 51 suggest that the plasma accesses the moderate coupling regime during the ICF process, a stage appears when density is large enough, the temperature has not raised enough . Also, during the ICF process, the strong density gradient at the spherical capsule interface is prone to RTI when lasers squeeze the capsule from all directions. Suppression of RTI can be advantageous in such a parameter regime. This scenario is represented as strongly coupled and shielded ions in the present model. However, we must caution that our results include only electrostatic physics. 
Perturbations on specific modes at the interface in our studies can be visualized in line with the askew interface created by finite laser beam assembly. While present results guide the importance of strong coupling over the ICF process, full-scale modelling with spherical geometry, appropriate density difference, and acceleration caused by the laser assembly can provide a qualitative picture. Present studies can be an excellent test-bed to explore turbulent characteristics with RTI as a seed for nonlinear mixing. Direct particle-based modelling eliminates any grid-dependent scaling associated with fluid models. It is also a generalized approach that helps understand linear and nonlinear fluid processes lacking a quantitative fluid representation. Most rheological fluids fall under this category where fluid behaviour is far from Navier-Stokes governing dynamics. It would be interesting to know if Kolmogorov scales get altered for SCPs or how closely the turbulent scaling follows elastic turbulence features at low Reynolds number flows. Further, the kinetic simulations will help check the heating rate of the medium during the mixing as the energy eventually gets lost in the form of temperature. Finally, it will be worth comparing the computational cost incurred for kinetic and fluid models to visualize equivalent turbulence features. In the present work, we explicitly observe the development and progress of RTI in SCPs (or other representative Yukawa fluids) at different coupling strengths. A few open questions such as the effect of compressibility, roles of surface tension, and Reynolds number are under exploration and will be reported elsewhere. Compressibility could be a possible cause of sedimentation at high acceleration values. For now, we have significantly minimized it by reducing the acceleration due to gravity. We have tested the RTI for different gravity values to establish the elimination of sedimentation before choosing a value. 
A comprehensive study of sedimentation in Yukawa fluids is carried out by Charan et al. 22 who reported asymmetry effect arising due to gravitation in the lighter fluid. We also studied the effect of dimensionality on presented growth rate values. We found that the effect of the coupling strength on the RTI growth in 3D simulations is also of a suppressing nature and is qualitatively similar to what we reported in 2D simulation results. To get an idea of how different can be the effect of strong coupling in a 3D system compared to the present 2D system, we have carried out a few 3D simulations keeping most of the features of plasma the same and only changing the 2D slab ( L y = 10L x ) to a 3D beam with ( L y = 10L x = 10L z ). We changed the initial perturbation from a line sinusoidal perturbation to a similar form of sheet perturbation in 3D simulations. While the growth rates for the 2D and 3D cases are different, the impact of coupling strength is the same, i.e., the growth of instability decreases with the increase in the coupling strength ( Figure S1 in the Supplementary). Being molecular dynamics studies governed by electrostatic Coulomb potential and classical equation of motion, electromagnetic effects can't be incorporated into our model. Such plasma modelling with the significant role of the self-consistently generated magnetic field and kinetic model is required and is done using Particle-In-Cell (PIC) simulations. However, PIC simulations are not suitable for explicitly looking into the strong coupling effects due to the small-angle collision approximation. Thus for motives of understanding strong coupling effects, molecular dynamics is an excellent simulation tool at the expense of heavy computation. Further, while our results represent all classical strongly coupled plasmas, the choice of the parameters is specific to dusty plasmas. 
For such systems, the time scales are very slow, and the velocity of heavy dust particles is slow enough that a self-generated magnetic field is insignificant for such a physical scenario. Also, to externally magnetize such a medium, an enormous magnetic field of about 4-10 Tesla is required, available only at a few facilities worldwide. The external magnetic field can be modelled in MD simulations by modifying the Velocity-Verlet algorithm. The same we will extend as future scope of our work.
Methods
The classical molecular dynamics simulation is carried out using open-source Large-scale Atomic/Molecular Massively Parallel Simulator (LAMMPS) package 67 for a system in which particles interact through repulsive Debye-Hückel/Yukawa potential as given by Eq. (1). The particle trajectories r i (t) are obtained by integrating the equation of motion mr i = −∇ φ ij . The form of interaction potential and the charge on particle is same for all particles in the system. A two-dimensional rectangular system in the x-y plane is configured, keeping periodic and reflecting boundaries in x and y directions, respectively. Throughout the paper, we have followed one particular system dimension (i.e., rectangular system except in Fig. 1) to keep the wave-number associated with single-mode perturbation identical. Thus, while comparing growth rates, we could focus on the effect on one parameter from Ŵ , A t and κ at a time. Though the results are generalized and can be produced for any system dimension that can reflect collective dynamics. The system is divided into two regions top (high-density fluid ρ h ) and bottom (low-density fluid ρ l ), separated by a reflecting interface at the middle in the y-direction. The ρ h and ρ l can be expressed in terms of number density and mass through the relation ρ s = m s n s with s = h, l . In this work, the number density of both species is kept the same i.e., n h = n l = n and the mass density has been changed through varying the mass of top and bottom fluids. The advantage of keeping number densities the same for heavy and light fluids is that the complete system remains at one Ŵ value in the initial configuration. This configuration helps us understand the effect of coupling strength on instability growth. Thus, the Atwood number, A t depends on the difference of masses of both species. 
In simulations, we choose a value of A t , fix the mass ( m l ) of the light fluid and then calculate the mass of the heavy fluid using m h = m l (1 + A t )/(1 − A t ) . A t ranges from 0 ( ρ h = ρ l ) to 1 ( ρ h >> ρ l ). The parameters used for the simulation are tabulated in Table 2. While the simulation parameters look unusual for hydrodynamic fluids and plasmas, they are typical of laboratory dusty plasma experiments, 68,69 in which each heavy dust grain acquires a large charge. Also, such dusty plasma experiments are often carried out in microgravity conditions and zero-gravity flight experiments where gravity values are close to what has been adopted in the present work. The system lengths and timescales have been normalized in terms of the average inter-particle separation $a = (n\pi)^{-1/2}$ and the plasma period $\omega_{pl}^{-1}$ of the light species. While the simulation results apply to any liquid with a Yukawa form of inter-particle interaction potential, the normalization of timescales is motivated by its plasma representation, where $\omega_{pl} = \left( n_l q^2 / (2 a \epsilon_0 m_l) \right)^{1/2}$ is the characteristic plasma frequency of the light fluid. Figure 8a shows the initial two-fluid system configuration with step mass density profile at the interface in the y-direction. The gravity is in the negative y-direction. Particles of heavy and light masses are created randomly and homogeneously in top and bottom regions, respectively. Both density regions (i.e., top and bottom in Fig. 8) have been independently equilibrated using Nosé-Hoover 70,71 thermostat for 400 ω −1 pl , enough time for both regions to attain the required temperature hence the coupling strength. Further, we detached the thermostats and let the system evolve under an NVE ensemble condition for the next 400 ω −1 pl . During this phase, we observed no heating, a reflection of a naturally equilibrated system. At this stage, the system is ready for RTI studies. 
Under the NVE conditions, we remove the interface between heavy and light fluids under gravity and let the instability evolve. A maximally growing mode will appear unstable from natural perturbations. To study the single mode or double mode instability growth specifically, we apply weak artificial perturbation as shown in Fig. 8b,c. Mass of lighter species, m l 6.9 × 10 −13 kg 6.9 × 10 −13 kg Acceleration due to gravity, g 10 −4 g Earth ; g Earth = 9.81m/s 2 10 −4 g Earth ; g Earth = 9.81m/s 2 |
package cmd
import (
"fmt"
"github.com/spf13/cobra"
"strconv"
)
// addCmd represents the add command
// addCmd implements the "add" subcommand: it reads two required integer
// flags, validates them via PreRunE and prints their sum.
var addCmd = &cobra.Command{
	Use:   "add",
	Short: "Add two numbers and display the results",
	Long: `Add two numbers and display the results.
It will take two parameters only numerical value else it will throw error`,
	// Validate the flag values before the command body runs.
	PreRunE: func(cmd *cobra.Command, args []string) error {
		return checkIfValueAreNotLarge(cmd)
	},
	// The actual work: compute and print the sum.
	Run: func(cmd *cobra.Command, args []string) {
		addTwoNumbersAndDisplay(cmd)
	},
}
// init registers the add subcommand and its two mandatory integer flags.
//
// Fix: the original assigned the PreRunE function value to err and
// nil-checked it, which never reports anything useful, while the real
// error sources — MarkPersistentFlagRequired — were ignored. Those errors
// are now checked instead.
func init() {
	rootCmd.AddCommand(addCmd)
	addCmd.PersistentFlags().IntP("ValueA", "A", 0, "-A <some value>")
	addCmd.PersistentFlags().IntP("ValueB", "B", 0, "-B <some value>")
	// Tell the CLI that these are mandatory params.
	if err := addCmd.MarkPersistentFlagRequired("ValueA"); err != nil {
		fmt.Println("Error: ", err)
		return
	}
	if err := addCmd.MarkPersistentFlagRequired("ValueB"); err != nil {
		fmt.Println("Error: ", err)
		return
	}
}
// addTwoNumbersAndDisplay reads the ValueA and ValueB flags from cmd,
// adds them and prints the sum to stdout. The flags are declared as
// integers, so the Atoi conversions cannot fail in practice.
func addTwoNumbersAndDisplay(cmd *cobra.Command) {
	first, _ := strconv.Atoi(cmd.Flag("ValueA").Value.String())
	second, _ := strconv.Atoi(cmd.Flag("ValueB").Value.String())
	sum := first + second
	fmt.Println("Sum: ", sum)
}
func checkIfValueAreNotLarge(cmd *cobra.Command) error {
a,_ := strconv.Atoi(cmd.Flag("ValueA").Value.String())
if a > 1000 {
return fmt.Errorf("Too large value of flag: %s : %d", "ValueA", a)
}
b,_ := strconv.Atoi(cmd.Flag("ValueB").Value.String())
if b > 1000 {
return fmt.Errorf("Too large value of flag: %s : %d", "ValueB", b)
}
return nil
} |
U.S. evicting Point Reyes oyster farmer — U.S. closing decades-old operation to restore Drakes Bay wilderness
Photo caption: Drakes Bay Oyster Co. employees Francisco Manzo (left) and Alonzo Olea process the shellfish on federal property. Photo: Michael Macor, The Chronicle
U.S. Interior Secretary Ken Salazar told a popular oyster farm at Drakes Bay on Thursday to pack up and leave, effectively ending more than a century of shellfish harvesting on the picturesque inlet where Europeans first set foot in California.
Salazar's decision ends a long-running dispute between the Drakes Bay Oyster Co. and the National Park Service over the estuary at Point Reyes National Seashore where Sir Francis Drake landed more than 400 years ago.
The National Park Service intends to turn the 2,700-acre area into the first federally designated marine wilderness area on the West Coast, giving the estuary special protected status as an unaltered ecological region. To do that, Salazar rejected the oyster company's proposal to extend its 40-year lease to harvest shellfish on 1,100 acres of the property.
Salazar gave the farm 90 days to move out, issuing his decision a day before the lease was set to expire and one week after visiting the Point Reyes National Seashore for a tour.
"After careful consideration of the applicable law and policy, I have directed the National Park Service to allow the permit for the Drakes Bay Oyster Co. to expire at the end of its current term and to return the Drakes Estero to the state of wilderness that Congress designated for it in 1976," Salazar said in a statement. "I believe it is the right decision for Point Reyes National Seashore and for future generations who will enjoy this treasured landscape."
The estuary, known as Drakes Estero, is home to tens of thousands of endangered birds, including 90 species, and the largest seal colony on the coast. It is within the boundaries of the national seashore, which is visited by 2 million people a year, providing $85 million in economic activity and 1,000 jobs to surrounding communities, according to park officials.
Salazar had the option to extend the lease for 10 years after Sen. Dianne Feinstein, D-Calif., included the provision in a rider on an appropriations bill.
Owner shocked
Kevin Lunny, a local rancher who bought the shellfish operation from Johnson Oyster Co. in 2004, said he was shocked when he got a call directly from Salazar on Thursday morning telling him that the 40-year occupancy agreement would not be renewed.
"It's disbelief and excruciating sorrow," he said of the mood at the oyster farm, where 30 people are employed, including seven families that live on the property.
"There are 30 people, all in tears this morning, who are going to lose their jobs and their homes," Lunny said. "They are experts in seafood handling and processing in the last oyster cannery in California, and there is nowhere for them to go."
Many local conservationists were nevertheless overjoyed. Congressional representatives, including Rep. Lynn Woolsey, D-Petaluma, former Park Service employees, the Sierra Club, Natural Resources Defense Council, the Wilderness Society and the Marin Audubon Society applauded the decision.
"A heartfelt salute to Secretary Salazar for his wisdom and statesmanship in choosing long-term public good over short-term private interests," said Sylvia Earle, a local environmentalist and the former chief scientist at the National Oceanic and Atmospheric Administration. "Protecting Drakes Estero, America's only West Coast marine wilderness park, will restore health and hope for the ocean and for the interests of all of the people of this country."
Impact on supply
The decision to shut down the shellfish operation and establish a marine wilderness will have a major impact in rural west Marin County, where many consider the oysters from Drakes Bay a delicacy. The vast coastal area is home to 15 historic dairy farms and cattle ranches, sheepherders and organic farmers who live and work next to, and in some cases on, National Park Service land.
The oyster farm has been in business for nearly 80 years. It is California's largest commercial shellfish operation, producing 460,000 pounds of shucked oysters a year, an amount the proprietor says is almost 40 percent of all the oysters harvested in California. It far outstrips the production of growers in nearby Tomales Bay.
Salazar, who is a strong supporter of sustainable agriculture, promised to maintain the seashore's ranching and farming heritage, directing Park Service officials to pursue extensions of agriculture permits from 10 to 20 years within the seashore's pastoral zone, but the promise did little to calm the many shellfish lovers along the coast.
Wade Childress, 59, of San Anselmo, was among the afternoon crowd who stopped by the Drakes Bay oyster shack after news spread that the doors would soon close. Childress said he came to the shack as a boy to eat oysters with his parents and later took his daughter for a tradition they called "seafood day."
Oyster lovers shocked
"I'm mourning right now," Childress said.
Other customers called it a travesty perpetrated by the government.
"This is a good organic food source in our backyard," said Sarah Cane, 48, of San Rafael. "We can co-exist. A department head in Washington, D.C., shouldn't be able to tell this community it can't eat oysters."
There were still unanswered questions as Lunny, his son, Sean, and daughter, Brigid, tried to comfort longtime customers. One was what Lunny is expected to do with the millions of oysters that are still in plastic grow bags in the bay, many of which won't reach market size for another two years. The order requires him to immediately begin bringing them onshore.
"We've got 5 to 10 million juvenile oysters out there," Lunny said. "So what do we do with these oysters, just kill them all? That would be forcing us to destroy the entire inventory, which has incredible financial consequences."
Wilderness advocates said Lunny knew when he bought the oyster farm that the lease was going to expire and should have prepared.
"This isn't about an oyster company, for us," said Neal Desai, the associate director of the National Parks Conservation Association. "This is about taking care of our national parks for future generations and honoring a decades-old agreement to protect our heritage and create a marine wilderness. Letting the lease expire, removing all the motorboats and removing all the non-native oysters is good for the environment."
Powerful supporters
Lunny's request for an extension had powerful supporters, including Feinstein, Marin County Supervisor Steve Kinsey and former Peninsula Rep. Pete McCloskey, who put up a major fight to keep the operation going.
Park officials had long contended that the oyster company was harming the ecosystem, but Lunny's supporters accused them of selectively presenting information, misrepresenting facts and essentially fudging data in an effort to oust the oyster company.
The complaints gained momentum when the National Academy of Sciences, and the Interior Department's office of the solicitor found major flaws in Park Service reports, including what they termed mistake-ridden and, in some cases, biased work by park scientists.
"I am extremely disappointed," Feinstein said Thursday in a statement. "The National Park Service's review process has been flawed from the beginning with false and misleading science, which was also used in the Environmental Impact Statement. The secretary's decision effectively puts this historic California oyster farm out of business. As a result, the farm will be forced to cease operations and 30 Californians will lose their jobs."
Salazar ordered the Park Service to help the oyster company remove property, oysters and racks from the estuary and assist oyster company employees in relocating and finding jobs and employment training.
"We are taking the final step to recognize this pristine area as wilderness," Salazar said. "The estero is one of our nation's crown jewels, and today we are fulfilling the vision to protect this special place for generations to come." |
/**
* Constructs and signs outgoing URLs using the JWT protocol.
* See {@link JwtService} and {@link JwtAuthorizationGenerator} for more details.
*/
public class JwtSigningRemotablePluginAccessor extends DefaultRemotablePluginAccessorBase {
private final ConsumerService consumerService;
private final ConnectApplinkManager connectApplinkManager;
private final JwtAuthorizationGenerator authorizationGenerator;
public JwtSigningRemotablePluginAccessor(ConnectAddonBean addon,
Supplier<URI> baseUrlSupplier,
JwtJsonBuilderFactory jwtBuilderFactory,
JwtService jwtService,
ConsumerService consumerService,
ConnectApplinkManager connectApplinkManager,
HttpContentRetriever httpContentRetriever) {
super(addon.getKey(), addon.getName(), baseUrlSupplier, httpContentRetriever);
this.consumerService = consumerService;
this.connectApplinkManager = connectApplinkManager;
this.authorizationGenerator = new JwtAuthorizationGenerator(jwtService, jwtBuilderFactory, sharedSecretSupplier(getAppLink()), consumerService, URI.create(addon.getBaseUrl()));
}
@Override
public String signGetUrl(URI targetPath, Map<String, String[]> params) {
assertThatTargetPathAndParamsDoNotDuplicateParams(targetPath, params);
String encodedJwt = authorizationGenerator.encodeJwt(HttpMethod.GET, targetPath, getBaseUrl(), params, consumerService.getConsumer().getKey(), requireSharedSecret(getAppLink()));
final UriBuilder uriBuilder = new UriBuilder(Uri.fromJavaUri(URI.create(createGetUrl(targetPath, params))));
uriBuilder.addQueryParameter(JwtConstants.JWT_PARAM_NAME, encodedJwt);
return uriBuilder.toString();
}
@Override
public String createGetUrl(URI targetPath, Map<String, String[]> params) {
assertThatTargetPathAndParamsDoNotDuplicateParams(targetPath, params);
return super.createGetUrl(targetPath, params);
}
@Override
public AuthorizationGenerator getAuthorizationGenerator() {
return authorizationGenerator;
}
private ApplicationLink getAppLink() {
return this.connectApplinkManager.getAppLink(getKey());
}
private static Supplier<String> sharedSecretSupplier(final ApplicationLink applicationLink) {
return () -> requireSharedSecret(applicationLink);
}
private static String requireSharedSecret(ApplicationLink applicationLink) {
String sharedSecret = (String) applicationLink.getProperty(SHARED_SECRET_PROPERTY_NAME);
if (sharedSecret == null) {
throw new NotAJwtPeerException(applicationLink);
}
return sharedSecret;
}
} |
/**
* @author liujinkun
* @Title: MyFactoryBean
* @Description: TODO
* @date 2019/7/1 5:40 PM
*/
@Component
public class MyFactoryBean implements FactoryBean<IndexBean> {
@Override
public IndexBean getObject() throws Exception {
return new IndexBean();
}
@Override
public Class<?> getObjectType() {
return IndexBean.class;
}
} |
def p_photoInit(p):
    # Parser action (yacc/PLY-style p_* naming — TODO confirm which parser
    # framework; no grammar docstring is added here because PLY reads rule
    # productions from the docstring).
    # Semantic action: build a photoClip from the image operand (p[3]) and
    # the duration operand (p[5]) and store it as the rule's value.
    image_value = p[3]
    duration_value = p[5]
    p[0] = photoClip(image=image_value, duration=duration_value)
<reponame>zhangjie201412/Smart<filename>libs/mosquitto-1.4.14/src/db_dump/db_dump.c
/*
Copyright (c) 2010-2012 <NAME> <<EMAIL>>
All rights reserved. This program and the accompanying materials
are made available under the terms of the Eclipse Public License v1.0
and Eclipse Distribution License v1.0 which accompany this distribution.
The Eclipse Public License is available at
http://www.eclipse.org/legal/epl-v10.html
and the Eclipse Distribution License is available at
http://www.eclipse.org/org/documents/edl-v10.php.
Contributors:
<NAME> - initial implementation and documentation.
*/
#include <arpa/inet.h>
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <time.h>
#include <mosquitto_broker.h>
#include <memory_mosq.h>
#include <persist.h>
static uint32_t db_version;
static int stats = 0;
/* Restore and print one client chunk: client id, last message id and
 * (for DB versions newer than 2) the stored disconnect time.
 *
 * Returns 0 on success, 1 on failure; on failure db_fd has been closed.
 * Fix: the error path tested `db_fd >= 0`, but db_fd is a FILE*, not a
 * file descriptor — it is now compared against NULL. */
static int _db_client_chunk_restore(struct mosquitto_db *db, FILE *db_fd)
{
	uint16_t i16temp, slen, last_mid;
	char *client_id = NULL;
	int rc = 0;
	time_t disconnect_t;

	read_e(db_fd, &i16temp, sizeof(uint16_t));
	slen = ntohs(i16temp);
	if(!slen){
		fprintf(stderr, "Error: Corrupt persistent database.");
		fclose(db_fd);
		return 1;
	}
	client_id = calloc(slen+1, sizeof(char));
	if(!client_id){
		fclose(db_fd);
		fprintf(stderr, "Error: Out of memory.");
		return 1;
	}
	read_e(db_fd, client_id, slen);
	if(!stats) printf("\tClient ID: %s\n", client_id);
	read_e(db_fd, &i16temp, sizeof(uint16_t));
	last_mid = ntohs(i16temp);
	if(!stats) printf("\tLast MID: %d\n", last_mid);
	if(db_version == 2){
		/* v2 files do not store a disconnect time; substitute "now". */
		disconnect_t = time(NULL);
	}else{
		read_e(db_fd, &disconnect_t, sizeof(time_t));
		if(!stats) printf("\tDisconnect time: %ld\n", disconnect_t);
	}
	free(client_id);
	return rc;
error:
	fprintf(stderr, "Error: %s.", strerror(errno));
	if(db_fd) fclose(db_fd); /* FILE*: NULL check, not >= 0 */
	if(client_id) free(client_id);
	return 1;
}
/* Restore and print one client-message chunk: a reference from a client
 * to a stored message plus its delivery metadata (mid, qos, retain,
 * direction, state, dup).
 *
 * Returns 0 on success, 1 on failure; on failure db_fd has been closed.
 * Fix: the error path tested `db_fd >= 0` though db_fd is a FILE*. */
static int _db_client_msg_chunk_restore(struct mosquitto_db *db, FILE *db_fd)
{
	dbid_t i64temp, store_id;
	uint16_t i16temp, slen, mid;
	uint8_t qos, retain, direction, state, dup;
	char *client_id = NULL;

	read_e(db_fd, &i16temp, sizeof(uint16_t));
	slen = ntohs(i16temp);
	if(!slen){
		fprintf(stderr, "Error: Corrupt persistent database.");
		fclose(db_fd);
		return 1;
	}
	client_id = calloc(slen+1, sizeof(char));
	if(!client_id){
		fclose(db_fd);
		fprintf(stderr, "Error: Out of memory.");
		return 1;
	}
	read_e(db_fd, client_id, slen);
	if(!stats) printf("\tClient ID: %s\n", client_id);
	read_e(db_fd, &i64temp, sizeof(dbid_t));
	store_id = i64temp;
	if(!stats) printf("\tStore ID: %ld\n", (long )store_id);
	read_e(db_fd, &i16temp, sizeof(uint16_t));
	mid = ntohs(i16temp);
	if(!stats) printf("\tMID: %d\n", mid);
	read_e(db_fd, &qos, sizeof(uint8_t));
	if(!stats) printf("\tQoS: %d\n", qos);
	read_e(db_fd, &retain, sizeof(uint8_t));
	if(!stats) printf("\tRetain: %d\n", retain);
	read_e(db_fd, &direction, sizeof(uint8_t));
	if(!stats) printf("\tDirection: %d\n", direction);
	read_e(db_fd, &state, sizeof(uint8_t));
	if(!stats) printf("\tState: %d\n", state);
	read_e(db_fd, &dup, sizeof(uint8_t));
	if(!stats) printf("\tDup: %d\n", dup);
	free(client_id);
	return 0;
error:
	fprintf(stderr, "Error: %s.", strerror(errno));
	if(db_fd) fclose(db_fd); /* FILE*: NULL check, not >= 0 */
	if(client_id) free(client_id);
	return 1;
}
/* Restore and print one message-store chunk: store id, source, topic,
 * qos/retain flags and the payload (printed only if it is short text).
 *
 * Returns 0 on success, 1 on failure; on failure db_fd has been closed.
 * Fixes over the original:
 *  - source_id and topic are set to NULL immediately after being freed;
 *    previously the later error paths called free() on the stale
 *    pointers, causing double frees.
 *  - the error label tested `db_fd >= 0` though db_fd is a FILE*.
 *  - the payload scan index is uint32_t to match payloadlen. */
static int _db_msg_store_chunk_restore(struct mosquitto_db *db, FILE *db_fd)
{
	dbid_t i64temp, store_id;
	uint32_t i32temp, payloadlen;
	uint16_t i16temp, slen, source_mid, mid;
	uint8_t qos, retain, *payload = NULL;
	char *source_id = NULL;
	char *topic = NULL;
	int rc = 0;
	bool binary;
	uint32_t i;

	read_e(db_fd, &i64temp, sizeof(dbid_t));
	store_id = i64temp;
	if(!stats) printf("\tStore ID: %ld\n", (long)store_id);
	read_e(db_fd, &i16temp, sizeof(uint16_t));
	slen = ntohs(i16temp);
	if(slen){
		source_id = calloc(slen+1, sizeof(char));
		if(!source_id){
			fclose(db_fd);
			fprintf(stderr, "Error: Out of memory.");
			return 1;
		}
		if(fread(source_id, 1, slen, db_fd) != slen){
			fprintf(stderr, "Error: %s.", strerror(errno));
			fclose(db_fd);
			free(source_id);
			return 1;
		}
		if(!stats) printf("\tSource ID: %s\n", source_id);
		free(source_id);
		source_id = NULL; /* prevent double free on later error paths */
	}
	read_e(db_fd, &i16temp, sizeof(uint16_t));
	source_mid = ntohs(i16temp);
	if(!stats) printf("\tSource MID: %d\n", source_mid);
	read_e(db_fd, &i16temp, sizeof(uint16_t));
	mid = ntohs(i16temp);
	if(!stats) printf("\tMID: %d\n", mid);
	read_e(db_fd, &i16temp, sizeof(uint16_t));
	slen = ntohs(i16temp);
	if(slen){
		topic = calloc(slen+1, sizeof(char));
		if(!topic){
			fclose(db_fd);
			free(source_id); /* free(NULL) is a no-op */
			fprintf(stderr, "Error: Out of memory.");
			return 1;
		}
		if(fread(topic, 1, slen, db_fd) != slen){
			fprintf(stderr, "Error: %s.", strerror(errno));
			fclose(db_fd);
			free(source_id);
			free(topic);
			return 1;
		}
		if(!stats) printf("\tTopic: %s\n", topic);
		free(topic);
		topic = NULL; /* prevent double free on later error paths */
	}else{
		fprintf(stderr, "Error: Invalid msg_store chunk when restoring persistent database.");
		fclose(db_fd);
		free(source_id);
		return 1;
	}
	read_e(db_fd, &qos, sizeof(uint8_t));
	if(!stats) printf("\tQoS: %d\n", qos);
	read_e(db_fd, &retain, sizeof(uint8_t));
	if(!stats) printf("\tRetain: %d\n", retain);
	read_e(db_fd, &i32temp, sizeof(uint32_t));
	payloadlen = ntohl(i32temp);
	if(!stats) printf("\tPayload Length: %d\n", payloadlen);
	if(payloadlen){
		payload = malloc(payloadlen+1);
		if(!payload){
			fclose(db_fd);
			free(source_id);
			free(topic);
			fprintf(stderr, "Error: Out of memory.");
			return 1;
		}
		memset(payload, 0, payloadlen+1);
		if(fread(payload, 1, payloadlen, db_fd) != payloadlen){
			fprintf(stderr, "Error: %s.", strerror(errno));
			fclose(db_fd);
			free(source_id);
			free(topic);
			free(payload);
			return 1;
		}
		/* Only print the payload when it looks like short text: no
		 * embedded NUL bytes and under 256 bytes long. */
		binary = false;
		for(i=0; i<payloadlen; i++){
			if(payload[i] == 0) binary = true;
		}
		if(binary == false && payloadlen<256){
			if(!stats) printf("\tPayload: %s\n", payload);
		}
		free(payload);
	}
	return rc;
error:
	fprintf(stderr, "Error: %s.", strerror(errno));
	if(db_fd) fclose(db_fd); /* FILE*: NULL check, not >= 0 */
	if(source_id) free(source_id);
	if(topic) free(topic);
	return 1;
}
/* Restore one retained-message chunk, which is just the store id of the
 * retained message. Returns 0 on success, 1 on read failure (db_fd is
 * closed on failure). */
static int _db_retain_chunk_restore(struct mosquitto_db *db, FILE *db_fd)
{
	dbid_t raw_store_id;

	if(fread(&raw_store_id, sizeof(dbid_t), 1, db_fd) != 1){
		fprintf(stderr, "Error: %s.", strerror(errno));
		fclose(db_fd);
		return 1;
	}
	if(!stats){
		printf("\tStore ID: %ld\n", (long int)raw_store_id);
	}
	return 0;
}
/* Restore and print one subscription chunk: client id, topic and QoS.
 *
 * Returns 0 on success, 1 on failure; on failure db_fd has been closed.
 * Fixes over the original:
 *  - client_id and topic were uninitialised and never freed on the
 *    error-label paths, leaking (or worse, freeing wild pointers if the
 *    label were extended); they now start as NULL and are freed there.
 *  - the error label tested `db_fd >= 0` though db_fd is a FILE*. */
static int _db_sub_chunk_restore(struct mosquitto_db *db, FILE *db_fd)
{
	uint16_t i16temp, slen;
	uint8_t qos;
	char *client_id = NULL;
	char *topic = NULL;
	int rc = 0;

	read_e(db_fd, &i16temp, sizeof(uint16_t));
	slen = ntohs(i16temp);
	client_id = calloc(slen+1, sizeof(char));
	if(!client_id){
		fclose(db_fd);
		fprintf(stderr, "Error: Out of memory.");
		return 1;
	}
	read_e(db_fd, client_id, slen);
	if(!stats) printf("\tClient ID: %s\n", client_id);
	read_e(db_fd, &i16temp, sizeof(uint16_t));
	slen = ntohs(i16temp);
	topic = calloc(slen+1, sizeof(char));
	if(!topic){
		fclose(db_fd);
		fprintf(stderr, "Error: Out of memory.");
		free(client_id);
		return 1;
	}
	read_e(db_fd, topic, slen);
	if(!stats) printf("\tTopic: %s\n", topic);
	read_e(db_fd, &qos, sizeof(uint8_t));
	if(!stats) printf("\tQoS: %d\n", qos);
	free(client_id);
	free(topic);
	return rc;
error:
	fprintf(stderr, "Error: %s.", strerror(errno));
	if(db_fd) fclose(db_fd); /* FILE*: NULL check, not >= 0 */
	free(client_id);         /* free(NULL) is a no-op */
	free(topic);
	return 1;
}
/* Entry point for the mosquitto persistence-file dumper.
 *
 * Usage: db_dump [--stats] <mosquitto db filename>
 * Without --stats every chunk is printed verbosely; with --stats only a
 * per-chunk-type count summary is printed at the end.
 *
 * Fixes over the original:
 *  - an unopenable input file now reports the error and exits non-zero
 *    instead of silently returning success;
 *  - the error label tested `fd >= 0` though fd is a FILE*. */
int main(int argc, char *argv[])
{
	FILE *fd;
	char header[15];
	int rc = 0;
	uint32_t crc;
	dbid_t i64temp;
	uint32_t i32temp, length;
	uint16_t i16temp, chunk;
	uint8_t i8temp;
	ssize_t rlen;
	struct mosquitto_db db;
	char *filename;
	long cfg_count = 0;
	long msg_store_count = 0;
	long client_msg_count = 0;
	long retain_count = 0;
	long sub_count = 0;
	long client_count = 0;

	/* Argument parsing: optional --stats flag, then the db filename. */
	if(argc == 2){
		filename = argv[1];
	}else if(argc == 3 && !strcmp(argv[1], "--stats")){
		stats = 1;
		filename = argv[2];
	}else{
		fprintf(stderr, "Usage: db_dump [--stats] <mosquitto db filename>\n");
		return 1;
	}
	memset(&db, 0, sizeof(struct mosquitto_db));
	fd = fopen(filename, "rb");
	if(!fd){
		/* Previously this silently returned 0; report the failure. */
		fprintf(stderr, "Error: %s.", strerror(errno));
		return 1;
	}
	read_e(fd, &header, 15);
	if(!memcmp(header, magic, 15)){
		if(!stats) printf("Mosquitto DB dump\n");
		// Restore DB as normal
		read_e(fd, &crc, sizeof(uint32_t));
		if(!stats) printf("CRC: %d\n", crc);
		read_e(fd, &i32temp, sizeof(uint32_t));
		db_version = ntohl(i32temp);
		if(!stats) printf("DB version: %d\n", db_version);
		/* Each chunk starts with a 16-bit type and a 32-bit length. */
		while(rlen = fread(&i16temp, sizeof(uint16_t), 1, fd), rlen == 1){
			chunk = ntohs(i16temp);
			read_e(fd, &i32temp, sizeof(uint32_t));
			length = ntohl(i32temp);
			switch(chunk){
				case DB_CHUNK_CFG:
					cfg_count++;
					if(!stats) printf("DB_CHUNK_CFG:\n");
					if(!stats) printf("\tLength: %d\n", length);
					read_e(fd, &i8temp, sizeof(uint8_t)); // shutdown
					if(!stats) printf("\tShutdown: %d\n", i8temp);
					read_e(fd, &i8temp, sizeof(uint8_t)); // sizeof(dbid_t)
					if(!stats) printf("\tDB ID size: %d\n", i8temp);
					if(i8temp != sizeof(dbid_t)){
						fprintf(stderr, "Error: Incompatible database configuration (dbid size is %d bytes, expected %ld)",
								i8temp, sizeof(dbid_t));
						fclose(fd);
						return 1;
					}
					read_e(fd, &i64temp, sizeof(dbid_t));
					if(!stats) printf("\tLast DB ID: %ld\n", (long)i64temp);
					break;
				case DB_CHUNK_MSG_STORE:
					msg_store_count++;
					if(!stats) printf("DB_CHUNK_MSG_STORE:\n");
					if(!stats) printf("\tLength: %d\n", length);
					if(_db_msg_store_chunk_restore(&db, fd)) return 1;
					break;
				case DB_CHUNK_CLIENT_MSG:
					client_msg_count++;
					if(!stats) printf("DB_CHUNK_CLIENT_MSG:\n");
					if(!stats) printf("\tLength: %d\n", length);
					if(_db_client_msg_chunk_restore(&db, fd)) return 1;
					break;
				case DB_CHUNK_RETAIN:
					retain_count++;
					if(!stats) printf("DB_CHUNK_RETAIN:\n");
					if(!stats) printf("\tLength: %d\n", length);
					if(_db_retain_chunk_restore(&db, fd)) return 1;
					break;
				case DB_CHUNK_SUB:
					sub_count++;
					if(!stats) printf("DB_CHUNK_SUB:\n");
					if(!stats) printf("\tLength: %d\n", length);
					if(_db_sub_chunk_restore(&db, fd)) return 1;
					break;
				case DB_CHUNK_CLIENT:
					client_count++;
					if(!stats) printf("DB_CHUNK_CLIENT:\n");
					if(!stats) printf("\tLength: %d\n", length);
					if(_db_client_chunk_restore(&db, fd)) return 1;
					break;
				default:
					/* Unknown chunk type: skip its payload and continue. */
					fprintf(stderr, "Warning: Unsupported chunk \"%d\" in persistent database file. Ignoring.", chunk);
					fseek(fd, length, SEEK_CUR);
					break;
			}
		}
		/* NOTE(review): fread returns size_t so rlen is never negative;
		 * this check looks unreachable but is kept for parity. */
		if(rlen < 0) goto error;
	}else{
		fprintf(stderr, "Error: Unrecognised file format.");
		rc = 1;
	}
	fclose(fd);
	if(stats){
		printf("DB_CHUNK_CFG: %ld\n", cfg_count);
		printf("DB_CHUNK_MSG_STORE: %ld\n", msg_store_count);
		printf("DB_CHUNK_CLIENT_MSG: %ld\n", client_msg_count);
		printf("DB_CHUNK_RETAIN: %ld\n", retain_count);
		printf("DB_CHUNK_SUB: %ld\n", sub_count);
		printf("DB_CHUNK_CLIENT: %ld\n", client_count);
	}
	return rc;
error:
	fprintf(stderr, "Error: %s.", strerror(errno));
	if(fd) fclose(fd); /* FILE*: NULL check, not >= 0 */
	return 1;
}
|
/**
* Created with IntelliJ IDEA.
* User: sergiuchuckmisha
* Date: 9/16/15
* Time: 3:03 PM
* purpose of the class is to describe html elements on page
* https://exonum.com/demo/voting/#/elections/decrypted
* pageObject pattern is implemented
*/
public class DecryptedBallotPage implements IPage, INavigateToUrl, ITopMenu, IBottomMenu {
@Inject private DecryptConfirmationPopUpPage decryptConfirmationPopUpPage;
@Override
public String getUrl() {
return "https://exonum.com/demo/voting/#/elections/decrypted";
}
@Override
public boolean isOnPage() {
return isTopMenuNamePresent();
}
@Override
public String getTopMenuName() {
return "Your Decrypted Ballot";
}
@Override
@Deprecated
public void pressTopMenuBackArrow(){
//method should not be used because TopMenuBackArrow is absent on DecryptedBallotPage
}
public void pressReturnButton(){
DriverHelper.click(By.xpath("//div[@class='button button-link' and @ng-click='electionWizardReset()' and text() = 'RETURN']"));
}
public void pressTallyingAuthoritiesAggregatePublicKey(){
DriverHelper.click(By.xpath("//td[text() = 'Tallying authorities aggregate public key']"));
}
public void pressCandidateOptionSelectedAndEncryptionRandomness(){
DriverHelper.click(By.xpath("//td[text() = 'Candidate option selected and encryption randomness']"));
}
public void pressEncryptedBallot(){
DriverHelper.click(By.xpath("//td[text() = 'Encrypted ballot']"));
}
public void pressBallotSHA256Hash(){
DriverHelper.click(By.xpath("//td[text() = 'Ballot SHA-256 hash']"));
}
public void press3WordMemoByBIPMnemonicCodeAlgorithm(){
DriverHelper.click(By.xpath("//td[contains(text(), '3-word memo by BIP 0039 Mnemonic')]"));
}
@Override
public boolean defaultNavigateTo() {
if (isOnPage()) {
return true;
}
decryptConfirmationPopUpPage.defaultNavigateTo();
decryptConfirmationPopUpPage.pressDecryptBallotConfirmationPopUpButton();
return isOnPage();
}
} |
/**
* Unit tests for {@link Position}.
*
* @author lare96 <http://github.com/lare96>
*/
final class PositionTest {
@Test
void invalidX() {
assertThrows(IllegalArgumentException.class, () -> new Position(-1, 1, 1));
}
@Test
void invalidY() {
assertThrows(IllegalArgumentException.class, () -> new Position(1, -1, 1));
}
@Test
void invalidUpperZ() {
assertThrows(IllegalArgumentException.class, () -> new Position(1, 1, 4));
}
@Test
void invalidLowerZ() {
assertThrows(IllegalArgumentException.class, () -> new Position(1, 1, -1));
}
} |
import { Color, LocalSvelteComponent } from './shared';

/** Props accepted by the FormText component. */
export interface FormTextProps {
  /** When true, render the text inline rather than as a block element. */
  inline?: boolean;
  /** Theme color applied to the text (one of the shared Color values). */
  color?: Color;
}

/**
 * Type declaration for the FormText Svelte component, parameterised by
 * {@link FormTextProps}. This is a declaration only; the implementation
 * lives in the corresponding .svelte file.
 */
declare class FormText extends LocalSvelteComponent<FormTextProps> {}
export default FormText;
mod helpers;
mod structures;
use std::{
time::{Duration, SystemTime, UNIX_EPOCH},
env,
sync::Arc
};
use egg_mode::{
tweet::{retweet, mentions_timeline, like},
user::{relation_lookup, Connection}
};
use tokio::time::delay_for;
use dashmap::DashMap;
use once_cell::sync::Lazy;
use crate::structures::Config;
use rust_clock::Clock;
// Process-wide cache mapping a tweet id to the unix timestamp (seconds)
// at which its cached entry expires; shared across tasks through an Arc.
static TWEET_MAP: Lazy<Arc<DashMap<u64, u64>>> = Lazy::new(|| Arc::new(DashMap::new()));

/// Convenience result alias used throughout the bot.
pub type BotResult<T> = Result<T, Box<dyn std::error::Error>>;
/// Entry point: load the config from the path given as the first CLI
/// argument, report the configured delay/page size, spawn the periodic
/// cache-flush task and run the retweet loop.
///
/// Fix: the third `push_str(&format!(...))` had no interpolation
/// (clippy::useless_format) and is now a plain string literal.
#[tokio::main]
async fn main() -> BotResult<()> {
    pretty_env_logger::init();
    let args: Vec<String> = env::args().collect();
    if args.len() < 2 {
        let first_err_string = "You need an info.json file!";
        let second_err_string = "Please create one using the github sample and run the program with the path as an argument!";
        eprintln!("{} \n{}", first_err_string, second_err_string);
        return Ok(());
    }
    let config = Config::generate(&args[1]).await;

    // Informs the user of various parameters
    let mut rt_delay_clock = Clock::new();
    rt_delay_clock.set_time_secs(config.rt_delay as i64);
    let mut informative_string = String::new();
    informative_string.push_str(&format!("Your retweet delay is currently: {} \n", rt_delay_clock.get_time()));
    informative_string.push_str(&format!("With a page size of: {} \n\n", config.page_size));
    informative_string.push_str("If you want to change these values, please edit them in info.json \n");
    println!("{} \n", informative_string);

    // Background task: flush the tweet cache after 43200 s (12 hours).
    tokio::spawn(async move {
        delay_for(Duration::from_secs(43200)).await;
        clear_cache().await
    });

    perform_retweet(&config).await?;
    Ok(())
}
/*
 * Fetch the mentions timeline and do two things
 *
 * 1. If the tweet is new, store the id along with a timestamp in the cache
 * 2. Retweet the tweet
 *
 * Doing this prevents rate limiting when a user wants to poll the API every few seconds
 */
async fn perform_retweet(config: &Config) -> BotResult<()> {
    loop {
        let mentions = mentions_timeline(&config.token).with_page_size(config.page_size);

        let (_mentions, feed) = match mentions.start().await {
            Ok(resp) => resp,
            Err(e) => {
                println!("There was an error when fetching the timeline!: {} \nPlease open a github issue with this output!", e);
                break;
            }
        };

        for status in feed.iter() {
            let start = SystemTime::now();
            let since_epoch = start.duration_since(UNIX_EPOCH).expect("Time went backwards?").as_secs();

            // Copy the cached expiry out and drop the guard before mutating:
            // calling `remove` while a `Ref` into the same DashMap shard is
            // still alive can deadlock.
            let cached_expiry = TWEET_MAP.get(&status.id).map(|guard| *guard.value());

            match cached_expiry {
                Some(expiry) => {
                    // Already handled; evict the entry once it has expired.
                    if expiry < since_epoch {
                        TWEET_MAP.remove(&status.id);
                    }
                }
                None => {
                    // Only consider top-level mentions (not replies), and only
                    // from users who follow this account.
                    if status.in_reply_to_status_id.is_none() {
                        let other_user = status.user.as_ref().unwrap();
                        let lookup = relation_lookup([other_user.id].iter().cloned(), &config.token).await?;

                        if lookup[0].connections.iter().any(|c| matches!(c, &Connection::FollowedBy)) {
                            let _ = retweet(status.id, &config.token).await;
                            let _ = like(status.id, &config.token).await;

                            // Remember the tweet for an hour so it is not retweeted twice.
                            TWEET_MAP.insert(status.id, since_epoch + 3600);
                        }
                    }
                }
            }
        }

        delay_for(Duration::from_secs(config.rt_delay)).await;
    }

    Ok(())
}
/*
* If there are any remaining cached tweets, flush them out.
* This function is rarely used on smaller retweet accounts with longer delays
*/
async fn clear_cache() {
loop {
let start = SystemTime::now();
let since_epoch = start.duration_since(UNIX_EPOCH).expect("Time went backwards?").as_secs();
for guard in TWEET_MAP.iter() {
if guard.value() < &since_epoch {
TWEET_MAP.remove(guard.key());
}
}
delay_for(Duration::from_secs(43200)).await;
}
} |
.
PURPOSE
To assess the influence of consistent non-weight-bearing on the postoperative results, we reviewed 28 patients in whom we performed intertrochanteric osteotomies for idiopathic osteonecrosis of the femoral head during the years 1996 and 1997. Postoperatively, 12 patients were supplied with a walking device for non-weight-bearing, which was used for 1 year. 16 patients were mobilized using 2 crutches for 15 kp partial weight-bearing until the osteotomy healed, and were then allowed to bear full weight.
METHOD
For clinical evaluation of the early results after about 18 month the Merle d'Aubigné hip score was used. Furthermore we analyzed pelvic radiographs for assessing the stage of the necrosis (Ficat staging) and the necrotic angle.
RESULTS
Overall 82% good and excellent results were achieved. Due to a longer period of non-weight-bearing the results were again clearly improved (92% versus 75% of patients with partial weight-bearing). It was noticed that there were discrepancies between good clinical results and the less satisfying radiographic findings.
CONCLUSIONS
Postoperative non-weight-bearing for about 1 year, using a walking device, clearly has a positive effect on clinical results in patients with idiopathic osteonecrosis of the femoral head.
/**
* An utils class that provides common used look-ups and other logic for querying the domain model and the rules model,
* that is used along the application.
* <p>
* // TODO: Some kind of cache to avoid frequently used lookups? Consider performance and memory, this class
* is shared on both server and client sides.
*/
@ApplicationScoped
public class CommonLookups {
private static Logger LOGGER = Logger.getLogger(CommonLookups.class.getName());
private final DefinitionUtils definitionUtils;
private final DefinitionLookupManager definitionLookupManager;
private final RuleManager ruleManager;
private final RuleLookupManager ruleLookupManager;
private final FactoryManager factoryManager;
protected CommonLookups() {
this(null,
null,
null,
null,
null);
}
@Inject
public CommonLookups(final DefinitionUtils definitionUtils,
final RuleManager ruleManager,
final DefinitionLookupManager definitionLookupManager,
final RuleLookupManager ruleLookupManager,
final FactoryManager factoryManager) {
this.definitionUtils = definitionUtils;
this.ruleManager = ruleManager;
this.definitionLookupManager = definitionLookupManager;
this.ruleLookupManager = ruleLookupManager;
this.factoryManager = factoryManager;
}
/**
* Returns the allowed edge definition identifiers that can be added as outgoing edges for the given source node.
*/
public <T> Set<String> getAllowedConnectors(final String defSetId,
final Node<? extends Definition<T>, Edge> sourceNode,
final int page,
final int pageSize) {
final Set<String> result = new LinkedHashSet<>();
if (null != defSetId && null != sourceNode) {
final T definition = sourceNode.getContent().getDefinition();
final Set<String> connectionAllowedEdges = getConnectionRulesAllowedEdges(defSetId,
definition,
page,
pageSize);
if (null != connectionAllowedEdges && !connectionAllowedEdges.isEmpty()) {
final RuleSet ruleSet = getRuleSet(defSetId);
connectionAllowedEdges.stream().forEach(allowedEdgeId -> {
final int edgeCount = countOutgoingEdges(sourceNode,
allowedEdgeId);
final boolean oeCardinalityAllowed = getDefinitionLabels(definition).stream()
.filter(role -> pass(ruleManager.evaluate(ruleSet,
RuleContextBuilder.DomainContexts.edgeCardinality(sourceNode.getLabels(),
allowedEdgeId,
edgeCount,
EdgeCardinalityContext.Direction.OUTGOING,
Optional.of(CardinalityContext.Operation.ADD)))))
.findAny()
.isPresent();
log(Level.FINEST,
"Outgoing edge cardinality rules evaluation - Result = [" + oeCardinalityAllowed + "]");
if (oeCardinalityAllowed) {
result.add(allowedEdgeId);
}
});
}
}
return result;
}
private RuleSet getRuleSet(final String defSetId) {
checkNotNull("defSetId",
defSetId);
final Object definitionSet = getDefinitionManager().definitionSets().getDefinitionSetById(defSetId);
return getDefinitionManager()
.adapters()
.registry()
.getDefinitionSetRuleAdapter(definitionSet.getClass())
.getRuleSet(definitionSet);
}
/**
* Returns the allowed definition identifiers that can be used as target node for the given source node and
* the given edge (connector) identifier.
* This method only returns the definition identifiers that are considered the default types for its morph type,
* it does NOT return all the identifiers for all the allowed target definitions.
* <p>
* TODO: Handle several result pages.
*/
public <T> Set<String> getAllowedMorphDefaultDefinitions(final String defSetId,
final Graph<?, ? extends Node> graph,
final Node<? extends Definition<T>, ? extends Edge> sourceNode,
final String edgeId,
final int page,
final int pageSize) {
final Set<Object> allowedDefinitions = getAllowedTargetDefinitions(defSetId,
graph,
sourceNode,
edgeId,
page,
pageSize);
log(Level.FINEST,
"Target definitions allowed " +
"for [" + sourceNode + "] and using the " +
"connector [" + edgeId + "] " +
"ARE [" + allowedDefinitions + "]");
final Set<String> result = new LinkedHashSet<>();
allowedDefinitions.stream().forEach(definition -> {
final String defId = getDefinitionManager().adapters().forDefinition().getId(definition);
final MorphDefinition morphDefinition = definitionUtils.getMorphDefinition(definition);
final boolean hasMorphBase = null != morphDefinition;
final String id = hasMorphBase ? morphDefinition.getDefault() : defId;
result.add(id);
});
log(Level.FINEST,
"Target definitions group by morph base type allowed " +
"for [" + sourceNode + "] and using the " +
"connector [" + edgeId + "] " +
"ARE [" + result + "]");
return result;
}
/**
* Returns the allowed definition identifiers that can be used as target node for the given source node and
* the given edge (connector) identifier.
* <p>
* TODO: Handle several result pages.
*/
@SuppressWarnings("unchecked")
public <T> Set<Object> getAllowedTargetDefinitions(final String defSetId,
final Graph<?, ? extends Node> graph,
final Node<? extends Definition<T>, ? extends Edge> sourceNode,
final String edgeId,
final int page,
final int pageSize) {
final Set<Object> result = new LinkedHashSet<>();
if (null != defSetId && null != graph && null != sourceNode && null != edgeId) {
final T definition = sourceNode.getContent().getDefinition();
final RuleSet ruleSet = getRuleSet(defSetId);
log(Level.FINEST,
"*** Checking the target definitions allowed " +
"for [" + definition + "] and using the " +
"connector [" + edgeId + "] ***");
// Check outgoing connectors cardinality for the source node ( plus the new one to be added ).
final int outConnectorsCount = countOutgoingEdges(sourceNode,
edgeId);
log(Level.FINEST,
"The source node has " + outConnectorsCount + "] outgoing connections.");
final RuleViolations oev =
ruleManager.evaluate(ruleSet,
RuleContextBuilder.DomainContexts.edgeCardinality(sourceNode.getLabels(),
edgeId,
outConnectorsCount,
EdgeCardinalityContext.Direction.OUTGOING,
Optional.of(CardinalityContext.Operation.ADD)));
final boolean oeCardinalityAllowed = pass(oev);
log(Level.FINEST,
"Outgoing edge cardinality rules evaluation " +
"result = [" + oeCardinalityAllowed + "]");
if (oeCardinalityAllowed) {
// Obtain allowed target roles that pass connection rules.
final Set<String> allowedConnectionRoles = getConnectionRulesAllowedTargets(defSetId,
definition,
edgeId,
page,
pageSize);
log(Level.FINEST,
"Allowed target roles that pass connection rules " +
"ARE [" + allowedConnectionRoles + "]");
if (null != allowedConnectionRoles) {
// Obtain a first set of candidate Defintiion identifiers.
final Set<String> allowedDefinitions = getDefinitions(defSetId,
allowedConnectionRoles);
log(Level.FINEST,
"Allowed target definitions that pass connection rules " +
"ARE [" + allowedConnectionRoles + "]");
if (null != allowedDefinitions) {
final Map<String, Integer> graphLabelCount = GraphUtils.getLabelsCount(graph,
allowedConnectionRoles);
final int inConnectorsCount = countIncomingEdges(sourceNode,
edgeId);
allowedDefinitions
.stream()
.forEach(defId -> {
final Object targetDefinition = createDefinition(defId);
if (null != targetDefinition) {
final Set<String> targetDefinitionRoles =
getDefinitionManager()
.adapters()
.forDefinition()
.getLabels(targetDefinition);
// Check cardinality for each of the roles for this potential target node.
final boolean hasCardinalityViolations = targetDefinitionRoles
.stream()
.filter(role -> {
final Integer roleCount = Optional.ofNullable(graphLabelCount.get(role)).orElse(0);
final RuleViolations violations =
ruleManager.evaluate(ruleSet,
RuleContextBuilder.DomainContexts.cardinality(Collections.singleton(role),
roleCount,
Optional.of(CardinalityContext.Operation.ADD)));
return !pass(violations);
})
.findFirst()
.isPresent();
log(Level.FINEST,
"Cardinality rules evaluation " +
"result = [" + hasCardinalityViolations + "]");
if (!hasCardinalityViolations) {
// Check incoming connector cardinality for each the target node.
final RuleViolations iev =
ruleManager.evaluate(ruleSet,
RuleContextBuilder.DomainContexts.edgeCardinality(Collections.singleton(defId),
edgeId,
inConnectorsCount,
EdgeCardinalityContext.Direction.INCOMING,
Optional.of(CardinalityContext.Operation.ADD)));
final boolean ieCardinalityAllowed = pass(iev);
log(Level.FINEST,
"Incoming edge cardinality rules evaluation " +
"result = [" + ieCardinalityAllowed + "]");
if (ieCardinalityAllowed) {
// This potential node can be used as target one, as it passes all rule checks.
result.add(targetDefinition);
}
}
}
});
return result;
}
}
}
}
return result;
}
/**
* Returns all the Definition Set's definition identifiers that contains the given labels.
* <p>
* TODO: Handle several result pages.
*/
private Set<String> getDefinitions(final String defSetId,
final Set<String> labels) {
if (null != labels && !labels.isEmpty()) {
final DefinitionLookupRequest request =
new DefinitionLookupRequest.Builder()
.definitionSetId(defSetId)
.labels(labels)
.page(0)
.pageSize(100)
.build();
final LookupManager.LookupResponse<DefinitionRepresentation> response = definitionLookupManager.lookup(request);
final List<DefinitionRepresentation> definitionRepresentations = response.getResults();
if (null != definitionRepresentations && !definitionRepresentations.isEmpty()) {
final Set<String> result = new LinkedHashSet<>();
for (final DefinitionRepresentation definitionRepresentation : definitionRepresentations) {
final String id = definitionRepresentation.getDefinitionId();
result.add(id);
}
return result;
}
}
return new HashSet<>(0);
}
/**
* Returns the allowed edge identifiers that satisfy connection rules for the given
* source definition.
* @oaram sourceDefinition The domain model object ( not a graph element ).
*/
private <T> Set<String> getConnectionRulesAllowedEdges(final String defSetId,
final T sourceDefinition,
final int page,
final int pageSize) {
final List<Rule> rules = lookupConnectionRules(defSetId,
sourceDefinition,
null,
page,
pageSize);
if (null != rules && !rules.isEmpty()) {
final Set<String> result = new LinkedHashSet<>();
for (final Rule rule : rules) {
final CanConnect cr = (CanConnect) rule;
final String edgeId = cr.getRole();
result.add(edgeId);
}
return result;
}
return null;
}
/**
* Returns the allowed ROLES that satisfy connection rules for a given source
* definition ( domain model object, not a node ).and the given edge (connector) identifier.
* <p>
* TODO: Handle several result pages.
*/
private <T> Set<String> getConnectionRulesAllowedTargets(final String defSetId,
final T sourceDefinition,
final String edgeId,
final int page,
final int pageSize) {
final List<Rule> rules = lookupConnectionRules(defSetId,
sourceDefinition,
edgeId,
page,
pageSize);
if (null != rules && !rules.isEmpty()) {
final Set<String> result = new LinkedHashSet<>();
final Set<String> sourceDefLabels = getDefinitionLabels(sourceDefinition);
for (final Rule rule : rules) {
final CanConnect cr = (CanConnect) rule;
final List<CanConnect.PermittedConnection> connections = cr.getPermittedConnections();
if (null != connections && !connections.isEmpty()) {
for (final CanConnect.PermittedConnection connection : connections) {
if (sourceDefLabels != null && sourceDefLabels.contains(connection.getStartRole())) {
result.add(connection.getEndRole());
}
}
}
}
return result;
}
return null;
}
private <T> List<Rule> lookupConnectionRules(final String defSetId,
final T sourceDefinition,
final String edgeId,
final int page,
final int pageSize) {
if (null != defSetId) {
final Set<String> defLabels = getDefinitionLabels(sourceDefinition);
final RuleLookupRequest.Builder builder = new RuleLookupRequest.Builder();
builder.definitionSetId(defSetId)
.type(RuleLookupRequest.Builder.RuleType.CONNECTION)
.from(defLabels)
.page(page)
.pageSize(pageSize);
if (null != edgeId) {
builder.id(edgeId);
}
final RuleLookupRequest request = builder.build();
final LookupManager.LookupResponse<Rule> response = ruleLookupManager.lookup(request);
return response.getResults();
}
return null;
}
private <T> int countIncomingEdges(final Node<? extends Definition<T>, ? extends Edge> sourceNode,
final String edgeId) {
final List<? extends Edge> edges = sourceNode.getInEdges();
return GraphUtils.countEdges(getDefinitionManager(),
edgeId,
edges);
}
private <T> int countOutgoingEdges(final Node<? extends Definition<T>, ? extends Edge> sourceNode,
final String edgeId) {
final List<? extends Edge> edges = sourceNode.getOutEdges();
return GraphUtils.countEdges(getDefinitionManager(),
edgeId,
edges);
}
private <T> Set<String> getDefinitionLabels(final T definition) {
return getDefinitionManager().adapters().forDefinition().getLabels(definition);
}
private boolean pass(final RuleViolations violations) {
return null == violations || !violations.violations(RuleViolation.Type.ERROR).iterator().hasNext();
}
private Object createDefinition(final String defId) {
// TODO: Avoid new instances here.
return factoryManager.newDefinition(defId);
}
private DefinitionManager getDefinitionManager() {
return definitionUtils.getDefinitionManager();
}
private void log(final Level level,
final String message) {
LOGGER.log(level,
message);
}
} |
def super_smoother(data, length):
    """Apply Ehlers' two-pole "super smoother" filter to a numeric series.

    The first two output samples are 0 because the recursion needs two prior
    filter values; every later sample blends the current/previous inputs with
    the two previous outputs.

    Args:
        data: Sequence of numeric samples (e.g. prices).
        length: Filter length controlling the smoothing period (must be > 0
            whenever ``len(data) > 2``).

    Returns:
        list: Smoothed series with the same number of elements as ``data``.
    """
    # Seed with zeros exactly as the recursion expects (at most two).
    ssf = [0] * min(len(data), 2)
    if len(data) <= 2:
        return ssf
    # The coefficients depend only on `length`, so compute them once instead of
    # on every loop iteration. The literal constants (1.414, 3.14159, 4.44)
    # approximate sqrt(2), pi, etc. and are kept as-is to preserve the
    # original numeric output.
    arg = 1.414 * 3.14159 / length
    a_1 = math.exp(-arg)
    b_1 = 2 * a_1 * math.cos(4.44 / float(length))
    c_2 = b_1
    c_3 = -a_1 * a_1
    c_1 = 1 - c_2 - c_3
    for i in range(2, len(data)):
        ssf.append(c_1 * (data[i] + data[i - 1]) / 2 + c_2 * ssf[i - 1] + c_3 * ssf[i - 2])
    return ssf
Maimonides and Spinoza as sources for Maimon's solution of the “problem quid juris” in Kant's theory of knowledge
Abstract Maimon once described the philosophical project underlying his Essay on Transcendental Philosophy as an attempt “to unify Kantian philosophy with Spinozism”. But in the only reference to Spinoza in the Essay, he stresses that Spinoza was not the source of his argument. In this paper I will argue that, notwithstanding the disclaimer, Maimon's solution for the problems that in his view haunted Kant's theory of knowledge was indeed significantly influenced by Spinoza, as well as by the medieval Jewish Aristotelian Maimonides. Since the key concept in the solution proposed by Maimon is the metaphysical doctrine of the “infinite intellect”, my focus will be on clarifying how this doctrine is related to Maimonides' doctrine of the divine intellect and to Spinoza's doctrine of Deus sive Natura. My main contention is that important aspects of Maimon's doctrine of the “infinite intellect” are based on a Spinozistic interpretation of Maimonides' doctrine of the divine intellect. |
import {
  BadRequestException,
  Controller,
  Get,
  Param,
  ParseIntPipe,
} from '@nestjs/common';

import { CustomNotFoundExceptionFixture } from './exceptions/custom-not-found.exception.fixture';

/**
 * Test-fixture controller whose routes deliberately throw, so the
 * application's exception handling can be exercised in tests.
 *
 * NOTE: the literal routes ('unknown', 'bad-request') are declared before
 * the parameterized ':id' route so they are not captured by it.
 */
@Controller('test')
export class AppControllerFixture {
  /** Throws a plain Error (typically surfaced as an unhandled/500 response). */
  @Get('unknown')
  getError(): void {
    throw new Error();
  }

  /** Throws Nest's built-in BadRequestException (HTTP 400). */
  @Get('bad-request')
  getErrorBadRequest(): void {
    throw new BadRequestException();
  }

  /** Throws the project's custom not-found exception for the parsed numeric id. */
  @Get(':id')
  getErrorNotFound(@Param('id', ParseIntPipe) id: number): void {
    throw new CustomNotFoundExceptionFixture(id);
  }
}
///< Returns the length of the quaternion squared (prevents a sqrt)
kmScalar kmQuaternionLengthSq(const kmQuaternion* pIn)
{
    /* |q|^2 is simply the dot product of the quaternion with itself. */
    const kmScalar squared = (pIn->x * pIn->x)
                           + (pIn->y * pIn->y)
                           + (pIn->z * pIn->z)
                           + (pIn->w * pIn->w);
    return squared;
}
def _get_text_plain(self, msg):
tp = None
if not msg.is_multipart():
if msg.get_content_type() == 'text/plain':
tp = msg
else:
return u"No text/plain found"
else:
for p in msg.walk():
if p.get_content_type() == 'text/plain':
tp = p
break
if tp:
return self.__decode_email_body(tp.get_payload(), tp)
else:
return u"No text/plain found" |
def has_unique_names(self):
    """Return True when every atom's (residue number + atom name) id is unique.

    The identifier is the plain string concatenation of residue number and
    atom name, mirroring the original implementation.
    NOTE(review): plain concatenation is ambiguous (resnum 1 + "1CA" collides
    with resnum 11 + "CA") -- confirm whether a separator is wanted.
    """
    atom_ids = [
        str(self.mol_data['resnum'][i]) + self.mol_data['atomname'][i]
        for i in range(self.natoms)
    ]
    # Names are unique exactly when de-duplication removes nothing.
    return len(set(atom_ids)) == len(atom_ids)
class DB_Functions:
    """Functions to be called from the Menu."""

    def __init__(self, db_name):
        """Open the SQLite database *db_name* and keep both the connection
        (``self.conn``) and a cursor (``self.c``) for menu actions."""
        connection = sqlite3.connect(db_name)
        self.conn = connection
        self.c = connection.cursor()
CRT-BIoV: A Cognitive Radio Technique for Blockchain-Enabled Internet of Vehicles
Cognitive Radio Network (CRN) is considered a viable solution for the Internet of Vehicles (IoV), where objects equipped with cognition make decisions intelligently through an understanding of both the social and physical worlds. However, spectrum availability and data sharing/transfer among vehicles are critical for improving services and driving-safety metrics, and the presence of Malicious Devices (MD) further degrades network performance. Recently, a blockchain technique in CRN-based IoV has been introduced to prevent data alteration by these MD, allowing vehicles to track both legal and illegal activities in the network. In this paper, we provide security to IoV during spectrum sensing and information transmission using CRN by sensing the channels through a decision-making technique known as the Technique for Order Preference by Similarity to the Ideal Solution (TOPSIS), which evokes the trust of its Cognitive Users (CU) by analyzing certain predefined attributes. Further, a blockchain is maintained in the network to trace every activity of stored information. The proposed mechanism is validated rigorously against several security metrics using various spectrum sensing and security parameters, compared with a baseline solution in IoV. Extensive simulations suggest that our proposed mechanism is approximately 70% more efficient than the baseline mechanism in terms of malicious-node identification and resistance to DoS threats.
def conv_net(self, input_shape, output_channels, convolutional_layers,
             connected_layers):
    """Build the CNN graph: image input -> convolutional stack -> flatten ->
    fully-connected stack -> logits tensor (exposed under the name 'logits').

    Side effects: resets the default TF graph and stores the input and
    keep-prob placeholders on the instance as ``self.x`` and
    ``self.keep_prob_variable``.
    """
    # Start from a clean graph so repeated builds do not accumulate ops.
    tf.reset_default_graph()

    self.x = neural_net_image_input(input_shape)
    self.keep_prob_variable = neural_net_keep_prob_input()

    conv_stack = make_convolutional_layers(self.x, convolutional_layers,
                                           self.keep_prob_variable)
    dense_stack = make_fullyconnected_layers(flatten(conv_stack[-1]),
                                             connected_layers,
                                             self.keep_prob_variable)

    # Name the final tensor so it can be fetched by name later.
    return tf.identity(output(dense_stack[-1], output_channels),
                       name='logits')
261 RIGHT VENTRICULAR VOLUME AND SYSTOLIC FUNCTION CORRELATE WITH BRAIN NATRIURETIC PEPTIDE LEVELS IN ADULT PATIENTS WITH CONGENITAL HEART DISEASE.
Objectives To evaluate the correlation between right ventricular systolic function, volume, and plasma brain natriuretic peptide (BNP) levels in adults with congenital heart disease (CHD). Background Right ventricular function is an important though often undervalued prognostic factor in patients with heart failure. Plasma BNP levels are moderately elevated in patients with right ventricular pressure and/or volume overload. We sought to define the correlation between plasma BNP level and right ventricular volume and systolic function in a cohort of adults with CHD. Methods The transthoracic echocardiograms (TTE), magnetic resonance cine-gradient echo (MRI), and serum BNP levels of adults with CHD were reviewed. Correlation coefficients were sought between BNP and right ventricular systolic pressure (RVSP), right and left ventricular ejection fractions (RVEF/LVEF), right ventricular end-systolic volume (RVESV), and end-diastolic volume (RVEDV). MRI was utilized to determine RVESV and RVEDV via application of the Simpson's rule by manual planimetry of the endocardial and epicardial borders of both ventricles in the short axis. The RVEF was calculated as the stroke volume divided by the RVEDV. A similar method was utilized to quantify LVEF. Right and left ventricular muscle mass (RVMM/LVMM) were quantified by measuring the volume of the myocardium multiplied by the specific gravity of myocardium. TTE was also used to estimate RVEF and LVEF as well as estimate RVSP from tricuspid valve regurgitant velocity using the modified Bernoulli equation. Results Nine adults with various types of CHD (4 tetralogy of Fallot, 2 bicuspid aortic valve, 2 double outlet right ventricle, 1 double outlet left ventricle) were included in this analysis. Serum BNP levels correlated significantly with MRI-derived RVESV (r = .88, p = .01) and RVEDV (r = .82, p = .01). 
A significant inverse correlation existed between BNP levels and MRI-derived RVEF (r = -.74, p = .01) and echo-estimated RVEF (r = -.86, p = .01). Echo- and MRI-derived LVEF did not correlate significantly with BNP levels. All patients with BNP > 100 had RVEF ≤ 50%, RVESV ≥ 55 mL. RVSP and RVMM were not correlated with BNP levels. Conclusions In adults with CHD, MRI-derived RVESV and RVEDV significantly correlated with plasma BNP level. RVEF but not LVEF was inversely correlated with BNP level. Right ventricular systolic and diastolic volume and systolic function are closely associated with plasma BNP in adults with CHD. |
/**
@author <NAME>
*/
#include "SceneBody.h"
#include "SceneDevice.h"
#include <cnoid/SceneShape>
#include <cnoid/SceneUtil>
#include <boost/bind.hpp>
#include "gettext.h"
using namespace std;
using namespace boost;
using namespace cnoid;
// Wraps a Body Link as a scene-graph transform node; the link's shape (if any)
// becomes a child so it renders at the link's position.
SceneLink::SceneLink(Link* link)
    : link_(link)
{
    setName(link->name());

    orgShape_ = link->shape();
    if(orgShape_){
        shape_ = orgShape_;
        addChild(shape_);
    }
    transparency_ = 0.0;
}
// Copy constructor used for node cloning; the link association and transparency
// are intentionally reset rather than copied.
SceneLink::SceneLink(const SceneLink& org)
    : SgPosTransform(org)
{
    link_ = 0;
    transparency_ = 0.0;
}
// Replace the shared shape with a private clone (via cloneMap) so this link's
// geometry can be modified without affecting the original shape node.
void SceneLink::cloneShape(SgCloneMap& cloneMap)
{
    if(orgShape_){
        removeChild(shape_);
        shape_ = orgShape_->cloneNode(cloneMap);
        addChild(shape_);
        notifyUpdate((SgUpdate::Action)(SgUpdate::ADDED | SgUpdate::REMOVED));
    }
}
// Attach a device's scene node to this link, lazily creating the device group
// child the first time a device is added.
void SceneLink::addSceneDevice(SceneDevice* sdev)
{
    if(!deviceGroup){
        deviceGroup = new SgGroup();
        addChild(deviceGroup);
    }
    sceneDevices_.push_back(sdev);
    deviceGroup->addChild(sdev);
}
// Linear search over this link's devices; returns 0 when the device is not
// attached to this link.
SceneDevice* SceneLink::getSceneDevice(Device* device)
{
    for(size_t i=0; i < sceneDevices_.size(); ++i){
        SceneDevice* sdev = sceneDevices_[i].get();
        if(sdev->device() == device){
            return sdev;
        }
    }
    return 0;
}
// Visible means the shape node is currently a child of this transform.
bool SceneLink::isVisible() const
{
    return (shape_ && contains(shape_));
}
// Show or hide the link's shape by adding/removing it as a child
// (the second argument requests an update notification).
void SceneLink::setVisible(bool on)
{
    if(shape_){
        if(on){
            addChildOnce(shape_, true);
        } else {
            removeChild(shape_, true);
        }
    }
}
// Set the link's rendering transparency. A transparency of 0 restores the
// original shared shape; any other value swaps in a transparent clone so the
// original shape data stays untouched.
void SceneLink::makeTransparent(float transparency, SgCloneMap& cloneMap)
{
    if(transparency == transparency_){
        return;  // no change requested
    }
    transparency_ = transparency;

    if(orgShape_){
        removeChild(shape_);
        if(transparency == 0.0){
            shape_ = orgShape_;
        } else {
            shape_ = orgShape_->cloneNode(cloneMap);
            cnoid::makeTransparent(shape_, transparency, cloneMap, true);
        }
        addChild(shape_);
        notifyUpdate((SgUpdate::Action)(SgUpdate::ADDED | SgUpdate::REMOVED));
    }
}
// Convenience overload that uses a throw-away clone map (non-node objects are
// not cloned).
void SceneLink::makeTransparent(float transparency)
{
    SgCloneMap cloneMap;
    cloneMap.setNonNodeCloning(false);
    makeTransparent(transparency, cloneMap);
}
namespace {
// Default SceneLink factory used by the plain SceneBody constructor.
SceneLink* createSceneLink(Link* link)
{
    return new SceneLink(link);
}
}
// Build the scene graph for a Body using the default SceneLink factory.
SceneBody::SceneBody(BodyPtr body)
{
    initialize(body, createSceneLink);
}
// Build the scene graph for a Body, letting the caller supply a custom
// SceneLink factory (e.g. to use a SceneLink subclass).
SceneBody::SceneBody(BodyPtr body, boost::function<SceneLink*(Link*)> sceneLinkFactory)
{
    initialize(body, sceneLinkFactory);
}
// Create a SceneLink child for every body link, then attach a SceneDevice for
// every device to the scene link of the link it is mounted on.
void SceneBody::initialize(BodyPtr& body, const boost::function<SceneLink*(Link*)>& sceneLinkFactory)
{
    body_ = body;
    setName(body->name());

    const int n = body->numLinks();
    for(int i=0; i < n; ++i){
        Link* link = body->link(i);
        SceneLink* sLink = sceneLinkFactory(link);
        addChild(sLink);
        sceneLinks_.push_back(sLink);
    }

    const DeviceList<Device>& devices = body->devices();
    for(size_t i=0; i < devices.size(); ++i){
        Device* device = devices.get(i);
        SceneDevice* sceneDevice = new SceneDevice(device);
        sceneLinks_[device->link()->index()]->addSceneDevice(sceneDevice);
        sceneDevices.push_back(sceneDevice);
    }
}
// Copy constructor for node cloning; only the transform base is copied
// (links/devices are not duplicated here).
SceneBody::SceneBody(const SceneBody& org)
    : SgPosTransform(org)
{

}
// Child nodes are reference counted, so no explicit cleanup is needed.
SceneBody::~SceneBody()
{

}
// Give every link its own shape clone (see SceneLink::cloneShape).
void SceneBody::cloneShapes(SgCloneMap& cloneMap)
{
    for(size_t i=0; i < sceneLinks_.size(); ++i){
        sceneLinks_[i]->cloneShape(cloneMap);
    }
}
// Copy each model link's current position into its scene node
// (no update notification).
void SceneBody::updateLinkPositions()
{
    const int n = sceneLinks_.size();
    for(int i=0; i < n; ++i){
        SceneLinkPtr& sLink = sceneLinks_[i];
        sLink->setPosition(sLink->link()->position());
    }
}
// Same as above, but also notifies the scene graph per link so renderers
// refresh.
void SceneBody::updateLinkPositions(SgUpdate& update)
{
    const int n = sceneLinks_.size();
    for(int i=0; i < n; ++i){
        SceneLinkPtr& sLink = sceneLinks_[i];
        sLink->setPosition(sLink->link()->position());
        sLink->notifyUpdate(update);
    }
}
// Look up the scene node of a device through the link it is mounted on.
// Guards against a negative index and avoids the signed/unsigned comparison
// of the original (int < size_t). Returns 0 when not found.
SceneDevice* SceneBody::getSceneDevice(Device* device)
{
    const int linkIndex = device->link()->index();
    if(linkIndex >= 0 && static_cast<size_t>(linkIndex) < sceneLinks_.size()){
        return sceneLinks_[linkIndex]->getSceneDevice(device);
    }
    return 0;
}
// Enable or disable the scene-update connection of every device node.
void SceneBody::setSceneDeviceUpdateConnection(bool on)
{
    for(size_t i=0; i < sceneDevices.size(); ++i){
        sceneDevices[i]->setSceneUpdateConnection(on);
    }
}
// Push the current device states into their scene representations.
void SceneBody::updateSceneDevices()
{
    for(size_t i=0; i < sceneDevices.size(); ++i){
        sceneDevices[i]->updateScene();
    }
}
// Apply the same transparency to every link of the body.
void SceneBody::makeTransparent(float transparency, SgCloneMap& cloneMap)
{
    for(size_t i=0; i < sceneLinks_.size(); ++i){
        sceneLinks_[i]->makeTransparent(transparency, cloneMap);
    }
}
// Convenience overload that uses a throw-away clone map (non-node objects are
// not cloned).
void SceneBody::makeTransparent(float transparency)
{
    SgCloneMap cloneMap;
    cloneMap.setNonNodeCloning(false);
    makeTransparent(transparency, cloneMap);
}
|
/**
* Maze class, handling the reading of a file and the creation of a map
* @author Samy Narrainen
*
*/
public class Maze implements Serializable {
private static final long serialVersionUID = 334664502184146667L;
BufferedReader reader;
//Represents the map in an integer format
public ArrayList<List<Integer>> maze;
// represents file on device
File mazeFile;
// res/levels directory for better file management
String levelDirectory;
public int columns, rows;
String levelSelection;
Random randomNo;
public ArrayList<List<Tile>> tiles;
// Level structure
final int EMPTY = 0;
final int MONSTER = 1;
final int TRAP = 2;
final int TREASURE = 3;
final int NULL = 4; // un-traversable.
/**
* Puts together the map reading methods to fully read a map from a file and translate it to a tile array.
* @param levelSelection, the map file to load
*/
public Maze(String levelSelection) {
this.levelSelection = levelSelection;
maze = new ArrayList<List<Integer>>();
if(maze.size() <= 0) readMaze(levelSelection);
tiles = new ArrayList<List<Tile>>();
if(tiles.size() <= 0) createTiles();
}
/**
* Reads the file and converts it into a 2d integer array
* @param levelSelection
*/
public void readMaze(String levelSelection) {
try {
// Setup directory
levelDirectory = new java.io.File(".").getCanonicalPath()
+ "\\res\\levels\\" + levelSelection;
levelDirectory = levelDirectory.replace('\\', '/');
mazeFile = new File(levelDirectory);
reader = new BufferedReader(new FileReader(mazeFile));
String x = "";
while (reader.ready()) {
x = reader.readLine();
List<Integer> lineList = new ArrayList<Integer>();
maze.add(lineList);
while (x.length() > 0) {
if (x.charAt(0) != ' ' && x.charAt(0) != '\r' && x.charAt(0) != '\n') {
lineList.add(Character.getNumericValue(x.charAt(0))); // converts char to integer then adds
x = x.substring(1);
} else {
x = x.substring(1);
}
}
}
System.out.println("maze = " + maze);
System.out.println("Columns: " + maze.get(0).size());
System.out.println("Rows: " + maze.size());
columns = maze.get(0).size();
rows = maze.size();
} catch (Exception e) {
System.out.println("Failed to read file or a map was loaded.");
}
}
/**
* Reads the 2d integer array and translates it into actual tiles
*/
public void createTiles() {
for (int i = 0; i < this.rows; i++) {
List<Integer> k = maze.get(i);
List<Tile> tileGroup = new ArrayList<Tile>();
for (int p = 0; p < k.size(); p++) {
try {
Tile t = new Tile();
if(northTileExists(i, p)) t.setNorthTile(true);
if(eastTileExists(i, p)) t.setEastTile(true);
if(southTileExists(i, p)) t.setSouthTile(true);
if(westTileExists(i, p)) t.setWestTile(true);
if(k.get(p) == 4) t.setNull(true);
t.setType(k.get(p));
tileGroup.add(t);
} catch (IndexOutOfBoundsException e) {}
}
tiles.add(tileGroup);
}
}
/**
* Methods to check whether or not a tile exists on an adjacent side
*/
public boolean northTileExists(int i, int p) {
try {
i--;
if(maze.get(i).get(p) != 4) return true;
} catch (IndexOutOfBoundsException e) {}
return false;
}
public boolean eastTileExists(int i, int p) {
try {
p++;
if(maze.get(i).get(p) != 4) return true;
} catch (IndexOutOfBoundsException e) {}
return false;
}
public boolean southTileExists(int i, int p) {
try {
i++;
if(maze.get(i).get(p) != 4) return true;
} catch (IndexOutOfBoundsException e) {}
return false;
}
public boolean westTileExists(int i, int p) {
try {
p--;
if(maze.get(i).get(p) != 4) return true;
} catch (IndexOutOfBoundsException e) {}
return false;
}
/** Returns true once no monster, trap or treasure tiles remain anywhere in the maze. */
public boolean isMazeCleared() {
    for (List<Tile> row : tiles) {
        for (Tile tile : row) {
            int type = tile.getType();
            if (type == MONSTER || type == TRAP || type == TREASURE) {
                return false;
            }
        }
    }
    return true;
}
} |
To those who don't already know. Yes I like My Little Pony: Friendship is Magic.GOOD NOW THAT THAT'S OUTTA THE WAY!This is something I made to a video on my account.While editing the video I wanted to match it to a scene from the original source. Some of the main characters were sleeping together on one bed.And in MLP the only part I could think of where more than 1 character shares a bed together is Rarity and AJ.Oh if only I could put other characters together on the same bed with some sorta...Photo editing software.Then I realized I could. So I did. Here it is.Made me think Twi's 2nd slumber party probably went something like this.And while making this I was listening to The Land Before Time's theme song "If We Hold On Together". Just thought I'd share.And please KEEP ALL INAPPROPRIATE REMARKS TO URSELF! >8LThey're all friends...snuggling together...in the same bed.MLP:FiM © Hasbro/Lauren Faust
/**
* Environment post processor capable of loading choices list from a custom file.<br/>
*
* @author Polyudov
* @implNote <ul>
* <li>There are no logs (by slf4j loggers) because the logging subsystem is not initialized yet</li>
* <li>Works only with <code>.yaml/.yml</code> files</li>
* </ul>
* @since v0.14
*/
public class ChoicesCustomConfigurationLoader implements EnvironmentPostProcessor {
private static final String CHOICES_RESOURCE_NAME = "choicesCustomResource";
private final PropertySourceLoader loader;
@SuppressWarnings("unused") //for Spring
public ChoicesCustomConfigurationLoader() {
this.loader = new YamlPropertySourceLoader();
}
@Override
public void postProcessEnvironment(ConfigurableEnvironment environment, SpringApplication application) {
String choicesPath = environment.getProperty("choices-source.location");
if (isNullOrEmpty(choicesPath)) {
return;
}
FileSystemResource resource = new FileSystemResource(choicesPath);
if (!resource.isReadable()) {
System.err.printf("Choices source ('%s') is not readable\n", choicesPath);
return;
}
if (!isExtensionCorrect(resource.getFilename())) {
System.err.printf("Choices source ('%s') file extension is not correct. Choices source must be an YAML-file\n", choicesPath);
return;
}
//Create new PropertySource or replace existing PropertySource
//if PropertySource with name 'choicesResource' already exists
loadPropertySource(resource)
.ifPresent(propertySource -> environment.getPropertySources().addFirst(propertySource));
}
private boolean isExtensionCorrect(String fileName) {
String extension = FilenameUtils.getExtension(fileName);
if (isNullOrEmpty(extension)) {
return false;
}
return Arrays.asList(loader.getFileExtensions()).contains(extension);
}
private Optional<PropertySource<?>> loadPropertySource(Resource resource) {
try {
List<PropertySource<?>> propertySources = loader.load(CHOICES_RESOURCE_NAME, resource);
if (isEmpty(propertySources)) {
return Optional.empty();
}
//There are more than one PropertySource as a result of load()-method in some cases
//because YAML is a multi-document format. But we use only the first document and ignore another ones
return Optional.ofNullable(propertySources.get(0));
} catch (Exception e) {
e.printStackTrace(); //logging subsystem is not initialized yet
return Optional.empty();
}
}
} |
def register_extension(cls, extension_class):
    """Register an extension class on this extractor class.

    The candidate must subclass ``BaseWaveformExtractorExtension``, declare a
    non-None ``extension_name``, and use a name not already registered.
    """
    assert issubclass(extension_class, BaseWaveformExtractorExtension)
    name = extension_class.extension_name
    assert name is not None, 'extension_name must not be None'
    registered_names = [ext.extension_name for ext in cls.extensions]
    assert name not in registered_names, 'Extension name already exists'
    cls.extensions.append(extension_class)
/// A description of the error resources variant value.
fn description(&self) -> &'static str {
match self {
ErrorResources::Orphaned => "An error left resources that cannot be destroyed",
ErrorResources::Remaining => "An error left resources that can be destroyed",
ErrorResources::Clear => "An error occurred but no resources were left behind",
ErrorResources::Unknown => "An error occurred but it is unknown if resources exist",
}
} |
<reponame>bstrie/falcon<filename>lib/loader/mod.rs
//! Loading executable binaries into Falcon.
//!
//! ```
//! # use falcon::error::*;
//! use falcon::loader::Elf;
//! use falcon::loader::Loader;
//! use std::path::Path;
//!
//! # fn example () -> Result<()> {
//! // Load an elf for analysis
//! let elf = Elf::from_file(Path::new("test_binaries/simple-0/simple-0"))?;
//! // Lift a program from the elf
//! let program = elf.program()?;
//! for function in program.functions() {
//! println!("0x{:08x}: {}", function.address(), function.name());
//! }
//! # Ok(())
//! # }
//! ```
use crate::architecture::Architecture;
use crate::error::*;
use crate::executor::eval;
use crate::il;
use crate::memory;
use crate::translator::Options;
use std::any::Any;
use std::collections::{HashMap, HashSet};
use std::fmt;
mod elf;
mod json;
mod pe;
mod symbol;
pub use self::elf::*;
pub use self::json::*;
pub use self::pe::*;
pub use self::symbol::Symbol;
/// A declared entry point for a function.
#[derive(Clone, Debug, PartialEq)]
pub struct FunctionEntry {
    /// Virtual address of the function's entry point.
    address: u64,
    /// Optional symbol name; `None` means the function is unnamed.
    name: Option<String>,
}
impl FunctionEntry {
    /// Create a new `FunctionEntry`.
    ///
    /// The name is stored as given; `None` means the entry is unnamed
    /// (downstream consumers may synthesize a placeholder such as
    /// `sup_{:X}` — confirm against the lifting code).
    pub fn new(address: u64, name: Option<String>) -> FunctionEntry {
        FunctionEntry { address, name }
    }
    /// Get the address for this `FunctionEntry`.
    pub fn address(&self) -> u64 {
        self.address
    }
    /// Get the name for this `FunctionEntry`.
    pub fn name(&self) -> Option<&str> {
        self.name.as_deref()
    }
}
impl fmt::Display for FunctionEntry {
    /// Formats as `FunctionEntry(name -> 0xADDR)` when named,
    /// otherwise `FunctionEntry(0xADDR)`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        if let Some(ref name) = self.name {
            write!(f, "FunctionEntry({} -> 0x{:X})", name, self.address)
        } else {
            write!(f, "FunctionEntry(0x{:X})", self.address)
        }
    }
}
/// Generic trait for all loaders
pub trait Loader: fmt::Debug + Send + Sync {
    /// Get a model of the memory contained in the binary
    fn memory(&self) -> Result<memory::backing::Memory>;
    /// Get addresses for known function entries
    fn function_entries(&self) -> Result<Vec<FunctionEntry>>;
    /// The address program execution should begin at
    fn program_entry(&self) -> u64;
    /// Get the architecture of the binary
    fn architecture(&self) -> &dyn Architecture;
    /// Lift just one function from the executable
    fn function(&self, address: u64) -> Result<il::Function> {
        self.function_extended(address, &Options::default())
    }
    /// Lift just one function from the executable, while also supplying
    /// translator options.
    fn function_extended(&self, address: u64, options: &Options) -> Result<il::Function> {
        let translator = self.architecture().translator();
        let memory = self.memory()?;
        Ok(translator.translate_function_extended(&memory, address, options)?)
    }
    /// Cast loader to `Any`
    fn as_any(&self) -> &dyn Any;
    /// Get the symbols for this loader
    fn symbols(&self) -> Vec<Symbol>;
    /// Get the symbols as a hashmap by address
    fn symbols_map(&self) -> HashMap<u64, Symbol> {
        self.symbols()
            .into_iter()
            .map(|symbol| (symbol.address(), symbol))
            .collect()
    }
    /// Lift executable into an il::Program.
    ///
    /// Individual functions which fail to lift are omitted and ignored.
    fn program(&self) -> Result<il::Program> {
        Ok(self.program_verbose(&Options::default())?.0)
    }
    /// Lift executable into an `il::Program`.
    ///
    /// Errors encountered while lifting specific functions are collected, and
    /// returned with the `FunctionEntry` identifying the function. Only
    /// catastrophic errors should cause this function call to fail.
    fn program_verbose(
        &self,
        options: &Options,
    ) -> std::result::Result<(il::Program, Vec<(FunctionEntry, Error)>), Error> {
        // Get our architecture-specific translator
        let translator = self.architecture().translator();
        // Create a mapping of the file memory
        let memory = self.memory()?;
        let mut program = il::Program::new();
        let mut translation_errors: Vec<(FunctionEntry, Error)> = Vec::new();
        for function_entry in self.function_entries()? {
            let address = function_entry.address();
            // Ensure this memory is marked executable; non-executable entries
            // are silently skipped.
            if memory
                .permissions(address)
                .map_or(false, |p| p.contains(memory::MemoryPermissions::EXECUTE))
            {
                // Lift the function; per-function failures are recorded, not fatal.
                match translator.translate_function_extended(&memory, address, options) {
                    Ok(mut function) => {
                        function.set_name(function_entry.name().map(|n| n.to_string()));
                        program.add_function(function);
                    }
                    Err(e) => translation_errors.push((function_entry.clone(), e)),
                };
            }
        }
        Ok((program, translation_errors))
    }
    /// Lift executable into an `il::Program`, while recursively resolving branch
    /// targets into functions.
    ///
    /// program_recursive silently drops any functions that cause lifting
    /// errors. If you care about those, use `program_recursive_verbose`.
    fn program_recursive(&self) -> Result<il::Program> {
        Ok(self.program_recursive_verbose(&Options::default())?.0)
    }
    /// Lift executable into an `il::Program`, while recursively resolving branch
    /// targets into functions.
    ///
    /// Works in a similar manner to `program_recursive`
    fn program_recursive_verbose(
        &self,
        options: &Options,
    ) -> std::result::Result<(il::Program, Vec<(FunctionEntry, Error)>), Error> {
        // Collect the branch targets in a lifted function whose target
        // expression evaluates to a constant (only those can be resolved
        // statically).
        fn call_targets(function: &il::Function) -> Result<Vec<u64>> {
            let call_targets =
                function
                    .blocks()
                    .iter()
                    .fold(Vec::new(), |mut call_targets, block| {
                        block.instructions().iter().for_each(|instruction| {
                            if let il::Operation::Branch { ref target } = *instruction.operation() {
                                if let Ok(constant) = eval(target) {
                                    call_targets.push(constant.value_u64().unwrap())
                                }
                            }
                        });
                        call_targets
                    });
            Ok(call_targets)
        }
        let (mut program, mut translation_errors) = self.program_verbose(options)?;
        let mut processed = HashSet::new();
        // Fixpoint loop: keep lifting newly-discovered branch targets until no
        // unseen target remains.
        loop {
            // Get the address of every function currently in the program
            let function_addresses = program
                .functions()
                .into_iter()
                .map(|function| function.address())
                .collect::<Vec<u64>>();
            let addresses = {
                // For every function in the program which is not currently a
                // member of our processed set
                let functions = program
                    .functions()
                    .into_iter()
                    .filter(|function| !processed.contains(&function.address()))
                    .collect::<Vec<&il::Function>>();
                // Insert this function into the processed set
                functions.iter().for_each(|function| {
                    processed.insert(function.address());
                });
                // Collect the call targets in all functions that have not yet
                // been processed, and filter them against the functions already
                // in program.
                let addresses = functions
                    .into_iter()
                    .fold(HashSet::new(), |mut targets, function| {
                        call_targets(function)
                            .unwrap()
                            .into_iter()
                            .for_each(|target| {
                                targets.insert(target);
                            });
                        targets
                    })
                    .into_iter()
                    .filter(|address| !function_addresses.contains(address))
                    .collect::<Vec<u64>>();
                // No new targets discovered: lifting has reached a fixpoint.
                if addresses.is_empty() {
                    break;
                }
                addresses
            };
            // For each address, attempt to lift a function
            for address in addresses {
                match self.function_extended(address, options) {
                    Ok(function) => program.add_function(function),
                    Err(e) => {
                        // Discovered targets have no symbol name, hence None.
                        let function_entry = FunctionEntry::new(address, None);
                        translation_errors.push((function_entry, e));
                    }
                }
            }
        }
        Ok((program, translation_errors))
    }
}
|
import java.util.Scanner;

public class Main {
    static Scanner scn = new Scanner(System.in);

    public static void main(String[] args) {
        String str = scn.nextLine(); //codingblocks
        char ch = scn.next().charAt(0);
        occuren(str, ch);
    }

    /**
     * Prints {@code str} with every occurrence of {@code ch} removed.
     *
     * Uses a StringBuilder instead of repeated String concatenation, which
     * was O(n^2) in the original implementation.
     *
     * @param str the input string
     * @param ch  the character to strip from {@code str}
     */
    public static void occuren(String str, char ch) {
        StringBuilder sb = new StringBuilder(str.length());
        for (int i = 0; i < str.length(); i++) {
            char c = str.charAt(i);
            if (c != ch) {
                sb.append(c);
            }
        }
        System.out.println(sb.toString());
    }
}
|
/** The component that masks the content panel when the left panel of the {@link Layout} is open */
public class Overlay extends BaseDominoElement<HTMLDivElement, Overlay>
implements IsElement<HTMLDivElement> {
private HTMLDivElement element;
/** */
public Overlay() {
element = div().css(LayoutStyles.OVERLAY).element();
init(this);
}
/** @return new Overlay instance */
public static Overlay create() {
return new Overlay();
}
/** {@inheritDoc} */
@Override
public HTMLDivElement element() {
return element;
}
} |
class EvgService:
    """A service for working with evergreen data."""

    @inject.autoparams()
    def __init__(self, evg_api: EvergreenApi, evg_cli_service: EvgCliService) -> None:
        """
        Initialize the service.

        :param evg_api: Evergreen API Client.
        :param evg_cli_service: Service for working with the evergreen CLI.
        """
        self.evg_api = evg_api
        self.evg_cli_service = evg_cli_service
        # Per-instance memoization of project lookups. A functools.lru_cache on
        # the method would key on `self` and keep every instance alive for the
        # lifetime of the cache (flake8-bugbear B019), so cache here instead.
        self._project_cache: Dict[str, Project] = {}

    def get_project_config_location(self, project_id: str) -> str:
        """
        Get the path to the evergreen config file for this project.

        :param project_id: ID of Evergreen project being queried.
        :return: Path to project config file.
        """
        return self.get_evg_project(project_id).remote_path

    def get_project_branch(self, project_id: str) -> str:
        """
        Get the git branch a project works from.

        :param project_id: ID of project to lookup.
        :return: Branch name a project works from.
        """
        return self.get_evg_project(project_id).branch_name

    def get_evg_project(self, project_id: str) -> Project:
        """
        Get the project configuration for the given evergreen project.

        Results are cached per instance, so repeated lookups for the same
        project hit the Evergreen API only once.

        :param project_id: ID of project to lookup.
        :return: Project configuration for specified project.
        :raises ValueError: If no unique project configuration matches.
        """
        cached = self._project_cache.get(project_id)
        if cached is not None:
            return cached
        project_config_list = self.evg_api.all_projects(
            project_filter_fn=lambda p: p.identifier == project_id
        )
        if len(project_config_list) != 1:
            raise ValueError(f"Could not find unique project configuration for : '{project_id}'.")
        project_config = project_config_list[0]
        self._project_cache[project_id] = project_config
        return project_config

    def get_module_locations(self, project_id: str) -> Dict[str, str]:
        """
        Get the paths that project modules are stored.

        :param project_id: ID of project to query.
        :return: Dictionary of modules and their paths.
        """
        module_map = self.get_module_map(project_id)
        return {module.name: module.prefix for module in module_map.values()}

    def get_module_map(self, project_id: str) -> Dict[str, EvgModule]:
        """
        Get a dictionary of known modules and data about them.

        :param project_id: Evergreen ID of project being queried.
        :return: Dictionary of module names to module data.
        """
        project_config_location = self.get_project_config_location(project_id)
        # `evaluate` expands the project YAML via the evergreen CLI before parsing.
        project_config = yaml.safe_load(
            self.evg_cli_service.evaluate(Path(project_config_location))
        )
        return {module["name"]: EvgModule(**module) for module in project_config.get("modules", [])}

    def get_manifest(self, project_id: str, commit_hash: str) -> Manifest:
        """
        Get the manifest for the given commit and evergreen project.

        :param project_id: Evergreen project to query.
        :param commit_hash: Evergreen commit to query.
        :return: Evergreen manifest for given commit.
        """
        return self.evg_api.manifest(project_id, commit_hash)
<reponame>hazelcast-incubator/cascading-prototype<gh_stars>1-10
package com.hazelcast.yarn.wordcount.offheap;
import java.nio.ByteBuffer;
import java.util.Map;
import com.hazelcast.yarn.impl.YarnUtil;
import com.hazelcast.yarn.api.dag.Vertex;
import com.hazelcast.yarn.api.tuple.Tuple;
import com.hazelcast.yarn.api.tuple.HDTuple;
import java.util.concurrent.atomic.AtomicInteger;
import com.hazelcast.util.collection.Int2ObjectHashMap;
import com.hazelcast.yarn.api.tuple.io.TupleInputStream;
import com.hazelcast.yarn.api.tuple.io.TupleOutputStream;
import com.hazelcast.yarn.api.container.ContainerContext;
import com.hazelcast.yarn.api.processor.TupleContainerProcessor;
import com.hazelcast.yarn.api.processor.TupleContainerProcessorFactory;
import com.hazelcast.yarn.impl.tuple.Tuple2;
import sun.nio.ch.DirectBuffer;
/**
 * Word-count processor that keeps its counts in an off-heap map.
 *
 * Each processor instance owns one {@code Object2LongOffHeapMap} and publishes
 * it in the shared {@code caches} registry; at finalization every instance
 * merges counts across all caches and emits each key exactly once (ownership
 * goes to the lowest-indexed processor that holds the key).
 */
public class WordCounterProcessor implements TupleContainerProcessor<Object, Object, Object, Object> {
    // Hands out a unique index to each processor instance.
    private static final AtomicInteger processCounter = new AtomicInteger(0);

    // This processor's slot in the shared `caches` registry.
    private final int index;

    // Start timestamp used in the finalization log line.
    // NOTE(review): appears to be set externally before finalization — confirm.
    public static volatile long time;

    // Tasks still running; the last one to finish clears the shared registry.
    private static final AtomicInteger taskCounter = new AtomicInteger(0);

    // Off-heap word -> count map owned by this processor.
    private final Object2LongOffHeapMap cache = new Object2LongOffHeapMap(65536 * 256);

    // All processors' caches, keyed by processor index, for cross-task merging.
    private final static Map<Integer, Object2LongOffHeapMap> caches = new Int2ObjectHashMap<Object2LongOffHeapMap>();

    private volatile int taskCount;

    public WordCounterProcessor() {
        this.index = processCounter.getAndIncrement();
    }

    @Override
    public void beforeProcessing(ContainerContext containerContext) {
        // Record the task count and publish this processor's cache so sibling
        // tasks can merge against it during finalization.
        taskCounter.set(containerContext.getVertex().getDescriptor().getTaskCount());
        taskCount = containerContext.getVertex().getDescriptor().getTaskCount();
        caches.put(index, cache);
    }

    @Override
    public boolean process(TupleInputStream<Object, Object> inputStream,
                           TupleOutputStream<Object, Object> outputStream,
                           String sourceName,
                           ContainerContext containerContext) throws Exception {
        long cTime = System.currentTimeMillis();

        // Increment the off-heap count for each incoming tuple's key.
        for (Tuple<Object, Object> tuple : inputStream) {
            HDTuple hdTuple = (HDTuple) tuple;
            long count = this.cache.get(hdTuple.keyAddress(), hdTuple.keyObjectSize());

            if (count < 0) {
                // A negative result is treated as "key absent": start at 1.
                this.cache.put(hdTuple.keyAddress(), hdTuple.keyObjectSize(), 1L);
            } else {
                this.cache.put(hdTuple.keyAddress(), hdTuple.keyObjectSize(), count + 1);
            }
        }

        System.out.println("cTime=" + (System.currentTimeMillis() - cTime) + " chunk=" + inputStream.size());
        return true;
    }

    @Override
    public boolean finalizeProcessor(TupleOutputStream<Object, Object> outputStream,
                                     ContainerContext containerContext) throws Exception {
        try {
            System.out.println("Finalization=" + (System.currentTimeMillis() - time) + " size=" + this.cache.size());
            long tt = System.currentTimeMillis();

            // First pass: compute the exact output size: key bytes + decimal
            // digits of the count + 3 (separator space plus \r\n).
            Object2LongOffHeapMap.KeySetIterator it = new Object2LongOffHeapMap.KeySetIterator(this.cache);
            int totalSize = 0;
            while (it.hasNext()) {
                long keyAddress = it.nextKeyAddress();
                long keySize = it.getKeyDataSize();
                totalSize += keySize;
                totalSize += getCharSize(cache.get(keyAddress, keySize));
                totalSize += 3;
            }

            int position = 0;
            byte space = " ".getBytes()[0];
            byte nr = "\r".getBytes()[0];
            byte nn = "\n".getBytes()[0];

            ByteBuffer bb = ByteBuffer.allocateDirect(totalSize);

            // Second pass: merge each owned key's counts across all caches and
            // write "<key> <count>\r\n" records into the direct buffer.
            it = new Object2LongOffHeapMap.KeySetIterator(this.cache);
            while (it.hasNext()) {
                long keyAddress = it.nextKeyAddress();
                long keySize = it.getKeyDataSize();

                // Skip keys owned by a lower-indexed processor so each key is
                // emitted exactly once overall.
                if (this.checkLeft(keyAddress, keySize)) {
                    continue;
                }

                long count = merge(keyAddress, keySize);
                int size = getCharSize(count);
                long address = getAddress(count, size);

                if (keySize > 0) {
                    YarnUtil.getUnsafe().copyMemory(keyAddress, ((DirectBuffer) bb).address() + position, keySize);
                    position += keySize;
                    bb.position(position);
                }

                bb.put(space);
                // NOTE(review): `position` is not advanced after bb.put(space),
                // so this copy appears to start at the space byte's offset —
                // verify the separator is not being overwritten.
                YarnUtil.getUnsafe().copyMemory(address, ((DirectBuffer) bb).address() + position, size);
                position += size;
                bb.position(position);
                bb.put(nr);
                bb.put(nn);
                position = bb.position();
            }

            bb.position(0);
            outputStream.consume(new Tuple2<Object, Object>("", bb));
            System.out.println("FinalizationDone=" + (System.currentTimeMillis() - tt));
        } finally {
            try {
                //this.cache.clear();
            } finally {
                // The last task to finish tears down the shared cache registry.
                if (taskCounter.decrementAndGet() == 0) {
                    caches.clear();
                }
            }
        }

        return true;
    }

    // Writes `count` as ASCII decimal digits into freshly allocated native
    // memory of `size` bytes and returns its address.
    // NOTE(review): this allocation is never freed here — confirm ownership.
    private long getAddress(long count, int size) {
        long address = YarnUtil.getUnsafe().allocateMemory(size);
        int pointer = 1;

        do {
            // '0' is ASCII 48; digits are written least-significant first,
            // from the end of the buffer backwards.
            YarnUtil.getUnsafe().putByte(size + address - pointer, (byte) (count % 10 + 48));
            count = count / 10;
            pointer++;
        } while (count > 0);

        return address;
    }

    // Number of decimal digits in `count` (at least 1).
    private int getCharSize(long count) {
        int size = 0;

        do {
            size++;
            count = count / 10;
        } while (count > 0);

        return size;
    }

    // Sums this key's counts from this processor's cache and all
    // higher-indexed processors' caches.
    private long merge(long keyAddress, long keySize) {
        long result = 0;

        for (int i = index; i < taskCount; i++) {
            long count = caches.get(i).get(keyAddress, keySize);

            if (count > 0) {
                result += count;
            }
        }

        return result;
    }

    // True if any lower-indexed processor also holds this key, in which case
    // that processor is responsible for emitting it.
    private boolean checkLeft(long keyAddress, long keySize) {
        for (int i = 0; i < index; i++) {
            if (caches.get(i).get(keyAddress, keySize) > 0) {
                return true;
            }
        }

        return false;
    }

    // Factory used by the framework to create processor instances per task.
    public static class Factory implements TupleContainerProcessorFactory {
        @Override
        public TupleContainerProcessor getProcessor(Vertex vertex) {
            return new WordCounterProcessor();
        }
    }
}
|
Language Learning Strategies Employed by Yemeni EFL Learners in Developing Their Spoken English: A Case Study of Aden University EFL Learners
Language learning is not a spoon-fed process in which learners get everything from their teachers. It is a student-centered process in which learners should play active roles and employ various learning strategies that help them enhance their target foreign language. For this reason, this study was designed to investigate the language learning strategies employed by the Yemeni EFL learners in developing their spoken English. Data were collected through a questionnaire that targeted 120 fourth-year EFL students of three faculties of Aden University. The results showed that the majority of the concerned students do not make a balance among their language learning strategies and they depend heavily on memory strategies. A correlation was found between students’ spoken English proficiency and the learning strategies they use in developing their spoken English, in favor of the students who make use of various types of language learning strategies. As per these findings, this study recommends Yemeni EFL learners to employ various effective language learning strategies to enhance their spoken English and not to limit their spoken English development to memory strategies. It also recommends Yemeni EFL teachers to guide their students to the effective language learning strategies that help them enhance their English.
/**
 * Signs off the client from the database (deletes the client's row
 * in the Client table)
 */
void sign_off() {
	/* Bracket the delete in defer_signals()/reset_signal_handler() so a
	 * handler cannot interrupt the row deletion mid-update
	 * (NOTE(review): confirm the exact contract of defer_signals). */
	defer_signals();
	delete_client(client_id);
	reset_signal_handler();
	log_message(LOG_IMPORTANT, "Signed off");
}
def rotate(self, arg=None, **opt):
    """Prepend a rotation to this transform and return the applied Transform.

    The angle may be given positionally (interpreted in the canvas's current
    angle mode, ``_ctx._thetamode``) or as exactly one of the keyword
    arguments ``degrees``, ``radians`` or ``percent`` (fraction of a full
    turn). Raises ``DeviceError`` if more than one unit kwarg is supplied.
    """
    # Split the angle-unit kwargs out of the option dict.
    units = {k:v for k,v in opt.items() if k in ['degrees', 'radians', 'percent']}
    if len(units) > 1:
        badunits = 'rotate: specify one rotation at a time (got %s)' % " & ".join(units.keys())
        raise DeviceError(badunits)
    if not units:
        # No explicit unit: fall back to the canvas's current theta mode.
        units[_ctx._thetamode] = arg or 0
    degrees = units.get('degrees', 0)
    radians = units.get('radians', 0)
    if 'percent' in units:
        # 'percent' is a fraction of a full revolution (tau radians).
        degrees, radians = 0, tau*units['percent']
    xf = Transform()
    # Angles are negated — presumably to match the canvas's flipped
    # coordinate system; confirm against the drawing context.
    if degrees:
        xf._nsAffineTransform.rotateByDegrees_(-degrees)
    else:
        xf._nsAffineTransform.rotateByRadians_(-radians)
    if opt.get('rollback'):
        # Stash a copy of the prior state so the caller can restore it
        # (NOTE(review): used by context-manager machinery — confirm).
        xf._rollback = {"_transform":self.copy()}
    self.prepend(xf)
    return xf
package eth
import (
"context"
"fmt"
"math/big"
"time"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/golang/glog"
)
// type LibraryType uint8
// const (
// Node LibraryType = iota
// MaxHeap
// MinHeap
// TranscoderPools
// JobLib
// MerkleProof
// ECRecovery
// SafeMath
// )
// func deployLibrary(transactOpts *bind.TransactOpts, backend *ethclient.Client, name LibraryType, libraries map[string]common.Address) (common.Address, *types.Transaction, error) {
// var (
// addr common.Address
// tx *types.Transaction
// err error
// )
// switch name {
// case Node:
// addr, tx, _, err = contracts.DeployNode(transactOpts, backend, libraries)
// glog.Infof("Deploying Node at %v", addr.Hex())
// case MaxHeap:
// addr, tx, _, err = contracts.DeployMaxHeap(transactOpts, backend, libraries)
// glog.Infof("Deploying MaxHeap at %v", addr.Hex())
// case MinHeap:
// addr, tx, _, err = contracts.DeployMinHeap(transactOpts, backend, libraries)
// glog.Infof("Deploying MinHeap at %v", addr.Hex())
// case TranscoderPools:
// addr, tx, _, err = contracts.DeployTranscoderPools(transactOpts, backend, libraries)
// glog.Infof("Deploying TranscoderPools at %v", addr.Hex())
// case JobLib:
// addr, tx, _, err = contracts.DeployJobLib(transactOpts, backend, libraries)
// glog.Infof("Deploying JobLib at %v", addr.Hex())
// case MerkleProof:
// addr, tx, _, err = contracts.DeployMerkleProof(transactOpts, backend, libraries)
// glog.Infof("Deploying MerkleProof at %v", addr.Hex())
// case ECRecovery:
// addr, tx, _, err = contracts.DeployECRecovery(transactOpts, backend, libraries)
// glog.Infof("Deploying ECRecovery at %v", addr.Hex())
// case SafeMath:
// addr, tx, _, err = contracts.DeploySafeMath(transactOpts, backend, libraries)
// glog.Infof("Deploying SafeMath at %v", addr.Hex())
// default:
// err = fmt.Errorf("Invalid library type: %v", name)
// glog.Errorf(err.Error())
// return common.Address{}, nil, err
// }
// if err != nil {
// glog.Errorf("Error deploying library: %v", err)
// return common.Address{}, nil, err
// }
// return addr, tx, nil
// }
// func deployIdentityVerifier(transactOpts *bind.TransactOpts, backend *ethclient.Client) (common.Address, *types.Transaction, error) {
// addr, tx, _, err := contracts.DeployIdentityVerifier(transactOpts, backend, nil)
// glog.Infof("Deploying IdentityVerifier at %v", addr.Hex())
// if err != nil {
// glog.Errorf("Error deploying IdentityVerifier: %v", err)
// return common.Address{}, nil, err
// }
// return addr, tx, nil
// }
// func deployLivepeerToken(transactOpts *bind.TransactOpts, backend *ethclient.Client) (common.Address, *types.Transaction, error) {
// addr, tx, _, err := contracts.DeployLivepeerToken(transactOpts, backend, nil)
// glog.Infof("Deploying LivepeerToken at %v", addr.Hex())
// if err != nil {
// glog.Errorf("Error deploying LivepeerToken: %v", err)
// return common.Address{}, nil, err
// }
// return addr, tx, nil
// }
// func deployLivepeerProtocol(transactOpts *bind.TransactOpts, backend *ethclient.Client) (common.Address, *types.Transaction, error) {
// addr, tx, _, err := contracts.DeployLivepeerProtocol(transactOpts, backend, nil)
// glog.Infof("Deploying LivepeerProtocol at %v", addr.Hex())
// if err != nil {
// glog.Errorf("Error deploying LivepeerProtocol: %v", err)
// return common.Address{}, nil, err
// }
// return addr, tx, nil
// }
// func deployBondingManager(transactOpts *bind.TransactOpts, backend *ethclient.Client, libraries map[string]common.Address, registry common.Address, token common.Address, numActiveTranscoders *big.Int, unbondingPeriod uint64) (common.Address, *types.Transaction, error) {
// addr, tx, _, err := contracts.DeployBondingManager(transactOpts, backend, libraries, registry, token, numActiveTranscoders, unbondingPeriod)
// glog.Infof("Deploying BondingManager at %v", addr.Hex())
// if err != nil {
// glog.Errorf("Error deploying BondingManager: %v", err)
// return common.Address{}, nil, err
// }
// return addr, tx, nil
// }
// func deployJobsManager(transactOpts *bind.TransactOpts, backend *ethclient.Client, libraries map[string]common.Address, registry common.Address, token common.Address, verifier common.Address, verificationRate uint64, jobEndingPeriod *big.Int, verificationPeriod *big.Int, slashingPeriod *big.Int, failedVerificationSlashAmount uint64, missedVerificationSlashAmount uint64, finderFee uint64) (common.Address, *types.Transaction, error) {
// addr, tx, _, err := contracts.DeployJobsManager(transactOpts, backend, libraries, registry, token, verifier, verificationRate, jobEndingPeriod, verificationPeriod, slashingPeriod, failedVerificationSlashAmount, missedVerificationSlashAmount, finderFee)
// glog.Infof("Deploying JobsManager at %v", addr.Hex())
// if err != nil {
// glog.Infof("Error deploying JobsManager: %v", err)
// return common.Address{}, nil, err
// }
// return addr, tx, nil
// }
// func deployRoundsManager(transactOpts *bind.TransactOpts, backend *ethclient.Client, libraries map[string]common.Address, registry common.Address, blockTime *big.Int, roundLength *big.Int) (common.Address, *types.Transaction, error) {
// addr, tx, _, err := contracts.DeployRoundsManager(transactOpts, backend, libraries, registry, blockTime, roundLength)
// glog.Infof("Deploying RoundsManager at %v", addr.Hex())
// if err != nil {
// glog.Errorf("Error deploying RoundsManager: %v", err)
// return common.Address{}, nil, err
// }
// return addr, tx, nil
// }
// func initProtocol(transactOpts *bind.TransactOpts, backend *ethclient.Client, rpcTimeout time.Duration, minedTxTimeout time.Duration, protocolAddr common.Address, tokenAddr common.Address, bondingManagerAddr common.Address, jobsManagerAddr common.Address, roundsManagerAddr common.Address) error {
// protocol, err := contracts.NewLivepeerProtocol(protocolAddr, backend)
// if err != nil {
// glog.Errorf("Error creating LivepeerProtocol: %v", err)
// return err
// }
// tx, err := protocol.SetContract(transactOpts, crypto.Keccak256Hash([]byte("BondingManager")), bondingManagerAddr)
// if err != nil {
// glog.Errorf("Error adding BondingManager to registry: %v", err)
// return err
// }
// _, err = WaitForMinedTx(backend, rpcTimeout, minedTxTimeout, tx.Hash(), tx.Gas())
// if err != nil {
// glog.Errorf("Error waiting for mined SetContract tx: %v", err)
// return err
// }
// tx, err = protocol.SetContract(transactOpts, crypto.Keccak256Hash([]byte("JobsManager")), jobsManagerAddr)
// if err != nil {
// glog.Errorf("Error adding JobsManager to registry: %v", err)
// return err
// }
// _, err = WaitForMinedTx(backend, rpcTimeout, minedTxTimeout, tx.Hash(), tx.Gas())
// if err != nil {
// glog.Errorf("Error waiting for mined SetContract tx: %v", err)
// return err
// }
// tx, err = protocol.SetContract(transactOpts, crypto.Keccak256Hash([]byte("RoundsManager")), roundsManagerAddr)
// if err != nil {
// glog.Errorf("Error adding RoundsManager to registry: %v", err)
// return err
// }
// _, err = WaitForMinedTx(backend, rpcTimeout, minedTxTimeout, tx.Hash(), tx.Gas())
// if err != nil {
// glog.Errorf("Error waiting for mined SetContract tx: %v", err)
// return err
// }
// token, err := contracts.NewLivepeerToken(tokenAddr, backend)
// if err != nil {
// glog.Errorf("Error creating LivepeerToken: %v", err)
// return err
// }
// tx, err = token.TransferOwnership(transactOpts, bondingManagerAddr)
// if err != nil {
// glog.Errorf("Error transfering ownership of LivepeerToken: %v", err)
// return err
// }
// _, err = WaitForMinedTx(backend, rpcTimeout, minedTxTimeout, tx.Hash(), tx.Gas())
// if err != nil {
// glog.Errorf("Error waiting for TransferOwnership tx: %v", err)
// return err
// }
// tx, err = protocol.Unpause(transactOpts)
// if err != nil {
// glog.Errorf("Error unpausing LivepeerProtocol: %v", err)
// return err
// }
// _, err = WaitForMinedTx(backend, rpcTimeout, minedTxTimeout, tx.Hash(), tx.Gas())
// if err != nil {
// glog.Errorf("Error waiting for Unapuse tx: %v", err)
// return err
// }
// return nil
// }
// // To be called by account that deploys LivepeerToken
// func distributeTokens(transactOpts *bind.TransactOpts, backend *ethclient.Client, rpcTimeout time.Duration, minedTxTimeout time.Duration, tokenAddr common.Address, mintAmount *big.Int, accounts []accounts.Account) error {
// token, err := contracts.NewLivepeerToken(tokenAddr, backend)
// if err != nil {
// glog.Errorf("Error creating LivepeerToken: %v", err)
// }
// var tx *types.Transaction
// tx, err = token.Mint(transactOpts, account.Address, mintAmount)
// if err != nil {
// glog.Errorf("Error minting tokens: %v", err)
// return err
// }
// _, err = WaitForMinedTx(backend, rpcTimeout, minedTxTimeout, tx.Hash(), tx.Gas())
// if err != nil {
// glog.Errorf("Error waiting for mined mint tx: %v", err)
// return err
// }
// }
// return nil
// }
// WaitUntilBlockMultiple blocks until the chain head reaches the next block
// number that is a multiple of blockMultiple. Each RPC call is bounded by
// rpcTimeout. Note: this polls in a tight loop with no sleep between calls.
func WaitUntilBlockMultiple(backend *ethclient.Client, rpcTimeout time.Duration, blockMultiple *big.Int) error {
	// Helper that fetches the current head block number with a bounded context.
	currentBlockNum := func() (*big.Int, error) {
		ctx, cancel := context.WithTimeout(context.Background(), rpcTimeout)
		// Release the context's resources as soon as the call returns instead
		// of leaking a timer per iteration (go vet: lostcancel).
		defer cancel()
		block, err := backend.BlockByNumber(ctx, nil)
		if err != nil {
			return nil, err
		}
		return block.Number(), nil
	}

	blockNum, err := currentBlockNum()
	if err != nil {
		return err
	}

	targetBlockNum := NextBlockMultiple(blockNum, blockMultiple)
	glog.Infof("Waiting until next round at block %v...", targetBlockNum)

	for blockNum.Cmp(targetBlockNum) == -1 {
		blockNum, err = currentBlockNum()
		if err != nil {
			return err
		}
	}

	return nil
}
// Wait blocks until `blocks` additional blocks have been mined past the chain
// head observed at entry. Each RPC call is bounded by rpcTimeout. Note: this
// polls in a tight loop with no sleep between calls.
func Wait(backend *ethclient.Client, rpcTimeout time.Duration, blocks *big.Int) error {
	// Helper that fetches the current head block number with a bounded context.
	currentBlockNum := func() (*big.Int, error) {
		ctx, cancel := context.WithTimeout(context.Background(), rpcTimeout)
		// Release the context's resources as soon as the call returns instead
		// of leaking a timer per iteration (go vet: lostcancel).
		defer cancel()
		block, err := backend.BlockByNumber(ctx, nil)
		if err != nil {
			return nil, err
		}
		return block.Number(), nil
	}

	blockNum, err := currentBlockNum()
	if err != nil {
		return err
	}

	targetBlockNum := new(big.Int).Add(blockNum, blocks)
	glog.Infof("Waiting %v blocks...", blocks)

	for blockNum.Cmp(targetBlockNum) == -1 {
		blockNum, err = currentBlockNum()
		if err != nil {
			return err
		}
	}

	return nil
}
// CheckRoundAndInit initializes the current round on the client if it has not
// been initialized yet; it is a no-op when the round is already initialized.
func CheckRoundAndInit(client LivepeerEthClient) error {
	initialized, err := client.CurrentRoundInitialized()
	if err != nil {
		return fmt.Errorf("Client failed CurrentRoundInitialized: %v", err)
	}
	if initialized {
		return nil
	}
	receiptCh, errCh := client.InitializeRound()
	select {
	case <-receiptCh:
		return nil
	case err := <-errCh:
		return err
	}
}
// WaitForMinedTx polls for the receipt of txHash (once per second, each RPC
// bounded by rpcTimeout) until it is mined or minedTxTimeout elapses.
// A zero tx hash returns (nil, nil). Returns an error if the transaction is
// not mined within minedTxTimeout, and treats a receipt that consumed all
// provided gas as a thrown transaction.
func WaitForMinedTx(backend *ethclient.Client, rpcTimeout time.Duration, minedTxTimeout time.Duration, txHash common.Hash, gas *big.Int) (*types.Receipt, error) {
	// A zero hash means there is nothing to wait for.
	if txHash == (common.Hash{}) {
		return nil, nil
	}

	var receipt *types.Receipt
	start := time.Now()
	for time.Since(start) < minedTxTimeout {
		ctx, cancel := context.WithTimeout(context.Background(), rpcTimeout)
		r, err := backend.TransactionReceipt(ctx, txHash)
		// Release the context's resources immediately (go vet: lostcancel).
		cancel()
		if err != nil && err != ethereum.NotFound {
			return nil, err
		}
		if r != nil {
			receipt = r
			break
		}
		time.Sleep(time.Second)
	}

	// The original code dereferenced receipt unconditionally and panicked with
	// a nil pointer when the transaction was never mined within the timeout.
	if receipt == nil {
		return nil, fmt.Errorf("Transaction %v not mined within %v", txHash.Hex(), minedTxTimeout)
	}

	// Heuristic: a transaction that consumed all of its gas is assumed to
	// have thrown.
	if gas.Cmp(receipt.GasUsed) == 0 {
		return receipt, fmt.Errorf("Transaction %v threw", txHash.Hex())
	}
	return receipt, nil
}
func NextBlockMultiple(blockNum *big.Int, blockMultiple *big.Int) *big.Int {
if blockMultiple.Cmp(big.NewInt(0)) == 0 {
return blockNum
}
remainder := new(big.Int).Mod(blockNum, blockMultiple)
if remainder.Cmp(big.NewInt(0)) == 0 {
return blockNum
}
return new(big.Int).Sub(blockNum.Add(blockNum, blockMultiple), remainder)
}
// IsNullAddress reports whether addr is the all-zero address.
func IsNullAddress(addr common.Address) bool {
	var zero common.Address
	return addr == zero
}
|
Subsets and Splits