repo_name | file_path | context | import_statement | code | prompt | next_line
---|---|---|---|---|---|---|
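Each row below pairs a `prompt` (cross-file context quoted as comments, an import block, and the in-file code prefix) with the gold `next_line` a completion model is expected to produce. A minimal sketch of the whitespace-trimmed exact-match scoring such next-line benchmarks commonly use; the class and method names here are illustrative, not part of the dataset:

```java
import java.util.List;

// Illustrative helper (not from the dataset): scores predicted next lines
// against the gold "next_line" column by whitespace-trimmed exact match.
public final class NextLineEval {
    public static double exactMatch(List<String> predictions, List<String> gold) {
        if (predictions.size() != gold.size()) {
            throw new IllegalArgumentException("prediction/gold size mismatch");
        }
        if (predictions.isEmpty()) {
            return 0.0;
        }
        int hits = 0;
        for (int i = 0; i < predictions.size(); i++) {
            if (predictions.get(i).trim().equals(gold.get(i).trim())) {
                hits++;
            }
        }
        return (double) hits / predictions.size();
    }

    public static void main(String[] args) {
        // Gold next line from the first row of this table.
        List<String> gold = List.of("HyperLogLogService service = getService();");
        List<String> pred = List.of("HyperLogLogService service = getService();");
        System.out.println(exactMatch(pred, gold)); // prints 1.0
    }
}
```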
buremba/hazelcast-modules | src/main/java/org/rakam/cache/hazelcast/hyperloglog/operations/HyperLogLogReplicationOperation.java | // Path: src/main/java/org/rakam/cache/hazelcast/hyperloglog/HyperLogLogService.java
// public class HyperLogLogService implements ManagedService, RemoteService, MigrationAwareService {
//
// public static final String SERVICE_NAME = "rakam:hyperLogLogService";
//
// private NodeEngine nodeEngine;
// private final ConcurrentMap<String, HLLWrapper> containers = new ConcurrentHashMap<String, HLLWrapper>();
// private final ConstructorFunction<String, HLLWrapper> CountersConstructorFunction =
// new ConstructorFunction<String, HLLWrapper>() {
// public HLLWrapper createNew(String key) {
// return new HLLWrapper();
// }
// };
//
// public HyperLogLogService() {
// }
//
// public HLLWrapper getHLL(String name) {
// return getOrPutIfAbsent(containers, name, CountersConstructorFunction);
// }
//
// @Override
// public void init(NodeEngine nodeEngine, Properties properties) {
// this.nodeEngine = nodeEngine;
// }
//
// @Override
// public void reset() {
// containers.clear();
// }
//
// @Override
// public void shutdown(boolean terminate) {
// reset();
// }
//
// @Override
// public HyperLogLogProxy createDistributedObject(String name) {
// return new HyperLogLogProxy(name, nodeEngine, this);
// }
//
// @Override
// public void destroyDistributedObject(String name) {
// containers.remove(name);
// }
//
// @Override
// public void beforeMigration(PartitionMigrationEvent partitionMigrationEvent) {
// }
//
// @Override
// public Operation prepareReplicationOperation(PartitionReplicationEvent event) {
//
// Map<String, byte[]> data = new HashMap<String, byte[]>();
// int partitionId = event.getPartitionId();
// for (String name : containers.keySet()) {
// if (partitionId == getPartitionId(name)) {
// HLLWrapper number = containers.get(name);
// data.put(name, number.bytes());
// }
// }
// return data.isEmpty() ? null : new HyperLogLogReplicationOperation(data);
// }
//
// private int getPartitionId(String name) {
// InternalPartitionService partitionService = nodeEngine.getPartitionService();
// String partitionKey = getPartitionKey(name);
// return partitionService.getPartitionId(partitionKey);
// }
//
// @Override
// public void commitMigration(PartitionMigrationEvent partitionMigrationEvent) {
// if (partitionMigrationEvent.getMigrationEndpoint() == MigrationEndpoint.SOURCE) {
// removePartition(partitionMigrationEvent.getPartitionId());
// }
// }
//
// @Override
// public void rollbackMigration(PartitionMigrationEvent partitionMigrationEvent) {
// if (partitionMigrationEvent.getMigrationEndpoint() == MigrationEndpoint.DESTINATION) {
// removePartition(partitionMigrationEvent.getPartitionId());
// }
// }
//
// @Override
// public void clearPartitionReplica(int partitionId) {
// removePartition(partitionId);
// }
//
// public void removePartition(int partitionId) {
// final Iterator<String> iterator = containers.keySet().iterator();
// while (iterator.hasNext()) {
// String name = iterator.next();
// if (getPartitionId(name) == partitionId) {
// iterator.remove();
// }
// }
// }
// }
//
// Path: src/main/java/org/rakam/util/HLLWrapper.java
// public class HLLWrapper {
// final private static int SEED = 123456;
// private HLL hll;
//
// public HLLWrapper() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
//
// public HLLWrapper(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public long cardinality() {
// return hll.cardinality();
// }
//
// public void union(HLLWrapper hll) {
// this.hll.union(hll.hll);
// }
//
// public void addAll(Collection<String> coll) {
// for (String a : coll) {
// byte[] s = a.getBytes();
// hll.addRaw(MurmurHash3.murmurhash3_x86_32(s, 0, s.length, SEED));
// }
// }
//
// public void add(String obj) {
// if (obj == null)
// throw new IllegalArgumentException();
// byte[] s = obj.getBytes();
//
// hll.addRaw(MurmurHash3.murmurhash3_x86_32(s, 0, s.length, SEED));
// }
//
// public void set(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public byte[] bytes() {
// return hll.toBytes();
// }
//
// public void reset() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
// }
| import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.nio.serialization.IdentifiedDataSerializable;
import com.hazelcast.spi.AbstractOperation;
import org.rakam.cache.hazelcast.hyperloglog.HyperLogLogService;
import org.rakam.util.HLLWrapper;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map; | /**
* Created by buremba on 10/07/14.
*/
package org.rakam.cache.hazelcast.hyperloglog.operations;
public class HyperLogLogReplicationOperation extends AbstractOperation implements IdentifiedDataSerializable {
private Map<String, byte[]> migrationData;
public HyperLogLogReplicationOperation() {
}
public HyperLogLogReplicationOperation(Map<String, byte[]> migrationData) {
this.migrationData = migrationData;
}
@Override
public void run() throws Exception { | (prompt: verbatim repeat of the context, target-file path comment, import_statement, and code columns above) | HyperLogLogService service = getService(); |
buremba/hazelcast-modules | src/main/java/org/rakam/cache/hazelcast/hyperloglog/operations/HyperLogLogReplicationOperation.java | // Path: src/main/java/org/rakam/cache/hazelcast/hyperloglog/HyperLogLogService.java
// public class HyperLogLogService implements ManagedService, RemoteService, MigrationAwareService {
//
// public static final String SERVICE_NAME = "rakam:hyperLogLogService";
//
// private NodeEngine nodeEngine;
// private final ConcurrentMap<String, HLLWrapper> containers = new ConcurrentHashMap<String, HLLWrapper>();
// private final ConstructorFunction<String, HLLWrapper> CountersConstructorFunction =
// new ConstructorFunction<String, HLLWrapper>() {
// public HLLWrapper createNew(String key) {
// return new HLLWrapper();
// }
// };
//
// public HyperLogLogService() {
// }
//
// public HLLWrapper getHLL(String name) {
// return getOrPutIfAbsent(containers, name, CountersConstructorFunction);
// }
//
// @Override
// public void init(NodeEngine nodeEngine, Properties properties) {
// this.nodeEngine = nodeEngine;
// }
//
// @Override
// public void reset() {
// containers.clear();
// }
//
// @Override
// public void shutdown(boolean terminate) {
// reset();
// }
//
// @Override
// public HyperLogLogProxy createDistributedObject(String name) {
// return new HyperLogLogProxy(name, nodeEngine, this);
// }
//
// @Override
// public void destroyDistributedObject(String name) {
// containers.remove(name);
// }
//
// @Override
// public void beforeMigration(PartitionMigrationEvent partitionMigrationEvent) {
// }
//
// @Override
// public Operation prepareReplicationOperation(PartitionReplicationEvent event) {
//
// Map<String, byte[]> data = new HashMap<String, byte[]>();
// int partitionId = event.getPartitionId();
// for (String name : containers.keySet()) {
// if (partitionId == getPartitionId(name)) {
// HLLWrapper number = containers.get(name);
// data.put(name, number.bytes());
// }
// }
// return data.isEmpty() ? null : new HyperLogLogReplicationOperation(data);
// }
//
// private int getPartitionId(String name) {
// InternalPartitionService partitionService = nodeEngine.getPartitionService();
// String partitionKey = getPartitionKey(name);
// return partitionService.getPartitionId(partitionKey);
// }
//
// @Override
// public void commitMigration(PartitionMigrationEvent partitionMigrationEvent) {
// if (partitionMigrationEvent.getMigrationEndpoint() == MigrationEndpoint.SOURCE) {
// removePartition(partitionMigrationEvent.getPartitionId());
// }
// }
//
// @Override
// public void rollbackMigration(PartitionMigrationEvent partitionMigrationEvent) {
// if (partitionMigrationEvent.getMigrationEndpoint() == MigrationEndpoint.DESTINATION) {
// removePartition(partitionMigrationEvent.getPartitionId());
// }
// }
//
// @Override
// public void clearPartitionReplica(int partitionId) {
// removePartition(partitionId);
// }
//
// public void removePartition(int partitionId) {
// final Iterator<String> iterator = containers.keySet().iterator();
// while (iterator.hasNext()) {
// String name = iterator.next();
// if (getPartitionId(name) == partitionId) {
// iterator.remove();
// }
// }
// }
// }
//
// Path: src/main/java/org/rakam/util/HLLWrapper.java
// public class HLLWrapper {
// final private static int SEED = 123456;
// private HLL hll;
//
// public HLLWrapper() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
//
// public HLLWrapper(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public long cardinality() {
// return hll.cardinality();
// }
//
// public void union(HLLWrapper hll) {
// this.hll.union(hll.hll);
// }
//
// public void addAll(Collection<String> coll) {
// for (String a : coll) {
// byte[] s = a.getBytes();
// hll.addRaw(MurmurHash3.murmurhash3_x86_32(s, 0, s.length, SEED));
// }
// }
//
// public void add(String obj) {
// if (obj == null)
// throw new IllegalArgumentException();
// byte[] s = obj.getBytes();
//
// hll.addRaw(MurmurHash3.murmurhash3_x86_32(s, 0, s.length, SEED));
// }
//
// public void set(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public byte[] bytes() {
// return hll.toBytes();
// }
//
// public void reset() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
// }
| import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.nio.serialization.IdentifiedDataSerializable;
import com.hazelcast.spi.AbstractOperation;
import org.rakam.cache.hazelcast.hyperloglog.HyperLogLogService;
import org.rakam.util.HLLWrapper;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map; | /**
* Created by buremba on 10/07/14.
*/
package org.rakam.cache.hazelcast.hyperloglog.operations;
public class HyperLogLogReplicationOperation extends AbstractOperation implements IdentifiedDataSerializable {
private Map<String, byte[]> migrationData;
public HyperLogLogReplicationOperation() {
}
public HyperLogLogReplicationOperation(Map<String, byte[]> migrationData) {
this.migrationData = migrationData;
}
@Override
public void run() throws Exception {
HyperLogLogService service = getService();
for (Map.Entry<String, byte[]> longEntry : migrationData.entrySet()) {
String name = longEntry.getKey(); | (prompt: verbatim repeat of the context, target-file path comment, import_statement, and code columns above) | HLLWrapper number = service.getHLL(name); |
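The first two rows elicit successive lines of the same method; the completed `run()` body is quoted verbatim in the final row's context at the end of this table:

```java
// Completed HyperLogLogReplicationOperation.run(), as quoted in the last row's
// context below: apply each migrated HLL byte[] to the local service.
@Override
public void run() throws Exception {
    HyperLogLogService service = getService();     // gold next_line of the first row
    for (Map.Entry<String, byte[]> longEntry : migrationData.entrySet()) {
        String name = longEntry.getKey();
        HLLWrapper number = service.getHLL(name);  // gold next_line of the second row
        byte[] value = longEntry.getValue();
        number.set(value);                         // replace local registers with the migrated bytes
    }
}
```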
buremba/hazelcast-modules | src/main/java/org/rakam/cache/hazelcast/hyperloglog/HyperLogLog.java | // Path: src/main/java/org/rakam/util/HLLWrapper.java
// public class HLLWrapper {
// final private static int SEED = 123456;
// private HLL hll;
//
// public HLLWrapper() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
//
// public HLLWrapper(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public long cardinality() {
// return hll.cardinality();
// }
//
// public void union(HLLWrapper hll) {
// this.hll.union(hll.hll);
// }
//
// public void addAll(Collection<String> coll) {
// for (String a : coll) {
// byte[] s = a.getBytes();
// hll.addRaw(MurmurHash3.murmurhash3_x86_32(s, 0, s.length, SEED));
// }
// }
//
// public void add(String obj) {
// if (obj == null)
// throw new IllegalArgumentException();
// byte[] s = obj.getBytes();
//
// hll.addRaw(MurmurHash3.murmurhash3_x86_32(s, 0, s.length, SEED));
// }
//
// public void set(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public byte[] bytes() {
// return hll.toBytes();
// }
//
// public void reset() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
// }
| import java.util.Collection;
import com.hazelcast.core.DistributedObject;
import org.rakam.util.HLLWrapper; | /**
* Created by buremba <Burak Emre Kabakcı> on 10/07/14.
*/
package org.rakam.cache.hazelcast.hyperloglog;
public interface HyperLogLog extends DistributedObject {
/**
* Returns the name of this HyperLogLog instance.
*
* @return name of this instance
*/
String getName();
/**
* Returns the cardinality of the HyperLogLog container
*/
public long cardinality();
public void reset();
/**
* Unions the given HLL container with the internal one.
*
* @param hll the HLL container to merge
*/ | (prompt: verbatim repeat of the context, target-file path comment, import_statement, and code columns above) | public void union(HLLWrapper hll); |
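The interface above is the user-facing handle for the custom service. A hedged usage sketch, assuming the service is registered under `SERVICE_NAME` in the Hazelcast service configuration (the instance setup and the proxy name `daily-visitors` are invented):

```java
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import org.rakam.cache.hazelcast.hyperloglog.HyperLogLog;
import org.rakam.cache.hazelcast.hyperloglog.HyperLogLogService;

public class HyperLogLogUsageSketch {
    public static void main(String[] args) {
        HazelcastInstance hz = Hazelcast.newHazelcastInstance();
        // Assumption: hazelcast.xml declares HyperLogLogService as a custom service.
        HyperLogLog visitors = hz.getDistributedObject(
                HyperLogLogService.SERVICE_NAME, "daily-visitors");
        long estimate = visitors.cardinality(); // probabilistic estimate, not an exact count
        System.out.println("distinct visitors ~= " + estimate);
        visitors.reset();
        hz.shutdown();
    }
}
```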
buremba/hazelcast-modules | src/main/java/org/rakam/cache/hazelcast/treemap/client/GetRequest.java | // Path: src/main/java/org/rakam/cache/hazelcast/treemap/operations/GetOperation.java
// public class GetOperation extends TreeMapBaseOperation {
// private Integer numberOfElements = null;
// private Map<String, Long> returnValue;
//
// public GetOperation(String name) {
// super(name);
// }
//
// public GetOperation() {
//
// }
//
// public GetOperation(String name, Integer numberOfElements) {
// super(name);
// this.numberOfElements = numberOfElements;
// }
//
// @Override
// public int getId() {
// return TreeMapSerializerFactory.GET;
// }
//
// @Override
// public void run() throws Exception {
// TreeMapService service = getService();
// if(numberOfElements==null)
// this.returnValue = service.getHLL(name).getAll();
// else
// this.returnValue = service.getHLL(name).getTopItems(numberOfElements);
// }
//
// @Override
// public Object getResponse() {
// return returnValue;
// }
// }
| import com.hazelcast.nio.serialization.PortableReader;
import com.hazelcast.nio.serialization.PortableWriter;
import com.hazelcast.spi.Operation;
import org.rakam.cache.hazelcast.treemap.operations.GetOperation;
import java.io.IOException; | package org.rakam.cache.hazelcast.treemap.client;
/**
* Created by buremba <Burak Emre Kabakcı> on 19/07/14 20:24.
*/
public class GetRequest extends ReadRequest {
private Integer limit;
public GetRequest(String name, Integer limit) {
super(name);
this.limit = limit==null ? -1 : limit;
}
public GetRequest() {
}
@Override
protected Operation prepareOperation() { | (prompt: verbatim repeat of the context, target-file path comment, import_statement, and code columns above) | return new GetOperation(name, limit==-1 ? null : limit); |
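The gold line undoes the `-1` sentinel that the constructor applied, since a primitive `int` field cannot carry `null` across the wire. A plausible sketch of the matching serialization pair, which this row does not show; the field names `"n"`/`"l"` and the `write`/`read` hook names follow Hazelcast 3.x client-request conventions and are assumptions here:

```java
// Hypothetical Portable serialization for GetRequest; the constructor has
// already normalized a null limit to the -1 sentinel.
@Override
public void write(PortableWriter writer) throws IOException {
    writer.writeUTF("n", name);
    writer.writeInt("l", limit);
}

@Override
public void read(PortableReader reader) throws IOException {
    name = reader.readUTF("n");
    limit = reader.readInt("l"); // prepareOperation() maps -1 back to null
}
```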
buremba/hazelcast-modules | src/main/java/org/rakam/cache/hazelcast/hyperloglog/client/ReadRequest.java | // Path: src/main/java/org/rakam/cache/hazelcast/hyperloglog/HyperLogLogService.java
// public class HyperLogLogService implements ManagedService, RemoteService, MigrationAwareService {
//
// public static final String SERVICE_NAME = "rakam:hyperLogLogService";
//
// private NodeEngine nodeEngine;
// private final ConcurrentMap<String, HLLWrapper> containers = new ConcurrentHashMap<String, HLLWrapper>();
// private final ConstructorFunction<String, HLLWrapper> CountersConstructorFunction =
// new ConstructorFunction<String, HLLWrapper>() {
// public HLLWrapper createNew(String key) {
// return new HLLWrapper();
// }
// };
//
// public HyperLogLogService() {
// }
//
// public HLLWrapper getHLL(String name) {
// return getOrPutIfAbsent(containers, name, CountersConstructorFunction);
// }
//
// @Override
// public void init(NodeEngine nodeEngine, Properties properties) {
// this.nodeEngine = nodeEngine;
// }
//
// @Override
// public void reset() {
// containers.clear();
// }
//
// @Override
// public void shutdown(boolean terminate) {
// reset();
// }
//
// @Override
// public HyperLogLogProxy createDistributedObject(String name) {
// return new HyperLogLogProxy(name, nodeEngine, this);
// }
//
// @Override
// public void destroyDistributedObject(String name) {
// containers.remove(name);
// }
//
// @Override
// public void beforeMigration(PartitionMigrationEvent partitionMigrationEvent) {
// }
//
// @Override
// public Operation prepareReplicationOperation(PartitionReplicationEvent event) {
//
// Map<String, byte[]> data = new HashMap<String, byte[]>();
// int partitionId = event.getPartitionId();
// for (String name : containers.keySet()) {
// if (partitionId == getPartitionId(name)) {
// HLLWrapper number = containers.get(name);
// data.put(name, number.bytes());
// }
// }
// return data.isEmpty() ? null : new HyperLogLogReplicationOperation(data);
// }
//
// private int getPartitionId(String name) {
// InternalPartitionService partitionService = nodeEngine.getPartitionService();
// String partitionKey = getPartitionKey(name);
// return partitionService.getPartitionId(partitionKey);
// }
//
// @Override
// public void commitMigration(PartitionMigrationEvent partitionMigrationEvent) {
// if (partitionMigrationEvent.getMigrationEndpoint() == MigrationEndpoint.SOURCE) {
// removePartition(partitionMigrationEvent.getPartitionId());
// }
// }
//
// @Override
// public void rollbackMigration(PartitionMigrationEvent partitionMigrationEvent) {
// if (partitionMigrationEvent.getMigrationEndpoint() == MigrationEndpoint.DESTINATION) {
// removePartition(partitionMigrationEvent.getPartitionId());
// }
// }
//
// @Override
// public void clearPartitionReplica(int partitionId) {
// removePartition(partitionId);
// }
//
// public void removePartition(int partitionId) {
// final Iterator<String> iterator = containers.keySet().iterator();
// while (iterator.hasNext()) {
// String name = iterator.next();
// if (getPartitionId(name) == partitionId) {
// iterator.remove();
// }
// }
// }
// }
| import com.hazelcast.client.ClientEngine;
import com.hazelcast.client.PartitionClientRequest;
import com.hazelcast.client.SecureRequest;
import com.hazelcast.nio.serialization.Data;
import com.hazelcast.nio.serialization.Portable;
import com.hazelcast.nio.serialization.PortableReader;
import com.hazelcast.nio.serialization.PortableWriter;
import com.hazelcast.security.permission.ActionConstants;
import com.hazelcast.security.permission.AtomicLongPermission;
import org.rakam.cache.hazelcast.hyperloglog.HyperLogLogService;
import java.io.IOException;
import java.security.Permission; | /**
* Created by buremba <Burak Emre Kabakcı> on 10/07/14.
*/
package org.rakam.cache.hazelcast.hyperloglog.client;
public abstract class ReadRequest extends PartitionClientRequest implements Portable, SecureRequest {
protected String name;
public ReadRequest() {
}
public ReadRequest(String name) {
this.name = name;
}
@Override
protected int getPartition() {
ClientEngine clientEngine = getClientEngine();
//Data key = serializationService.toData(name);
Data key = clientEngine.getSerializationService().toData(name);
return clientEngine.getPartitionService().getPartitionId(key);
}
@Override
public String getServiceName() { | (prompt: verbatim repeat of the context, target-file path comment, import_statement, and code columns above) | return HyperLogLogService.SERVICE_NAME; |
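`ReadRequest` leaves only `prepareOperation()` (plus the Portable plumbing) to subclasses; a hypothetical concrete reader, where both `CardinalityRequest` and `CardinalityOperation` are invented names rather than classes shown in this table:

```java
// Hypothetical subclass sketch: routes a cardinality read to the partition
// that owns the named HLL container.
public class CardinalityRequest extends ReadRequest {
    public CardinalityRequest() {
    }

    public CardinalityRequest(String name) {
        super(name);
    }

    @Override
    protected Operation prepareOperation() {
        return new CardinalityOperation(name); // assumed server-side operation
    }
}
```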
buremba/hazelcast-modules | src/main/java/org/rakam/cache/hazelcast/customMap/operations/IncrementByOperation.java | // Path: src/main/java/org/rakam/cache/hazelcast/treemap/operations/IncrementByBackupOperation.java
// public class IncrementByBackupOperation extends TreeMapBaseOperation implements BackupOperation {
// private long by;
//
// public IncrementByBackupOperation(String name, long by) {
// super(name);
// this.by = by;
// }
//
// public IncrementByBackupOperation() {
//
// }
//
// @Override
// public void run() throws Exception {
// OrderedCounterMap map = ((TreeMapService) getService()).getHLL(name);
//
// map.increment(name, by);
// }
//
// @Override
// public int getId() {
// return TreeMapSerializerFactory.ADD_BACKUP;
// }
// }
| import com.hazelcast.map.operation.KeyBasedMapOperation;
import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.nio.serialization.Data;
import com.hazelcast.spi.BackupAwareOperation;
import com.hazelcast.spi.Operation;
import org.rakam.cache.hazelcast.treemap.operations.IncrementByBackupOperation;
import java.io.IOException; | @Override
public void run() throws Exception {
getService();
}
@Override
public Object getResponse() {
return null;
}
@Override
public boolean shouldBackup() {
return true;
}
@Override
public int getSyncBackupCount() {
return 0;
}
@Override
public int getAsyncBackupCount() {
return 0;
}
@Override
public Operation getBackupOperation() { | (prompt: verbatim repeat of the context, target-file path comment, import_statement, and code columns above) | return new IncrementByBackupOperation(); |
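Note that the gold line builds the backup operation with its no-arg constructor even though the quoted `IncrementByBackupOperation` also offers a `(name, by)` constructor; a state-carrying variant would look like the sketch below, assuming the enclosing operation holds `name` and `by` fields (not shown in this row):

```java
@Override
public Operation getBackupOperation() {
    // Propagate the increment state to the backup replica; the no-arg form in
    // the gold next_line would replicate an operation with no name or delta.
    return new IncrementByBackupOperation(name, by);
}
```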
buremba/hazelcast-modules | src/main/java/org/rakam/cache/hazelcast/hyperloglog/client/ResetRequest.java | // Path: src/main/java/org/rakam/cache/hazelcast/hyperloglog/operations/ResetOperation.java
// public class ResetOperation extends HyperLogLogBackupAwareOperation {
//
//
// public ResetOperation() {
// }
//
// public ResetOperation(String name) {
// super(name);
// }
//
// @Override
// public void run() throws IllegalArgumentException {
// HLLWrapper hll = getHLL();
// hll.reset();
// }
//
// @Override
// public int getId() {
// return HyperLogLogSerializerFactory.RESET;
// }
//
// @Override
// public Operation getBackupOperation() {
// return new ResetBackupOperation(name);
// }
// }
| import com.hazelcast.spi.Operation;
import org.rakam.cache.hazelcast.hyperloglog.operations.ResetOperation; | package org.rakam.cache.hazelcast.hyperloglog.client;
/**
* Created by buremba <Burak Emre Kabakcı> on 11/07/14 16:18.
*/
public class ResetRequest extends WriteRequest {
public ResetRequest(String name) {
super(name);
}
public ResetRequest() {
}
@Override
protected Operation prepareOperation() { | (prompt: verbatim repeat of the context, target-file path comment, import_statement, and code columns above) | return new ResetOperation(name); |
buremba/hazelcast-modules | src/main/java/org/rakam/cache/hazelcast/treemap/operations/GetOperation.java | // Path: src/main/java/org/rakam/cache/hazelcast/treemap/TreeMapService.java
// public class TreeMapService implements ManagedService, RemoteService, MigrationAwareService {
//
// public static final String SERVICE_NAME = "rakam:treeMapService";
//
// private NodeEngine nodeEngine;
// private final ConcurrentMap<String, OrderedCounterMap> containers = new ConcurrentHashMap<String, OrderedCounterMap>();
// private final ConstructorFunction<String, OrderedCounterMap> CountersConstructorFunction =
// new ConstructorFunction<String, OrderedCounterMap>() {
// public OrderedCounterMap createNew(String key) {
// return new OrderedCounterMap();
// }
// };
//
// public TreeMapService() {
// }
//
// public OrderedCounterMap getHLL(String name) {
// return getOrPutIfAbsent(containers, name, CountersConstructorFunction);
// }
//
// public void setHLL(String name, OrderedCounterMap map) {
// containers.put(name, map);
// }
//
// @Override
// public void init(NodeEngine nodeEngine, Properties properties) {
// this.nodeEngine = nodeEngine;
// }
//
// @Override
// public void reset() {
// containers.clear();
// }
//
// @Override
// public void shutdown(boolean terminate) {
// reset();
// }
//
// @Override
// public TreeMapProxy createDistributedObject(String name) {
// return new TreeMapProxy(name, nodeEngine, this);
// }
//
// @Override
// public void destroyDistributedObject(String name) {
// containers.remove(name);
// }
//
// @Override
// public void beforeMigration(PartitionMigrationEvent partitionMigrationEvent) {
// }
//
// @Override
// public Operation prepareReplicationOperation(PartitionReplicationEvent event) {
// Map<String, OrderedCounterMap> data = new HashMap();
// int partitionId = event.getPartitionId();
// for (String name : containers.keySet()) {
// if (partitionId == getPartitionId(name)) {
// OrderedCounterMap number = containers.get(name);
// data.put(name, number);
// }
// }
// return data.isEmpty() ? null : new TreeMapReplicationOperation(data);
// }
//
// private int getPartitionId(String name) {
// InternalPartitionService partitionService = nodeEngine.getPartitionService();
// String partitionKey = getPartitionKey(name);
// return partitionService.getPartitionId(partitionKey);
// }
//
// @Override
// public void commitMigration(PartitionMigrationEvent partitionMigrationEvent) {
// if (partitionMigrationEvent.getMigrationEndpoint() == MigrationEndpoint.SOURCE) {
// removePartition(partitionMigrationEvent.getPartitionId());
// }
// }
//
// @Override
// public void rollbackMigration(PartitionMigrationEvent partitionMigrationEvent) {
// if (partitionMigrationEvent.getMigrationEndpoint() == MigrationEndpoint.DESTINATION) {
// removePartition(partitionMigrationEvent.getPartitionId());
// }
// }
//
// @Override
// public void clearPartitionReplica(int partitionId) {
// removePartition(partitionId);
// }
//
// public void removePartition(int partitionId) {
// final Iterator<String> iterator = containers.keySet().iterator();
// while (iterator.hasNext()) {
// String name = iterator.next();
// if (getPartitionId(name) == partitionId) {
// iterator.remove();
// }
// }
// }
// }
| import org.rakam.cache.hazelcast.treemap.TreeMapService;
import java.util.Map; | package org.rakam.cache.hazelcast.treemap.operations;
/**
* Created by buremba <Burak Emre Kabakcı> on 15/07/14 02:34.
*/
public class GetOperation extends TreeMapBaseOperation {
private Integer numberOfElements = null;
private Map<String, Long> returnValue;
public GetOperation(String name) {
super(name);
}
public GetOperation() {
}
public GetOperation(String name, Integer numberOfElements) {
super(name);
this.numberOfElements = numberOfElements;
}
@Override
public int getId() {
return TreeMapSerializerFactory.GET;
}
@Override
public void run() throws Exception { | (prompt: verbatim repeat of the context, target-file path comment, import_statement, and code columns above) | TreeMapService service = getService(); |
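The completed `run()` body for this row appears verbatim in the `GetRequest` row's context earlier in this table:

```java
// Completed GetOperation.run(), as quoted in the GetRequest context above:
// a null numberOfElements means "return all entries".
@Override
public void run() throws Exception {
    TreeMapService service = getService(); // gold next_line for this row
    if (numberOfElements == null)
        this.returnValue = service.getHLL(name).getAll();
    else
        this.returnValue = service.getHLL(name).getTopItems(numberOfElements);
}
```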
buremba/hazelcast-modules | src/main/java/org/rakam/cache/hazelcast/treemap/TreeMapService.java | // Path: src/main/java/org/rakam/cache/hazelcast/treemap/operations/TreeMapReplicationOperation.java
// public class TreeMapReplicationOperation extends AbstractOperation implements IdentifiedDataSerializable {
//
// private Map<String, OrderedCounterMap> migrationData;
//
// public TreeMapReplicationOperation(Map<String, OrderedCounterMap> data) {
// migrationData = data;
// }
//
// public TreeMapReplicationOperation() {
//
// }
//
// @Override
// public void run() throws Exception {
// TreeMapService service = getService();
// for (Map.Entry<String, OrderedCounterMap> longEntry : migrationData.entrySet()) {
// String name = longEntry.getKey();
// service.setHLL(name, longEntry.getValue());
// }
// }
//
//
// @Override
// protected void readInternal(ObjectDataInput in) throws IOException {
// migrationData = new HashMap();
// int len = in.readInt();
// for(int i=0; i<len; i++) {
// OrderedCounterMap map = new OrderedCounterMap();
// int internalLength = in.readInt();
// for(int a=0; a<internalLength; a++) {
// map.increment(in.readUTF(), in.readLong());
// }
// }
// }
//
// @Override
// protected void writeInternal(ObjectDataOutput out) throws IOException {
// out.writeInt(migrationData.size());
// for (Map.Entry<String, OrderedCounterMap> entry : migrationData.entrySet()) {
// out.writeUTF(entry.getKey());
// OrderedCounterMap val = entry.getValue();
// out.writeObject(val);
// }
// }
//
//
// @Override
// public int getFactoryId() {
// return TreeMapSerializerFactory.F_ID;
// }
//
// @Override
// public int getId() {
// return TreeMapSerializerFactory.REPLICATION;
// }
// }
| import com.hazelcast.partition.InternalPartitionService;
import com.hazelcast.partition.MigrationEndpoint;
import com.hazelcast.spi.*;
import com.hazelcast.util.ConstructorFunction;
import org.rakam.cache.hazelcast.treemap.operations.TreeMapReplicationOperation;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import static com.hazelcast.partition.strategy.StringPartitioningStrategy.getPartitionKey;
import static com.hazelcast.util.ConcurrencyUtil.getOrPutIfAbsent; |
@Override
public void shutdown(boolean terminate) {
reset();
}
@Override
public TreeMapProxy createDistributedObject(String name) {
return new TreeMapProxy(name, nodeEngine, this);
}
@Override
public void destroyDistributedObject(String name) {
containers.remove(name);
}
@Override
public void beforeMigration(PartitionMigrationEvent partitionMigrationEvent) {
}
@Override
public Operation prepareReplicationOperation(PartitionReplicationEvent event) {
Map<String, OrderedCounterMap> data = new HashMap();
int partitionId = event.getPartitionId();
for (String name : containers.keySet()) {
if (partitionId == getPartitionId(name)) {
OrderedCounterMap number = containers.get(name);
data.put(name, number);
}
} | (prompt: verbatim repeat of the context, target-file path comment, import_statement, and code columns above) | return data.isEmpty() ? null : new TreeMapReplicationOperation(data); |
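The quoted `TreeMapReplicationOperation.readInternal()` is not symmetric with its `writeInternal()`: it reads `(UTF, long)` pairs that the writer never emits and discards each rebuilt map without storing it. A corrected sketch that mirrors the writer, where the generic `readObject()` is the assumed counterpart of `writeObject()`:

```java
// Sketch of a readInternal() symmetric with the quoted writeInternal():
// one UTF key followed by one writeObject'd OrderedCounterMap per entry.
@Override
protected void readInternal(ObjectDataInput in) throws IOException {
    migrationData = new HashMap<String, OrderedCounterMap>();
    int len = in.readInt();
    for (int i = 0; i < len; i++) {
        String name = in.readUTF();
        OrderedCounterMap map = in.readObject();
        migrationData.put(name, map);
    }
}
```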
buremba/hazelcast-modules | src/main/java/org/rakam/cache/hazelcast/hyperloglog/client/AddAllRequest.java | // Path: src/main/java/org/rakam/cache/hazelcast/hyperloglog/operations/AddAllOperation.java
// public class AddAllOperation extends HyperLogLogBackupAwareOperation {
//
// private Collection<String> items;
//
// public AddAllOperation() {
// }
//
// public AddAllOperation(String name, Collection<String> item) {
// super(name);
// this.items = item;
// }
//
// @Override
// public void run() throws Exception {
// HLLWrapper hll = getHLL();
// hll.addAll(items);
// }
//
// @Override
// public int getId() {
// return HyperLogLogSerializerFactory.ADD_ALL;
// }
//
// @Override
// protected void writeInternal(ObjectDataOutput out) throws IOException {
// super.writeInternal(out);
// out.writeInt(items.size());
// for (String item : items)
// out.writeUTF(item);
// }
//
// @Override
// protected void readInternal(ObjectDataInput in) throws IOException {
// super.readInternal(in);
// int size = in.readInt();
// items = new ArrayList(size);
// for (int i = 0; i < size; i++)
// items.add(in.readUTF());
// }
//
// @Override
// public Operation getBackupOperation() {
// return new AddAllBackupOperation(name, items);
// }
// }
| import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.nio.serialization.PortableReader;
import com.hazelcast.nio.serialization.PortableWriter;
import com.hazelcast.spi.Operation;
import org.rakam.cache.hazelcast.hyperloglog.operations.AddAllOperation;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection; | /**
* Created by buremba <Burak Emre Kabakcı> on 10/07/14.
*/
package org.rakam.cache.hazelcast.hyperloglog.client;
public class AddAllRequest extends WriteRequest {
private Collection<String> items;
public AddAllRequest() {
}
protected AddAllRequest(String name, Collection<String> items) {
this.name = name;
this.items = items;
}
@Override
protected Operation prepareOperation() { | // Path: src/main/java/org/rakam/cache/hazelcast/hyperloglog/operations/AddAllOperation.java
// public class AddAllOperation extends HyperLogLogBackupAwareOperation {
//
// private Collection<String> items;
//
// public AddAllOperation() {
// }
//
// public AddAllOperation(String name, Collection<String> item) {
// super(name);
// this.items = item;
// }
//
// @Override
// public void run() throws Exception {
// HLLWrapper hll = getHLL();
// hll.addAll(items);
// }
//
// @Override
// public int getId() {
// return HyperLogLogSerializerFactory.ADD_ALL;
// }
//
// @Override
// protected void writeInternal(ObjectDataOutput out) throws IOException {
// super.writeInternal(out);
// out.writeInt(items.size());
// for (String item : items)
// out.writeUTF(item);
// }
//
// @Override
// protected void readInternal(ObjectDataInput in) throws IOException {
// super.readInternal(in);
// int size = in.readInt();
//             items = new ArrayList<String>(size);
// for (int i = 0; i < size; i++)
// items.add(in.readUTF());
// }
//
// @Override
// public Operation getBackupOperation() {
// return new AddAllBackupOperation(name, items);
// }
// }
// Path: src/main/java/org/rakam/cache/hazelcast/hyperloglog/client/AddAllRequest.java
import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.nio.serialization.PortableReader;
import com.hazelcast.nio.serialization.PortableWriter;
import com.hazelcast.spi.Operation;
import org.rakam.cache.hazelcast.hyperloglog.operations.AddAllOperation;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
/**
* Created by buremba <Burak Emre Kabakcı> on 10/07/14.
*/
package org.rakam.cache.hazelcast.hyperloglog.client;
public class AddAllRequest extends WriteRequest {
private Collection<String> items;
public AddAllRequest() {
}
protected AddAllRequest(String name, Collection<String> items) {
this.name = name;
this.items = items;
}
@Override
protected Operation prepareOperation() { | return new AddAllOperation(name, items); |
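AddAllRequest is Portable, and the PortableReader/PortableWriter imports above imply it also serializes its item list between client and member. A plausible sketch of those hooks, using the write(PortableWriter)/read(PortableReader) template methods of Hazelcast 3.x client requests; the field name "n" and the raw-stream layout are assumptions, not code from this repo.

// Sketch only: Portable serialization for this request. The "n" field name
// and the raw-stream layout for the item list are assumed, not repo code.
@Override
public void write(PortableWriter writer) throws IOException {
    writer.writeUTF("n", name);
    ObjectDataOutput out = writer.getRawDataOutput();
    out.writeInt(items.size());
    for (String item : items)
        out.writeUTF(item);
}

@Override
public void read(PortableReader reader) throws IOException {
    name = reader.readUTF("n");
    ObjectDataInput in = reader.getRawDataInput();
    int size = in.readInt();
    items = new ArrayList<String>(size);
    for (int i = 0; i < size; i++)
        items.add(in.readUTF());
}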
buremba/hazelcast-modules | src/main/java/org/rakam/cache/hazelcast/hyperloglog/HyperLogLogService.java | // Path: src/main/java/org/rakam/cache/hazelcast/hyperloglog/operations/HyperLogLogReplicationOperation.java
// public class HyperLogLogReplicationOperation extends AbstractOperation implements IdentifiedDataSerializable {
//
// private Map<String, byte[]> migrationData;
//
// public HyperLogLogReplicationOperation() {
// }
//
// public HyperLogLogReplicationOperation(Map<String, byte[]> migrationData) {
// this.migrationData = migrationData;
// }
//
// @Override
// public void run() throws Exception {
// HyperLogLogService service = getService();
// for (Map.Entry<String, byte[]> longEntry : migrationData.entrySet()) {
// String name = longEntry.getKey();
// HLLWrapper number = service.getHLL(name);
// byte[] value = longEntry.getValue();
// number.set(value);
// }
// }
//
// @Override
// public String getServiceName() {
// return HyperLogLogService.SERVICE_NAME;
// }
//
// @Override
// public int getFactoryId() {
// return HyperLogLogSerializerFactory.F_ID;
// }
//
// @Override
// public int getId() {
// return HyperLogLogSerializerFactory.REPLICATION;
// }
//
// @Override
// protected void writeInternal(ObjectDataOutput out) throws IOException {
// out.writeInt(migrationData.size());
// for (Map.Entry<String, byte[]> entry : migrationData.entrySet()) {
// out.writeUTF(entry.getKey());
// byte[] val = entry.getValue();
// out.writeInt(val.length);
// out.write(val);
// }
// }
//
// @Override
// protected void readInternal(ObjectDataInput in) throws IOException {
// int mapSize = in.readInt();
// migrationData = new HashMap<String, byte[]>(mapSize);
// for (int i = 0; i < mapSize; i++) {
// String name = in.readUTF();
// int number = in.readInt();
// byte[] a = new byte[number];
// in.readFully(a);
// migrationData.put(name, a);
// }
// }
// }
//
// Path: src/main/java/org/rakam/util/HLLWrapper.java
// public class HLLWrapper {
// final private static int SEED = 123456;
// private HLL hll;
//
// public HLLWrapper() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
//
// public HLLWrapper(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public long cardinality() {
// return hll.cardinality();
// }
//
// public void union(HLLWrapper hll) {
// this.hll.union(hll.hll);
// }
//
// public void addAll(Collection<String> coll) {
// for (String a : coll) {
// byte[] s = a.getBytes();
// hll.addRaw(MurmurHash3.murmurhash3x8632(s, 0, s.length, SEED));
// }
// }
//
// public void add(String obj) {
// if (obj == null)
// throw new IllegalArgumentException();
// byte[] s = obj.getBytes();
//
// hll.addRaw(MurmurHash3.murmurhash3x8632(s, 0, s.length, SEED));
// }
//
// public void set(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public byte[] bytes() {
// return hll.toBytes();
// }
//
// public void reset() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
// }
| import com.hazelcast.partition.InternalPartitionService;
import com.hazelcast.partition.MigrationEndpoint;
import com.hazelcast.spi.*;
import com.hazelcast.util.ConstructorFunction;
import org.rakam.cache.hazelcast.hyperloglog.operations.HyperLogLogReplicationOperation;
import org.rakam.util.HLLWrapper;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import static com.hazelcast.partition.strategy.StringPartitioningStrategy.getPartitionKey;
import static com.hazelcast.util.ConcurrencyUtil.getOrPutIfAbsent; | /**
* Created by buremba <Burak Emre Kabakcı> on 10/07/14.
*/
package org.rakam.cache.hazelcast.hyperloglog;
public class HyperLogLogService implements ManagedService, RemoteService, MigrationAwareService {
public static final String SERVICE_NAME = "rakam:hyperLogLogService";
private NodeEngine nodeEngine; | // Path: src/main/java/org/rakam/cache/hazelcast/hyperloglog/operations/HyperLogLogReplicationOperation.java
// public class HyperLogLogReplicationOperation extends AbstractOperation implements IdentifiedDataSerializable {
//
// private Map<String, byte[]> migrationData;
//
// public HyperLogLogReplicationOperation() {
// }
//
// public HyperLogLogReplicationOperation(Map<String, byte[]> migrationData) {
// this.migrationData = migrationData;
// }
//
// @Override
// public void run() throws Exception {
// HyperLogLogService service = getService();
// for (Map.Entry<String, byte[]> longEntry : migrationData.entrySet()) {
// String name = longEntry.getKey();
// HLLWrapper number = service.getHLL(name);
// byte[] value = longEntry.getValue();
// number.set(value);
// }
// }
//
// @Override
// public String getServiceName() {
// return HyperLogLogService.SERVICE_NAME;
// }
//
// @Override
// public int getFactoryId() {
// return HyperLogLogSerializerFactory.F_ID;
// }
//
// @Override
// public int getId() {
// return HyperLogLogSerializerFactory.REPLICATION;
// }
//
// @Override
// protected void writeInternal(ObjectDataOutput out) throws IOException {
// out.writeInt(migrationData.size());
// for (Map.Entry<String, byte[]> entry : migrationData.entrySet()) {
// out.writeUTF(entry.getKey());
// byte[] val = entry.getValue();
// out.writeInt(val.length);
// out.write(val);
// }
// }
//
// @Override
// protected void readInternal(ObjectDataInput in) throws IOException {
// int mapSize = in.readInt();
// migrationData = new HashMap<String, byte[]>(mapSize);
// for (int i = 0; i < mapSize; i++) {
// String name = in.readUTF();
// int number = in.readInt();
// byte[] a = new byte[number];
// in.readFully(a);
// migrationData.put(name, a);
// }
// }
// }
//
// Path: src/main/java/org/rakam/util/HLLWrapper.java
// public class HLLWrapper {
// final private static int SEED = 123456;
// private HLL hll;
//
// public HLLWrapper() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
//
// public HLLWrapper(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public long cardinality() {
// return hll.cardinality();
// }
//
// public void union(HLLWrapper hll) {
// this.hll.union(hll.hll);
// }
//
// public void addAll(Collection<String> coll) {
// for (String a : coll) {
// byte[] s = a.getBytes();
// hll.addRaw(MurmurHash3.murmurhash3x8632(s, 0, s.length, SEED));
// }
// }
//
// public void add(String obj) {
// if (obj == null)
// throw new IllegalArgumentException();
// byte[] s = obj.getBytes();
//
// hll.addRaw(MurmurHash3.murmurhash3x8632(s, 0, s.length, SEED));
// }
//
// public void set(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public byte[] bytes() {
// return hll.toBytes();
// }
//
// public void reset() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
// }
// Path: src/main/java/org/rakam/cache/hazelcast/hyperloglog/HyperLogLogService.java
import com.hazelcast.partition.InternalPartitionService;
import com.hazelcast.partition.MigrationEndpoint;
import com.hazelcast.spi.*;
import com.hazelcast.util.ConstructorFunction;
import org.rakam.cache.hazelcast.hyperloglog.operations.HyperLogLogReplicationOperation;
import org.rakam.util.HLLWrapper;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import static com.hazelcast.partition.strategy.StringPartitioningStrategy.getPartitionKey;
import static com.hazelcast.util.ConcurrencyUtil.getOrPutIfAbsent;
/**
* Created by buremba <Burak Emre Kabakcı> on 10/07/14.
*/
package org.rakam.cache.hazelcast.hyperloglog;
public class HyperLogLogService implements ManagedService, RemoteService, MigrationAwareService {
public static final String SERVICE_NAME = "rakam:hyperLogLogService";
private NodeEngine nodeEngine; | private final ConcurrentMap<String, HLLWrapper> containers = new ConcurrentHashMap<String, HLLWrapper>(); |
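The containers map above is resolved through getOrPutIfAbsent with a ConstructorFunction. That utility boils down to the following putIfAbsent pattern, sketched here so the thread-safety argument is explicit; this is an approximation of the utility's behavior, not its source.

// Roughly what getOrPutIfAbsent(containers, name, CountersConstructorFunction)
// does: lock-free lazy creation where racing threads converge on one instance.
static HLLWrapper getOrCreate(ConcurrentMap<String, HLLWrapper> containers, String name) {
    HLLWrapper wrapper = containers.get(name);
    if (wrapper == null) {
        wrapper = new HLLWrapper();                          // speculative new container
        HLLWrapper existing = containers.putIfAbsent(name, wrapper);
        if (existing != null) {
            wrapper = existing;                              // lost the race; reuse the winner
        }
    }
    return wrapper;
}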
buremba/hazelcast-modules | src/main/java/org/rakam/cache/hazelcast/hyperloglog/HyperLogLogService.java | // Path: src/main/java/org/rakam/cache/hazelcast/hyperloglog/operations/HyperLogLogReplicationOperation.java
// public class HyperLogLogReplicationOperation extends AbstractOperation implements IdentifiedDataSerializable {
//
// private Map<String, byte[]> migrationData;
//
// public HyperLogLogReplicationOperation() {
// }
//
// public HyperLogLogReplicationOperation(Map<String, byte[]> migrationData) {
// this.migrationData = migrationData;
// }
//
// @Override
// public void run() throws Exception {
// HyperLogLogService service = getService();
// for (Map.Entry<String, byte[]> longEntry : migrationData.entrySet()) {
// String name = longEntry.getKey();
// HLLWrapper number = service.getHLL(name);
// byte[] value = longEntry.getValue();
// number.set(value);
// }
// }
//
// @Override
// public String getServiceName() {
// return HyperLogLogService.SERVICE_NAME;
// }
//
// @Override
// public int getFactoryId() {
// return HyperLogLogSerializerFactory.F_ID;
// }
//
// @Override
// public int getId() {
// return HyperLogLogSerializerFactory.REPLICATION;
// }
//
// @Override
// protected void writeInternal(ObjectDataOutput out) throws IOException {
// out.writeInt(migrationData.size());
// for (Map.Entry<String, byte[]> entry : migrationData.entrySet()) {
// out.writeUTF(entry.getKey());
// byte[] val = entry.getValue();
// out.writeInt(val.length);
// out.write(val);
// }
// }
//
// @Override
// protected void readInternal(ObjectDataInput in) throws IOException {
// int mapSize = in.readInt();
// migrationData = new HashMap<String, byte[]>(mapSize);
// for (int i = 0; i < mapSize; i++) {
// String name = in.readUTF();
// int number = in.readInt();
// byte[] a = new byte[number];
// in.readFully(a);
// migrationData.put(name, a);
// }
// }
// }
//
// Path: src/main/java/org/rakam/util/HLLWrapper.java
// public class HLLWrapper {
// final private static int SEED = 123456;
// private HLL hll;
//
// public HLLWrapper() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
//
// public HLLWrapper(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public long cardinality() {
// return hll.cardinality();
// }
//
// public void union(HLLWrapper hll) {
// this.hll.union(hll.hll);
// }
//
// public void addAll(Collection<String> coll) {
// for (String a : coll) {
// byte[] s = a.getBytes();
// hll.addRaw(MurmurHash3.murmurhash3x8632(s, 0, s.length, SEED));
// }
// }
//
// public void add(String obj) {
// if (obj == null)
// throw new IllegalArgumentException();
// byte[] s = obj.getBytes();
//
// hll.addRaw(MurmurHash3.murmurhash3x8632(s, 0, s.length, SEED));
// }
//
// public void set(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public byte[] bytes() {
// return hll.toBytes();
// }
//
// public void reset() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
// }
| import com.hazelcast.partition.InternalPartitionService;
import com.hazelcast.partition.MigrationEndpoint;
import com.hazelcast.spi.*;
import com.hazelcast.util.ConstructorFunction;
import org.rakam.cache.hazelcast.hyperloglog.operations.HyperLogLogReplicationOperation;
import org.rakam.util.HLLWrapper;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import static com.hazelcast.partition.strategy.StringPartitioningStrategy.getPartitionKey;
import static com.hazelcast.util.ConcurrencyUtil.getOrPutIfAbsent; | @Override
public void shutdown(boolean terminate) {
reset();
}
@Override
public HyperLogLogProxy createDistributedObject(String name) {
return new HyperLogLogProxy(name, nodeEngine, this);
}
@Override
public void destroyDistributedObject(String name) {
containers.remove(name);
}
@Override
public void beforeMigration(PartitionMigrationEvent partitionMigrationEvent) {
}
@Override
public Operation prepareReplicationOperation(PartitionReplicationEvent event) {
Map<String, byte[]> data = new HashMap<String, byte[]>();
int partitionId = event.getPartitionId();
for (String name : containers.keySet()) {
if (partitionId == getPartitionId(name)) {
HLLWrapper number = containers.get(name);
data.put(name, number.bytes());
}
} | // Path: src/main/java/org/rakam/cache/hazelcast/hyperloglog/operations/HyperLogLogReplicationOperation.java
// public class HyperLogLogReplicationOperation extends AbstractOperation implements IdentifiedDataSerializable {
//
// private Map<String, byte[]> migrationData;
//
// public HyperLogLogReplicationOperation() {
// }
//
// public HyperLogLogReplicationOperation(Map<String, byte[]> migrationData) {
// this.migrationData = migrationData;
// }
//
// @Override
// public void run() throws Exception {
// HyperLogLogService service = getService();
// for (Map.Entry<String, byte[]> longEntry : migrationData.entrySet()) {
// String name = longEntry.getKey();
// HLLWrapper number = service.getHLL(name);
// byte[] value = longEntry.getValue();
// number.set(value);
// }
// }
//
// @Override
// public String getServiceName() {
// return HyperLogLogService.SERVICE_NAME;
// }
//
// @Override
// public int getFactoryId() {
// return HyperLogLogSerializerFactory.F_ID;
// }
//
// @Override
// public int getId() {
// return HyperLogLogSerializerFactory.REPLICATION;
// }
//
// @Override
// protected void writeInternal(ObjectDataOutput out) throws IOException {
// out.writeInt(migrationData.size());
// for (Map.Entry<String, byte[]> entry : migrationData.entrySet()) {
// out.writeUTF(entry.getKey());
// byte[] val = entry.getValue();
// out.writeInt(val.length);
// out.write(val);
// }
// }
//
// @Override
// protected void readInternal(ObjectDataInput in) throws IOException {
// int mapSize = in.readInt();
// migrationData = new HashMap<String, byte[]>(mapSize);
// for (int i = 0; i < mapSize; i++) {
// String name = in.readUTF();
// int number = in.readInt();
// byte[] a = new byte[number];
// in.readFully(a);
// migrationData.put(name, a);
// }
// }
// }
//
// Path: src/main/java/org/rakam/util/HLLWrapper.java
// public class HLLWrapper {
// final private static int SEED = 123456;
// private HLL hll;
//
// public HLLWrapper() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
//
// public HLLWrapper(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public long cardinality() {
// return hll.cardinality();
// }
//
// public void union(HLLWrapper hll) {
// this.hll.union(hll.hll);
// }
//
// public void addAll(Collection<String> coll) {
// for (String a : coll) {
// byte[] s = a.getBytes();
// hll.addRaw(MurmurHash3.murmurhash3x8632(s, 0, s.length, SEED));
// }
// }
//
// public void add(String obj) {
// if (obj == null)
// throw new IllegalArgumentException();
// byte[] s = obj.getBytes();
//
// hll.addRaw(MurmurHash3.murmurhash3x8632(s, 0, s.length, SEED));
// }
//
// public void set(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public byte[] bytes() {
// return hll.toBytes();
// }
//
// public void reset() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
// }
// Path: src/main/java/org/rakam/cache/hazelcast/hyperloglog/HyperLogLogService.java
import com.hazelcast.partition.InternalPartitionService;
import com.hazelcast.partition.MigrationEndpoint;
import com.hazelcast.spi.*;
import com.hazelcast.util.ConstructorFunction;
import org.rakam.cache.hazelcast.hyperloglog.operations.HyperLogLogReplicationOperation;
import org.rakam.util.HLLWrapper;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import static com.hazelcast.partition.strategy.StringPartitioningStrategy.getPartitionKey;
import static com.hazelcast.util.ConcurrencyUtil.getOrPutIfAbsent;
@Override
public void shutdown(boolean terminate) {
reset();
}
@Override
public HyperLogLogProxy createDistributedObject(String name) {
return new HyperLogLogProxy(name, nodeEngine, this);
}
@Override
public void destroyDistributedObject(String name) {
containers.remove(name);
}
@Override
public void beforeMigration(PartitionMigrationEvent partitionMigrationEvent) {
}
@Override
public Operation prepareReplicationOperation(PartitionReplicationEvent event) {
Map<String, byte[]> data = new HashMap<String, byte[]>();
int partitionId = event.getPartitionId();
for (String name : containers.keySet()) {
if (partitionId == getPartitionId(name)) {
HLLWrapper number = containers.get(name);
data.put(name, number.bytes());
}
} | return data.isEmpty() ? null : new HyperLogLogReplicationOperation(data); |
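A MigrationAwareService like this one only takes part in partition migration if the member knows about it, which in Hazelcast 3.x happens through the services configuration. A sketch of the programmatic registration; ServiceConfig and getServicesConfig() are 3.x SPI config names, and using them here is an assumption about deployment, not code from this repo.

// Sketch only: registering the custom SPI service on a member.
// ServiceConfig/getServicesConfig() are assumed from the Hazelcast 3.x
// config API; verify against the exact Hazelcast version in use.
import com.hazelcast.config.Config;
import com.hazelcast.config.ServiceConfig;
import com.hazelcast.core.Hazelcast;
import org.rakam.cache.hazelcast.hyperloglog.HyperLogLogService;

public class MemberBootstrap {
    public static void main(String[] args) {
        Config config = new Config();
        config.getServicesConfig().addServiceConfig(
                new ServiceConfig()
                        .setEnabled(true)
                        .setName(HyperLogLogService.SERVICE_NAME)
                        .setClassName(HyperLogLogService.class.getName()));
        Hazelcast.newHazelcastInstance(config);               // service now participates in migration
    }
}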
buremba/hazelcast-modules | src/main/java/org/rakam/cache/hazelcast/treemap/client/ReadRequest.java | // Path: src/main/java/org/rakam/cache/hazelcast/treemap/TreeMapService.java
// public class TreeMapService implements ManagedService, RemoteService, MigrationAwareService {
//
// public static final String SERVICE_NAME = "rakam:treeMapService";
//
// private NodeEngine nodeEngine;
// private final ConcurrentMap<String, OrderedCounterMap> containers = new ConcurrentHashMap<String, OrderedCounterMap>();
// private final ConstructorFunction<String, OrderedCounterMap> CountersConstructorFunction =
// new ConstructorFunction<String, OrderedCounterMap>() {
// public OrderedCounterMap createNew(String key) {
// return new OrderedCounterMap();
// }
// };
//
// public TreeMapService() {
// }
//
// public OrderedCounterMap getHLL(String name) {
// return getOrPutIfAbsent(containers, name, CountersConstructorFunction);
// }
//
// public void setHLL(String name, OrderedCounterMap map) {
// containers.put(name, map);
// }
//
// @Override
// public void init(NodeEngine nodeEngine, Properties properties) {
// this.nodeEngine = nodeEngine;
// }
//
// @Override
// public void reset() {
// containers.clear();
// }
//
// @Override
// public void shutdown(boolean terminate) {
// reset();
// }
//
// @Override
// public TreeMapProxy createDistributedObject(String name) {
// return new TreeMapProxy(name, nodeEngine, this);
// }
//
// @Override
// public void destroyDistributedObject(String name) {
// containers.remove(name);
// }
//
// @Override
// public void beforeMigration(PartitionMigrationEvent partitionMigrationEvent) {
// }
//
// @Override
// public Operation prepareReplicationOperation(PartitionReplicationEvent event) {
//         Map<String, OrderedCounterMap> data = new HashMap<String, OrderedCounterMap>();
// int partitionId = event.getPartitionId();
// for (String name : containers.keySet()) {
// if (partitionId == getPartitionId(name)) {
// OrderedCounterMap number = containers.get(name);
// data.put(name, number);
// }
// }
// return data.isEmpty() ? null : new TreeMapReplicationOperation(data);
// }
//
// private int getPartitionId(String name) {
// InternalPartitionService partitionService = nodeEngine.getPartitionService();
// String partitionKey = getPartitionKey(name);
// return partitionService.getPartitionId(partitionKey);
// }
//
// @Override
// public void commitMigration(PartitionMigrationEvent partitionMigrationEvent) {
// if (partitionMigrationEvent.getMigrationEndpoint() == MigrationEndpoint.SOURCE) {
// removePartition(partitionMigrationEvent.getPartitionId());
// }
// }
//
// @Override
// public void rollbackMigration(PartitionMigrationEvent partitionMigrationEvent) {
// if (partitionMigrationEvent.getMigrationEndpoint() == MigrationEndpoint.DESTINATION) {
// removePartition(partitionMigrationEvent.getPartitionId());
// }
// }
//
// @Override
// public void clearPartitionReplica(int partitionId) {
// removePartition(partitionId);
// }
//
// public void removePartition(int partitionId) {
// final Iterator<String> iterator = containers.keySet().iterator();
// while (iterator.hasNext()) {
// String name = iterator.next();
// if (getPartitionId(name) == partitionId) {
// iterator.remove();
// }
// }
// }
// }
| import com.hazelcast.client.ClientEngine;
import com.hazelcast.client.PartitionClientRequest;
import com.hazelcast.client.SecureRequest;
import com.hazelcast.nio.serialization.Data;
import com.hazelcast.nio.serialization.Portable;
import com.hazelcast.nio.serialization.PortableReader;
import com.hazelcast.nio.serialization.PortableWriter;
import com.hazelcast.security.permission.ActionConstants;
import com.hazelcast.security.permission.AtomicLongPermission;
import org.rakam.cache.hazelcast.treemap.TreeMapService;
import java.io.IOException;
import java.security.Permission; | /**
* Created by buremba <Burak Emre Kabakcı> on 10/07/14.
*/
package org.rakam.cache.hazelcast.treemap.client;
public abstract class ReadRequest extends PartitionClientRequest implements Portable, SecureRequest {
protected String name;
public ReadRequest() {
}
public ReadRequest(String name) {
this.name = name;
}
@Override
protected int getPartition() {
ClientEngine clientEngine = getClientEngine();
//Data key = serializationService.toData(name);
Data key = clientEngine.getSerializationService().toData(name);
return clientEngine.getPartitionService().getPartitionId(key);
}
@Override
public String getServiceName() { | // Path: src/main/java/org/rakam/cache/hazelcast/treemap/TreeMapService.java
// public class TreeMapService implements ManagedService, RemoteService, MigrationAwareService {
//
// public static final String SERVICE_NAME = "rakam:treeMapService";
//
// private NodeEngine nodeEngine;
// private final ConcurrentMap<String, OrderedCounterMap> containers = new ConcurrentHashMap<String, OrderedCounterMap>();
// private final ConstructorFunction<String, OrderedCounterMap> CountersConstructorFunction =
// new ConstructorFunction<String, OrderedCounterMap>() {
// public OrderedCounterMap createNew(String key) {
// return new OrderedCounterMap();
// }
// };
//
// public TreeMapService() {
// }
//
// public OrderedCounterMap getHLL(String name) {
// return getOrPutIfAbsent(containers, name, CountersConstructorFunction);
// }
//
// public void setHLL(String name, OrderedCounterMap map) {
// containers.put(name, map);
// }
//
// @Override
// public void init(NodeEngine nodeEngine, Properties properties) {
// this.nodeEngine = nodeEngine;
// }
//
// @Override
// public void reset() {
// containers.clear();
// }
//
// @Override
// public void shutdown(boolean terminate) {
// reset();
// }
//
// @Override
// public TreeMapProxy createDistributedObject(String name) {
// return new TreeMapProxy(name, nodeEngine, this);
// }
//
// @Override
// public void destroyDistributedObject(String name) {
// containers.remove(name);
// }
//
// @Override
// public void beforeMigration(PartitionMigrationEvent partitionMigrationEvent) {
// }
//
// @Override
// public Operation prepareReplicationOperation(PartitionReplicationEvent event) {
//         Map<String, OrderedCounterMap> data = new HashMap<String, OrderedCounterMap>();
// int partitionId = event.getPartitionId();
// for (String name : containers.keySet()) {
// if (partitionId == getPartitionId(name)) {
// OrderedCounterMap number = containers.get(name);
// data.put(name, number);
// }
// }
// return data.isEmpty() ? null : new TreeMapReplicationOperation(data);
// }
//
// private int getPartitionId(String name) {
// InternalPartitionService partitionService = nodeEngine.getPartitionService();
// String partitionKey = getPartitionKey(name);
// return partitionService.getPartitionId(partitionKey);
// }
//
// @Override
// public void commitMigration(PartitionMigrationEvent partitionMigrationEvent) {
// if (partitionMigrationEvent.getMigrationEndpoint() == MigrationEndpoint.SOURCE) {
// removePartition(partitionMigrationEvent.getPartitionId());
// }
// }
//
// @Override
// public void rollbackMigration(PartitionMigrationEvent partitionMigrationEvent) {
// if (partitionMigrationEvent.getMigrationEndpoint() == MigrationEndpoint.DESTINATION) {
// removePartition(partitionMigrationEvent.getPartitionId());
// }
// }
//
// @Override
// public void clearPartitionReplica(int partitionId) {
// removePartition(partitionId);
// }
//
// public void removePartition(int partitionId) {
// final Iterator<String> iterator = containers.keySet().iterator();
// while (iterator.hasNext()) {
// String name = iterator.next();
// if (getPartitionId(name) == partitionId) {
// iterator.remove();
// }
// }
// }
// }
// Path: src/main/java/org/rakam/cache/hazelcast/treemap/client/ReadRequest.java
import com.hazelcast.client.ClientEngine;
import com.hazelcast.client.PartitionClientRequest;
import com.hazelcast.client.SecureRequest;
import com.hazelcast.nio.serialization.Data;
import com.hazelcast.nio.serialization.Portable;
import com.hazelcast.nio.serialization.PortableReader;
import com.hazelcast.nio.serialization.PortableWriter;
import com.hazelcast.security.permission.ActionConstants;
import com.hazelcast.security.permission.AtomicLongPermission;
import org.rakam.cache.hazelcast.treemap.TreeMapService;
import java.io.IOException;
import java.security.Permission;
/**
* Created by buremba <Burak Emre Kabakcı> on 10/07/14.
*/
package org.rakam.cache.hazelcast.treemap.client;
public abstract class ReadRequest extends PartitionClientRequest implements Portable, SecureRequest {
protected String name;
public ReadRequest() {
}
public ReadRequest(String name) {
this.name = name;
}
@Override
protected int getPartition() {
ClientEngine clientEngine = getClientEngine();
//Data key = serializationService.toData(name);
Data key = clientEngine.getSerializationService().toData(name);
return clientEngine.getPartitionService().getPartitionId(key);
}
@Override
public String getServiceName() { | return TreeMapService.SERVICE_NAME; |
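ReadRequest also implements SecureRequest, and the ActionConstants/AtomicLongPermission imports above suggest it guards reads with a permission check. A sketch of that hook; reusing AtomicLongPermission for a custom structure is inferred from the imports, not confirmed by the visible code.

// Sketch only: the SecureRequest hook implied by the imports above.
// Borrowing AtomicLongPermission for a custom structure is an inference.
@Override
public Permission getRequiredPermission() {
    return new AtomicLongPermission(name, ActionConstants.ACTION_READ);
}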
buremba/hazelcast-modules | src/main/java/org/rakam/cache/hazelcast/hyperloglog/client/ClientHyperLogLogProxyFactory.java | // Path: src/main/java/org/rakam/cache/hazelcast/hyperloglog/HyperLogLogService.java
// public class HyperLogLogService implements ManagedService, RemoteService, MigrationAwareService {
//
// public static final String SERVICE_NAME = "rakam:hyperLogLogService";
//
// private NodeEngine nodeEngine;
// private final ConcurrentMap<String, HLLWrapper> containers = new ConcurrentHashMap<String, HLLWrapper>();
// private final ConstructorFunction<String, HLLWrapper> CountersConstructorFunction =
// new ConstructorFunction<String, HLLWrapper>() {
// public HLLWrapper createNew(String key) {
// return new HLLWrapper();
// }
// };
//
// public HyperLogLogService() {
// }
//
// public HLLWrapper getHLL(String name) {
// return getOrPutIfAbsent(containers, name, CountersConstructorFunction);
// }
//
// @Override
// public void init(NodeEngine nodeEngine, Properties properties) {
// this.nodeEngine = nodeEngine;
// }
//
// @Override
// public void reset() {
// containers.clear();
// }
//
// @Override
// public void shutdown(boolean terminate) {
// reset();
// }
//
// @Override
// public HyperLogLogProxy createDistributedObject(String name) {
// return new HyperLogLogProxy(name, nodeEngine, this);
// }
//
// @Override
// public void destroyDistributedObject(String name) {
// containers.remove(name);
// }
//
// @Override
// public void beforeMigration(PartitionMigrationEvent partitionMigrationEvent) {
// }
//
// @Override
// public Operation prepareReplicationOperation(PartitionReplicationEvent event) {
//
// Map<String, byte[]> data = new HashMap<String, byte[]>();
// int partitionId = event.getPartitionId();
// for (String name : containers.keySet()) {
// if (partitionId == getPartitionId(name)) {
// HLLWrapper number = containers.get(name);
// data.put(name, number.bytes());
// }
// }
// return data.isEmpty() ? null : new HyperLogLogReplicationOperation(data);
// }
//
// private int getPartitionId(String name) {
// InternalPartitionService partitionService = nodeEngine.getPartitionService();
// String partitionKey = getPartitionKey(name);
// return partitionService.getPartitionId(partitionKey);
// }
//
// @Override
// public void commitMigration(PartitionMigrationEvent partitionMigrationEvent) {
// if (partitionMigrationEvent.getMigrationEndpoint() == MigrationEndpoint.SOURCE) {
// removePartition(partitionMigrationEvent.getPartitionId());
// }
// }
//
// @Override
// public void rollbackMigration(PartitionMigrationEvent partitionMigrationEvent) {
// if (partitionMigrationEvent.getMigrationEndpoint() == MigrationEndpoint.DESTINATION) {
// removePartition(partitionMigrationEvent.getPartitionId());
// }
// }
//
// @Override
// public void clearPartitionReplica(int partitionId) {
// removePartition(partitionId);
// }
//
// public void removePartition(int partitionId) {
// final Iterator<String> iterator = containers.keySet().iterator();
// while (iterator.hasNext()) {
// String name = iterator.next();
// if (getPartitionId(name) == partitionId) {
// iterator.remove();
// }
// }
// }
// }
| import com.hazelcast.client.spi.ClientProxyFactory;
import org.rakam.cache.hazelcast.hyperloglog.HyperLogLogService;
import com.hazelcast.client.spi.ClientProxy; | /**
* Created by buremba <Burak Emre Kabakcı> on 10/07/14.
*/
package org.rakam.cache.hazelcast.hyperloglog.client;
public class ClientHyperLogLogProxyFactory implements ClientProxyFactory {
@Override
public ClientProxy create(String s) { | // Path: src/main/java/org/rakam/cache/hazelcast/hyperloglog/HyperLogLogService.java
// public class HyperLogLogService implements ManagedService, RemoteService, MigrationAwareService {
//
// public static final String SERVICE_NAME = "rakam:hyperLogLogService";
//
// private NodeEngine nodeEngine;
// private final ConcurrentMap<String, HLLWrapper> containers = new ConcurrentHashMap<String, HLLWrapper>();
// private final ConstructorFunction<String, HLLWrapper> CountersConstructorFunction =
// new ConstructorFunction<String, HLLWrapper>() {
// public HLLWrapper createNew(String key) {
// return new HLLWrapper();
// }
// };
//
// public HyperLogLogService() {
// }
//
// public HLLWrapper getHLL(String name) {
// return getOrPutIfAbsent(containers, name, CountersConstructorFunction);
// }
//
// @Override
// public void init(NodeEngine nodeEngine, Properties properties) {
// this.nodeEngine = nodeEngine;
// }
//
// @Override
// public void reset() {
// containers.clear();
// }
//
// @Override
// public void shutdown(boolean terminate) {
// reset();
// }
//
// @Override
// public HyperLogLogProxy createDistributedObject(String name) {
// return new HyperLogLogProxy(name, nodeEngine, this);
// }
//
// @Override
// public void destroyDistributedObject(String name) {
// containers.remove(name);
// }
//
// @Override
// public void beforeMigration(PartitionMigrationEvent partitionMigrationEvent) {
// }
//
// @Override
// public Operation prepareReplicationOperation(PartitionReplicationEvent event) {
//
// Map<String, byte[]> data = new HashMap<String, byte[]>();
// int partitionId = event.getPartitionId();
// for (String name : containers.keySet()) {
// if (partitionId == getPartitionId(name)) {
// HLLWrapper number = containers.get(name);
// data.put(name, number.bytes());
// }
// }
// return data.isEmpty() ? null : new HyperLogLogReplicationOperation(data);
// }
//
// private int getPartitionId(String name) {
// InternalPartitionService partitionService = nodeEngine.getPartitionService();
// String partitionKey = getPartitionKey(name);
// return partitionService.getPartitionId(partitionKey);
// }
//
// @Override
// public void commitMigration(PartitionMigrationEvent partitionMigrationEvent) {
// if (partitionMigrationEvent.getMigrationEndpoint() == MigrationEndpoint.SOURCE) {
// removePartition(partitionMigrationEvent.getPartitionId());
// }
// }
//
// @Override
// public void rollbackMigration(PartitionMigrationEvent partitionMigrationEvent) {
// if (partitionMigrationEvent.getMigrationEndpoint() == MigrationEndpoint.DESTINATION) {
// removePartition(partitionMigrationEvent.getPartitionId());
// }
// }
//
// @Override
// public void clearPartitionReplica(int partitionId) {
// removePartition(partitionId);
// }
//
// public void removePartition(int partitionId) {
// final Iterator<String> iterator = containers.keySet().iterator();
// while (iterator.hasNext()) {
// String name = iterator.next();
// if (getPartitionId(name) == partitionId) {
// iterator.remove();
// }
// }
// }
// }
// Path: src/main/java/org/rakam/cache/hazelcast/hyperloglog/client/ClientHyperLogLogProxyFactory.java
import com.hazelcast.client.spi.ClientProxyFactory;
import org.rakam.cache.hazelcast.hyperloglog.HyperLogLogService;
import com.hazelcast.client.spi.ClientProxy;
/**
* Created by buremba <Burak Emre Kabakcı> on 10/07/14.
*/
package org.rakam.cache.hazelcast.hyperloglog.client;
public class ClientHyperLogLogProxyFactory implements ClientProxyFactory {
@Override
public ClientProxy create(String s) { | return new ClientHyperLogLogProxy("ClientHyperLogLogProxy", HyperLogLogService.SERVICE_NAME, s); |
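For getDistributedObject on a client to hand back ClientHyperLogLogProxy instances, this factory has to be registered in the client configuration. A sketch of that wiring; ProxyFactoryConfig, its setters, and addProxyFactoryConfig are assumed from the Hazelcast 3.x client API, and the object name "daily-uniques" is purely illustrative.

// Sketch only: registering the proxy factory on the client side.
// ProxyFactoryConfig and addProxyFactoryConfig are assumed 3.x client API;
// "daily-uniques" is just an example object name.
import com.hazelcast.client.HazelcastClient;
import com.hazelcast.client.config.ClientConfig;
import com.hazelcast.client.config.ProxyFactoryConfig;
import com.hazelcast.core.DistributedObject;
import com.hazelcast.core.HazelcastInstance;
import org.rakam.cache.hazelcast.hyperloglog.HyperLogLogService;
import org.rakam.cache.hazelcast.hyperloglog.client.ClientHyperLogLogProxyFactory;

public class ClientBootstrap {
    public static void main(String[] args) {
        ProxyFactoryConfig factoryConfig = new ProxyFactoryConfig();
        factoryConfig.setService(HyperLogLogService.SERVICE_NAME);
        factoryConfig.setClassName(ClientHyperLogLogProxyFactory.class.getName());

        ClientConfig clientConfig = new ClientConfig();
        clientConfig.addProxyFactoryConfig(factoryConfig);

        HazelcastInstance client = HazelcastClient.newHazelcastClient(clientConfig);
        DistributedObject hll = client.getDistributedObject(
                HyperLogLogService.SERVICE_NAME, "daily-uniques");
    }
}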
buremba/hazelcast-modules | src/main/java/org/rakam/cache/hazelcast/hyperloglog/operations/ResetBackupOperation.java | // Path: src/main/java/org/rakam/cache/hazelcast/hyperloglog/HyperLogLogBaseOperation.java
// public abstract class HyperLogLogBaseOperation extends Operation implements PartitionAwareOperation, IdentifiedDataSerializable {
//
// protected String name;
//
// public HyperLogLogBaseOperation() {
// }
//
// public HyperLogLogBaseOperation(String name) {
// this.name = name;
// }
//
// public HLLWrapper getHLL() {
// HyperLogLogService service = getService();
// return service.getHLL(name);
// }
//
// @Override
// public int getFactoryId() {
// return HyperLogLogSerializerFactory.F_ID;
// }
//
// @Override
// protected void writeInternal(ObjectDataOutput out) throws IOException {
// out.writeUTF(name);
// }
//
// @Override
// protected void readInternal(ObjectDataInput in) throws IOException {
// name = in.readUTF();
// }
//
// @Override
// public void afterRun() throws Exception {
// }
//
// @Override
// public void beforeRun() throws Exception {
// }
//
// @Override
// public Object getResponse() {
// return null;
// }
//
// @Override
// public boolean returnsResponse() {
// return true;
// }
// }
//
// Path: src/main/java/org/rakam/util/HLLWrapper.java
// public class HLLWrapper {
// final private static int SEED = 123456;
// private HLL hll;
//
// public HLLWrapper() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
//
// public HLLWrapper(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public long cardinality() {
// return hll.cardinality();
// }
//
// public void union(HLLWrapper hll) {
// this.hll.union(hll.hll);
// }
//
// public void addAll(Collection<String> coll) {
// for (String a : coll) {
// byte[] s = a.getBytes();
// hll.addRaw(MurmurHash3.murmurhash3x8632(s, 0, s.length, SEED));
// }
// }
//
// public void add(String obj) {
// if (obj == null)
// throw new IllegalArgumentException();
// byte[] s = obj.getBytes();
//
// hll.addRaw(MurmurHash3.murmurhash3x8632(s, 0, s.length, SEED));
// }
//
// public void set(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public byte[] bytes() {
// return hll.toBytes();
// }
//
// public void reset() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
// }
| import com.hazelcast.spi.BackupOperation;
import org.rakam.cache.hazelcast.hyperloglog.HyperLogLogBaseOperation;
import org.rakam.util.HLLWrapper; | package org.rakam.cache.hazelcast.hyperloglog.operations;
/**
* Created by buremba <Burak Emre Kabakcı> on 11/07/14 16:16.
*/
public class ResetBackupOperation extends HyperLogLogBaseOperation implements BackupOperation {
public ResetBackupOperation() {
}
public ResetBackupOperation(String name) {
super(name);
}
@Override
public void run() throws IllegalArgumentException { | // Path: src/main/java/org/rakam/cache/hazelcast/hyperloglog/HyperLogLogBaseOperation.java
// public abstract class HyperLogLogBaseOperation extends Operation implements PartitionAwareOperation, IdentifiedDataSerializable {
//
// protected String name;
//
// public HyperLogLogBaseOperation() {
// }
//
// public HyperLogLogBaseOperation(String name) {
// this.name = name;
// }
//
// public HLLWrapper getHLL() {
// HyperLogLogService service = getService();
// return service.getHLL(name);
// }
//
// @Override
// public int getFactoryId() {
// return HyperLogLogSerializerFactory.F_ID;
// }
//
// @Override
// protected void writeInternal(ObjectDataOutput out) throws IOException {
// out.writeUTF(name);
// }
//
// @Override
// protected void readInternal(ObjectDataInput in) throws IOException {
// name = in.readUTF();
// }
//
// @Override
// public void afterRun() throws Exception {
// }
//
// @Override
// public void beforeRun() throws Exception {
// }
//
// @Override
// public Object getResponse() {
// return null;
// }
//
// @Override
// public boolean returnsResponse() {
// return true;
// }
// }
//
// Path: src/main/java/org/rakam/util/HLLWrapper.java
// public class HLLWrapper {
// final private static int SEED = 123456;
// private HLL hll;
//
// public HLLWrapper() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
//
// public HLLWrapper(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public long cardinality() {
// return hll.cardinality();
// }
//
// public void union(HLLWrapper hll) {
// this.hll.union(hll.hll);
// }
//
// public void addAll(Collection<String> coll) {
// for (String a : coll) {
// byte[] s = a.getBytes();
// hll.addRaw(MurmurHash3.murmurhash3x8632(s, 0, s.length, SEED));
// }
// }
//
// public void add(String obj) {
// if (obj == null)
// throw new IllegalArgumentException();
// byte[] s = obj.getBytes();
//
// hll.addRaw(MurmurHash3.murmurhash3x8632(s, 0, s.length, SEED));
// }
//
// public void set(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public byte[] bytes() {
// return hll.toBytes();
// }
//
// public void reset() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
// }
// Path: src/main/java/org/rakam/cache/hazelcast/hyperloglog/operations/ResetBackupOperation.java
import com.hazelcast.spi.BackupOperation;
import org.rakam.cache.hazelcast.hyperloglog.HyperLogLogBaseOperation;
import org.rakam.util.HLLWrapper;
package org.rakam.cache.hazelcast.hyperloglog.operations;
/**
* Created by buremba <Burak Emre Kabakcı> on 11/07/14 16:16.
*/
public class ResetBackupOperation extends HyperLogLogBaseOperation implements BackupOperation {
public ResetBackupOperation() {
}
public ResetBackupOperation(String name) {
super(name);
}
@Override
public void run() throws IllegalArgumentException { | HLLWrapper hll = getHLL(); |
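The backup half above only runs because a primary operation nominates it. A sketch of the matching primary, modeled directly on the AddAllOperation pattern quoted earlier (HyperLogLogBackupAwareOperation base class, RESET type id from the serializer factory); the repo's actual ResetOperation may differ in detail.

// Sketch only: the primary operation that pairs with ResetBackupOperation,
// modeled on AddAllOperation above; the real class may differ in detail.
public class ResetOperation extends HyperLogLogBackupAwareOperation {

    public ResetOperation() {
    }

    public ResetOperation(String name) {
        super(name);
    }

    @Override
    public void run() throws Exception {
        getHLL().reset();                       // clear the primary replica's sketch
    }

    @Override
    public int getId() {
        return HyperLogLogSerializerFactory.RESET;
    }

    @Override
    public Operation getBackupOperation() {
        return new ResetBackupOperation(name);  // replayed on each backup replica
    }
}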
buremba/hazelcast-modules | src/main/java/org/rakam/cache/hazelcast/hyperloglog/HyperLogLogBaseOperation.java | // Path: src/main/java/org/rakam/cache/hazelcast/hyperloglog/operations/HyperLogLogSerializerFactory.java
// public final class HyperLogLogSerializerFactory implements DataSerializableFactory {
//
// public static final int F_ID = 100;
//
// public static final int ADD = 0;
// public static final int ADD_BACKUP = 1;
// public static final int ADD_ALL = 2;
// public static final int ADD_ALL_BACKUP = 3;
// public static final int CARDINALITY = 4;
// public static final int UNION = 5;
// public static final int UNION_BACKUP = 6;
// public static final int REPLICATION = 7;
// public static final int RESET = 8;
// public static final int RESET_BACKUP = 9;
//
// @Override
// public IdentifiedDataSerializable create(int typeId) {
// switch (typeId) {
// case ADD_BACKUP:
// return new AddBackupOperation();
// case ADD:
//             return new AddOperation();
// case ADD_ALL_BACKUP:
// return new AddAllBackupOperation();
// case ADD_ALL:
// return new AddAllOperation();
// case CARDINALITY:
// return new CardinalityOperation();
// case UNION_BACKUP:
// return new UnionBackupOperation();
// case UNION:
// return new UnionOperation();
// case REPLICATION:
// return new HyperLogLogReplicationOperation();
// default:
// return null;
// }
// }
// }
//
// Path: src/main/java/org/rakam/util/HLLWrapper.java
// public class HLLWrapper {
// final private static int SEED = 123456;
// private HLL hll;
//
// public HLLWrapper() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
//
// public HLLWrapper(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public long cardinality() {
// return hll.cardinality();
// }
//
// public void union(HLLWrapper hll) {
// this.hll.union(hll.hll);
// }
//
// public void addAll(Collection<String> coll) {
// for (String a : coll) {
// byte[] s = a.getBytes();
// hll.addRaw(MurmurHash3.murmurhash3x8632(s, 0, s.length, SEED));
// }
// }
//
// public void add(String obj) {
// if (obj == null)
// throw new IllegalArgumentException();
// byte[] s = obj.getBytes();
//
// hll.addRaw(MurmurHash3.murmurhash3x8632(s, 0, s.length, SEED));
// }
//
// public void set(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public byte[] bytes() {
// return hll.toBytes();
// }
//
// public void reset() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
// }
| import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.nio.serialization.IdentifiedDataSerializable;
import com.hazelcast.spi.Operation;
import com.hazelcast.spi.PartitionAwareOperation;
import org.rakam.cache.hazelcast.hyperloglog.operations.HyperLogLogSerializerFactory;
import org.rakam.util.HLLWrapper;
import java.io.IOException; | /**
* Created by buremba <Burak Emre Kabakcı> on 10/07/14.
*/
package org.rakam.cache.hazelcast.hyperloglog;
public abstract class HyperLogLogBaseOperation extends Operation implements PartitionAwareOperation, IdentifiedDataSerializable {
protected String name;
public HyperLogLogBaseOperation() {
}
public HyperLogLogBaseOperation(String name) {
this.name = name;
}
| // Path: src/main/java/org/rakam/cache/hazelcast/hyperloglog/operations/HyperLogLogSerializerFactory.java
// public final class HyperLogLogSerializerFactory implements DataSerializableFactory {
//
// public static final int F_ID = 100;
//
// public static final int ADD = 0;
// public static final int ADD_BACKUP = 1;
// public static final int ADD_ALL = 2;
// public static final int ADD_ALL_BACKUP = 3;
// public static final int CARDINALITY = 4;
// public static final int UNION = 5;
// public static final int UNION_BACKUP = 6;
// public static final int REPLICATION = 7;
// public static final int RESET = 8;
// public static final int RESET_BACKUP = 9;
//
// @Override
// public IdentifiedDataSerializable create(int typeId) {
// switch (typeId) {
// case ADD_BACKUP:
// return new AddBackupOperation();
// case ADD:
//             return new AddOperation();
// case ADD_ALL_BACKUP:
// return new AddAllBackupOperation();
// case ADD_ALL:
// return new AddAllOperation();
// case CARDINALITY:
// return new CardinalityOperation();
// case UNION_BACKUP:
// return new UnionBackupOperation();
// case UNION:
// return new UnionOperation();
// case REPLICATION:
// return new HyperLogLogReplicationOperation();
// default:
// return null;
// }
// }
// }
//
// Path: src/main/java/org/rakam/util/HLLWrapper.java
// public class HLLWrapper {
// final private static int SEED = 123456;
// private HLL hll;
//
// public HLLWrapper() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
//
// public HLLWrapper(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public long cardinality() {
// return hll.cardinality();
// }
//
// public void union(HLLWrapper hll) {
// this.hll.union(hll.hll);
// }
//
// public void addAll(Collection<String> coll) {
// for (String a : coll) {
// byte[] s = a.getBytes();
// hll.addRaw(MurmurHash3.murmurhash3x8632(s, 0, s.length, SEED));
// }
// }
//
// public void add(String obj) {
// if (obj == null)
// throw new IllegalArgumentException();
// byte[] s = obj.getBytes();
//
// hll.addRaw(MurmurHash3.murmurhash3x8632(s, 0, s.length, SEED));
// }
//
// public void set(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public byte[] bytes() {
// return hll.toBytes();
// }
//
// public void reset() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
// }
// Path: src/main/java/org/rakam/cache/hazelcast/hyperloglog/HyperLogLogBaseOperation.java
import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.nio.serialization.IdentifiedDataSerializable;
import com.hazelcast.spi.Operation;
import com.hazelcast.spi.PartitionAwareOperation;
import org.rakam.cache.hazelcast.hyperloglog.operations.HyperLogLogSerializerFactory;
import org.rakam.util.HLLWrapper;
import java.io.IOException;
/**
* Created by buremba <Burak Emre Kabakcı> on 10/07/14.
*/
package org.rakam.cache.hazelcast.hyperloglog;
public abstract class HyperLogLogBaseOperation extends Operation implements PartitionAwareOperation, IdentifiedDataSerializable {
protected String name;
public HyperLogLogBaseOperation() {
}
public HyperLogLogBaseOperation(String name) {
this.name = name;
}
| public HLLWrapper getHLL() { |
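These PartitionAwareOperation subclasses are routed to the member that owns the partition of the structure's name. A sketch of how a proxy typically dispatches one, using the Hazelcast 3.x OperationService API and the CardinalityOperation named in the serializer factory; the helper itself is hypothetical, not repo code, and getPartitionKey is the static import used by the service code quoted above.

// Sketch only: partition-routed dispatch of one of these operations.
// invokeOnPartition and the partition lookup mirror the service code quoted
// above; this helper is hypothetical. Assumes a static import of
// StringPartitioningStrategy.getPartitionKey, as in HyperLogLogService.
InternalCompletableFuture<Long> invokeCardinality(NodeEngine nodeEngine, String name) {
    int partitionId = nodeEngine.getPartitionService()
            .getPartitionId(getPartitionKey(name));    // same strategy as the service
    Operation operation = new CardinalityOperation(name);
    return nodeEngine.getOperationService()
            .invokeOnPartition(HyperLogLogService.SERVICE_NAME, operation, partitionId);
}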
buremba/hazelcast-modules | src/main/java/org/rakam/cache/hazelcast/hyperloglog/HyperLogLogBaseOperation.java | // Path: src/main/java/org/rakam/cache/hazelcast/hyperloglog/operations/HyperLogLogSerializerFactory.java
// public final class HyperLogLogSerializerFactory implements DataSerializableFactory {
//
// public static final int F_ID = 100;
//
// public static final int ADD = 0;
// public static final int ADD_BACKUP = 1;
// public static final int ADD_ALL = 2;
// public static final int ADD_ALL_BACKUP = 3;
// public static final int CARDINALITY = 4;
// public static final int UNION = 5;
// public static final int UNION_BACKUP = 6;
// public static final int REPLICATION = 7;
// public static final int RESET = 8;
// public static final int RESET_BACKUP = 9;
//
// @Override
// public IdentifiedDataSerializable create(int typeId) {
// switch (typeId) {
// case ADD_BACKUP:
// return new AddBackupOperation();
// case ADD:
//             return new AddOperation();
// case ADD_ALL_BACKUP:
// return new AddAllBackupOperation();
// case ADD_ALL:
// return new AddAllOperation();
// case CARDINALITY:
// return new CardinalityOperation();
// case UNION_BACKUP:
// return new UnionBackupOperation();
// case UNION:
// return new UnionOperation();
// case REPLICATION:
// return new HyperLogLogReplicationOperation();
// default:
// return null;
// }
// }
// }
//
// Path: src/main/java/org/rakam/util/HLLWrapper.java
// public class HLLWrapper {
// final private static int SEED = 123456;
// private HLL hll;
//
// public HLLWrapper() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
//
// public HLLWrapper(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public long cardinality() {
// return hll.cardinality();
// }
//
// public void union(HLLWrapper hll) {
// this.hll.union(hll.hll);
// }
//
// public void addAll(Collection<String> coll) {
// for (String a : coll) {
// byte[] s = a.getBytes();
// hll.addRaw(MurmurHash3.murmurhash3x8632(s, 0, s.length, SEED));
// }
// }
//
// public void add(String obj) {
// if (obj == null)
// throw new IllegalArgumentException();
// byte[] s = obj.getBytes();
//
// hll.addRaw(MurmurHash3.murmurhash3x8632(s, 0, s.length, SEED));
// }
//
// public void set(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public byte[] bytes() {
// return hll.toBytes();
// }
//
// public void reset() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
// }
| import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.nio.serialization.IdentifiedDataSerializable;
import com.hazelcast.spi.Operation;
import com.hazelcast.spi.PartitionAwareOperation;
import org.rakam.cache.hazelcast.hyperloglog.operations.HyperLogLogSerializerFactory;
import org.rakam.util.HLLWrapper;
import java.io.IOException; | /**
* Created by buremba <Burak Emre Kabakcı> on 10/07/14.
*/
package org.rakam.cache.hazelcast.hyperloglog;
public abstract class HyperLogLogBaseOperation extends Operation implements PartitionAwareOperation, IdentifiedDataSerializable {
protected String name;
public HyperLogLogBaseOperation() {
}
public HyperLogLogBaseOperation(String name) {
this.name = name;
}
public HLLWrapper getHLL() {
HyperLogLogService service = getService();
return service.getHLL(name);
}
@Override
public int getFactoryId() { | // Path: src/main/java/org/rakam/cache/hazelcast/hyperloglog/operations/HyperLogLogSerializerFactory.java
// public final class HyperLogLogSerializerFactory implements DataSerializableFactory {
//
// public static final int F_ID = 100;
//
// public static final int ADD = 0;
// public static final int ADD_BACKUP = 1;
// public static final int ADD_ALL = 2;
// public static final int ADD_ALL_BACKUP = 3;
// public static final int CARDINALITY = 4;
// public static final int UNION = 5;
// public static final int UNION_BACKUP = 6;
// public static final int REPLICATION = 7;
// public static final int RESET = 8;
// public static final int RESET_BACKUP = 9;
//
// @Override
// public IdentifiedDataSerializable create(int typeId) {
// switch (typeId) {
// case ADD_BACKUP:
// return new AddBackupOperation();
// case ADD:
//             return new AddOperation();
// case ADD_ALL_BACKUP:
// return new AddAllBackupOperation();
// case ADD_ALL:
// return new AddAllOperation();
// case CARDINALITY:
// return new CardinalityOperation();
// case UNION_BACKUP:
// return new UnionBackupOperation();
// case UNION:
// return new UnionOperation();
// case REPLICATION:
// return new HyperLogLogReplicationOperation();
// default:
// return null;
// }
// }
// }
//
// Path: src/main/java/org/rakam/util/HLLWrapper.java
// public class HLLWrapper {
// final private static int SEED = 123456;
// private HLL hll;
//
// public HLLWrapper() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
//
// public HLLWrapper(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public long cardinality() {
// return hll.cardinality();
// }
//
// public void union(HLLWrapper hll) {
// this.hll.union(hll.hll);
// }
//
// public void addAll(Collection<String> coll) {
// for (String a : coll) {
// byte[] s = a.getBytes();
// hll.addRaw(MurmurHash3.murmurhash3x8632(s, 0, s.length, SEED));
// }
// }
//
// public void add(String obj) {
// if (obj == null)
// throw new IllegalArgumentException();
// byte[] s = obj.getBytes();
//
// hll.addRaw(MurmurHash3.murmurhash3x8632(s, 0, s.length, SEED));
// }
//
// public void set(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public byte[] bytes() {
// return hll.toBytes();
// }
//
// public void reset() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
// }
// Path: src/main/java/org/rakam/cache/hazelcast/hyperloglog/HyperLogLogBaseOperation.java
import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.nio.serialization.IdentifiedDataSerializable;
import com.hazelcast.spi.Operation;
import com.hazelcast.spi.PartitionAwareOperation;
import org.rakam.cache.hazelcast.hyperloglog.operations.HyperLogLogSerializerFactory;
import org.rakam.util.HLLWrapper;
import java.io.IOException;
/**
* Created by buremba <Burak Emre Kabakcı> on 10/07/14.
*/
package org.rakam.cache.hazelcast.hyperloglog;
public abstract class HyperLogLogBaseOperation extends Operation implements PartitionAwareOperation, IdentifiedDataSerializable {
protected String name;
public HyperLogLogBaseOperation() {
}
public HyperLogLogBaseOperation(String name) {
this.name = name;
}
public HLLWrapper getHLL() {
HyperLogLogService service = getService();
return service.getHLL(name);
}
@Override
public int getFactoryId() { | return HyperLogLogSerializerFactory.F_ID; |
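Operations identified by F_ID and these type ids can only be deserialized on members that know the factory. A sketch of the registration, using the 3.x SerializationConfig API; where this repo actually performs it is not shown in the rows above.

// Sketch only: registering the IdentifiedDataSerializable factory so members
// can reconstruct these operations by (factoryId, typeId) off the wire.
import com.hazelcast.config.Config;
import org.rakam.cache.hazelcast.hyperloglog.operations.HyperLogLogSerializerFactory;

public class SerializationBootstrap {
    public static void main(String[] args) {
        Config config = new Config();
        config.getSerializationConfig().addDataSerializableFactory(
                HyperLogLogSerializerFactory.F_ID,         // 100, as defined above
                new HyperLogLogSerializerFactory());
    }
}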
buremba/hazelcast-modules | src/main/java/org/rakam/cache/hazelcast/hyperloglog/HyperLogLogProxy.java | // Path: src/main/java/org/rakam/util/HLLWrapper.java
// public class HLLWrapper {
// final private static int SEED = 123456;
// private HLL hll;
//
// public HLLWrapper() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
//
// public HLLWrapper(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public long cardinality() {
// return hll.cardinality();
// }
//
// public void union(HLLWrapper hll) {
// this.hll.union(hll.hll);
// }
//
// public void addAll(Collection<String> coll) {
// for (String a : coll) {
// byte[] s = a.getBytes();
// hll.addRaw(MurmurHash3.murmurhash3x8632(s, 0, s.length, SEED));
// }
// }
//
// public void add(String obj) {
// if (obj == null)
// throw new IllegalArgumentException();
// byte[] s = obj.getBytes();
//
// hll.addRaw(MurmurHash3.murmurhash3x8632(s, 0, s.length, SEED));
// }
//
// public void set(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public byte[] bytes() {
// return hll.toBytes();
// }
//
// public void reset() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
// }
| import com.hazelcast.spi.*;
import com.hazelcast.util.ExceptionUtil;
import org.rakam.cache.hazelcast.hyperloglog.operations.*;
import org.rakam.util.HLLWrapper;
import java.util.Collection; | public String getServiceName() {
return HyperLogLogService.SERVICE_NAME;
}
@Override
public String toString() {
return "HyperLogLog{" + "name='" + name + '\'' + '}';
}
@Override
public String getName() {
return name;
}
@Override
public long cardinality() {
return asyncCardinality().getSafely();
}
@Override
public void reset() {
asyncReset().getSafely();
}
public InternalCompletableFuture<Void> asyncReset() {
ResetOperation operation = new ResetOperation(name);
return asyncInvoke(operation);
}
@Override | // Path: src/main/java/org/rakam/util/HLLWrapper.java
// public class HLLWrapper {
// final private static int SEED = 123456;
// private HLL hll;
//
// public HLLWrapper() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
//
// public HLLWrapper(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public long cardinality() {
// return hll.cardinality();
// }
//
// public void union(HLLWrapper hll) {
// this.hll.union(hll.hll);
// }
//
// public void addAll(Collection<String> coll) {
// for (String a : coll) {
// byte[] s = a.getBytes();
// hll.addRaw(MurmurHash3.murmurhash3x8632(s, 0, s.length, SEED));
// }
// }
//
// public void add(String obj) {
// if (obj == null)
// throw new IllegalArgumentException();
// byte[] s = obj.getBytes();
//
// hll.addRaw(MurmurHash3.murmurhash3x8632(s, 0, s.length, SEED));
// }
//
// public void set(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public byte[] bytes() {
// return hll.toBytes();
// }
//
// public void reset() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
// }
// Path: src/main/java/org/rakam/cache/hazelcast/hyperloglog/HyperLogLogProxy.java
import com.hazelcast.spi.*;
import com.hazelcast.util.ExceptionUtil;
import org.rakam.cache.hazelcast.hyperloglog.operations.*;
import org.rakam.util.HLLWrapper;
import java.util.Collection;
public String getServiceName() {
return HyperLogLogService.SERVICE_NAME;
}
@Override
public String toString() {
return "HyperLogLog{" + "name='" + name + '\'' + '}';
}
@Override
public String getName() {
return name;
}
@Override
public long cardinality() {
return asyncCardinality().getSafely();
}
@Override
public void reset() {
asyncReset().getSafely();
}
public InternalCompletableFuture<Void> asyncReset() {
ResetOperation operation = new ResetOperation(name);
return asyncInvoke(operation);
}
@Override | public void union(HLLWrapper hll) { |
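Each proxy method in this row is a thin synchronous facade: it builds a partition-aware operation, hands it to asyncInvoke, and blocks on the returned InternalCompletableFuture via getSafely(). A hypothetical client-side sketch follows; the user-facing interface name (HyperLogLog here) and its addAll(Collection<String>) method are assumptions, the latter inferred from AddAllOperation's constructor elsewhere in this repo.

import java.util.Arrays;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import org.rakam.cache.hazelcast.hyperloglog.HyperLogLog;        // assumed interface
import org.rakam.cache.hazelcast.hyperloglog.HyperLogLogService;

public class HyperLogLogClientSketch {
    public static void main(String[] args) {
        HazelcastInstance hz = Hazelcast.newHazelcastInstance();
        // getDistributedObject routes through HyperLogLogService.createDistributedObject,
        // so the returned object is a HyperLogLogProxy bound to the name's partition.
        HyperLogLog uniques = hz.getDistributedObject(HyperLogLogService.SERVICE_NAME, "daily-uniques");
        uniques.addAll(Arrays.asList("user-1", "user-2", "user-1")); // assumed method
        System.out.println(uniques.cardinality()); // blocks on asyncCardinality().getSafely()
        hz.shutdown();
    }
}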
buremba/hazelcast-modules | src/main/java/org/rakam/cache/hazelcast/treemap/operations/IncrementByBackupOperation.java | // Path: src/main/java/org/rakam/cache/hazelcast/treemap/OrderedCounterMap.java
// public class OrderedCounterMap implements Iterable<Counter>, DataSerializable {
// Set<Counter> set = Collections.newSetFromMap(new ConcurrentSkipListMap());
// Map<String, Counter> map = new ConcurrentHashMap<String, Counter>();
//
// public long increment(String key, long by) {
// Counter c = map.get(key);
//
// if(c!=null) {
//             return c.increment(); // note: Counter.increment() takes no delta, so 'by' is dropped for existing keys
// }else {
// c = new Counter(key, by);
// map.put(c.id, c);
// set.add(c);
// }
// return by;
// }
//
// public Map<String, Long> getAll() {
// Map<String, Long> ret = new LinkedHashMap(set.size());
// Iterator<Counter> it = set.iterator();
// while(it.hasNext()) {
// Counter next = it.next();
// ret.put(next.id, next.get());
// }
// return ret;
// }
//
// public Map<String, Long> getTopItems(int numberOfItems) {
// Map<String, Long> ret = new LinkedHashMap(set.size());
// Iterator<Counter> it = set.iterator();
// int i = 0;
// while(it.hasNext() && numberOfItems>i) {
// Counter next = it.next();
// ret.put(next.id, next.get());
// i++;
// }
// return ret;
// }
//
//
// @Override
// public Iterator<Counter> iterator() {
// return set.iterator();
// }
//
// @Override
// public void writeData(ObjectDataOutput out) throws IOException {
// out.writeInt(set.size());
// Iterator<Counter> it = set.iterator();
// while(it.hasNext()) {
// out.writeObject(it.next());
// }
// }
//
// @Override
// public void readData(ObjectDataInput in) throws IOException {
// int len = in.readInt();
// for(int i=0; i<len; i++) {
// Counter c = in.readObject();
// map.put(c.id, c);
// set.add(c);
// }
// }
// }
//
// Path: src/main/java/org/rakam/cache/hazelcast/treemap/TreeMapService.java
// public class TreeMapService implements ManagedService, RemoteService, MigrationAwareService {
//
// public static final String SERVICE_NAME = "rakam:treeMapService";
//
// private NodeEngine nodeEngine;
// private final ConcurrentMap<String, OrderedCounterMap> containers = new ConcurrentHashMap<String, OrderedCounterMap>();
// private final ConstructorFunction<String, OrderedCounterMap> CountersConstructorFunction =
// new ConstructorFunction<String, OrderedCounterMap>() {
// public OrderedCounterMap createNew(String key) {
// return new OrderedCounterMap();
// }
// };
//
// public TreeMapService() {
// }
//
// public OrderedCounterMap getHLL(String name) {
// return getOrPutIfAbsent(containers, name, CountersConstructorFunction);
// }
//
// public void setHLL(String name, OrderedCounterMap map) {
// containers.put(name, map);
// }
//
// @Override
// public void init(NodeEngine nodeEngine, Properties properties) {
// this.nodeEngine = nodeEngine;
// }
//
// @Override
// public void reset() {
// containers.clear();
// }
//
// @Override
// public void shutdown(boolean terminate) {
// reset();
// }
//
// @Override
// public TreeMapProxy createDistributedObject(String name) {
// return new TreeMapProxy(name, nodeEngine, this);
// }
//
// @Override
// public void destroyDistributedObject(String name) {
// containers.remove(name);
// }
//
// @Override
// public void beforeMigration(PartitionMigrationEvent partitionMigrationEvent) {
// }
//
// @Override
// public Operation prepareReplicationOperation(PartitionReplicationEvent event) {
// Map<String, OrderedCounterMap> data = new HashMap();
// int partitionId = event.getPartitionId();
// for (String name : containers.keySet()) {
// if (partitionId == getPartitionId(name)) {
// OrderedCounterMap number = containers.get(name);
// data.put(name, number);
// }
// }
// return data.isEmpty() ? null : new TreeMapReplicationOperation(data);
// }
//
// private int getPartitionId(String name) {
// InternalPartitionService partitionService = nodeEngine.getPartitionService();
// String partitionKey = getPartitionKey(name);
// return partitionService.getPartitionId(partitionKey);
// }
//
// @Override
// public void commitMigration(PartitionMigrationEvent partitionMigrationEvent) {
// if (partitionMigrationEvent.getMigrationEndpoint() == MigrationEndpoint.SOURCE) {
// removePartition(partitionMigrationEvent.getPartitionId());
// }
// }
//
// @Override
// public void rollbackMigration(PartitionMigrationEvent partitionMigrationEvent) {
// if (partitionMigrationEvent.getMigrationEndpoint() == MigrationEndpoint.DESTINATION) {
// removePartition(partitionMigrationEvent.getPartitionId());
// }
// }
//
// @Override
// public void clearPartitionReplica(int partitionId) {
// removePartition(partitionId);
// }
//
// public void removePartition(int partitionId) {
// final Iterator<String> iterator = containers.keySet().iterator();
// while (iterator.hasNext()) {
// String name = iterator.next();
// if (getPartitionId(name) == partitionId) {
// iterator.remove();
// }
// }
// }
// }
| import com.hazelcast.spi.BackupOperation;
import org.rakam.cache.hazelcast.treemap.OrderedCounterMap;
import org.rakam.cache.hazelcast.treemap.TreeMapService; | package org.rakam.cache.hazelcast.treemap.operations;
/**
* Created by buremba <Burak Emre Kabakcı> on 15/07/14 03:23.
*/
public class IncrementByBackupOperation extends TreeMapBaseOperation implements BackupOperation {
private long by;
public IncrementByBackupOperation(String name, long by) {
super(name);
this.by = by;
}
public IncrementByBackupOperation() {
}
@Override
public void run() throws Exception { | // Path: src/main/java/org/rakam/cache/hazelcast/treemap/OrderedCounterMap.java
// public class OrderedCounterMap implements Iterable<Counter>, DataSerializable {
// Set<Counter> set = Collections.newSetFromMap(new ConcurrentSkipListMap());
// Map<String, Counter> map = new ConcurrentHashMap<String, Counter>();
//
// public long increment(String key, long by) {
// Counter c = map.get(key);
//
// if(c!=null) {
//             return c.increment(); // note: Counter.increment() takes no delta, so 'by' is dropped for existing keys
// }else {
// c = new Counter(key, by);
// map.put(c.id, c);
// set.add(c);
// }
// return by;
// }
//
// public Map<String, Long> getAll() {
// Map<String, Long> ret = new LinkedHashMap(set.size());
// Iterator<Counter> it = set.iterator();
// while(it.hasNext()) {
// Counter next = it.next();
// ret.put(next.id, next.get());
// }
// return ret;
// }
//
// public Map<String, Long> getTopItems(int numberOfItems) {
// Map<String, Long> ret = new LinkedHashMap(set.size());
// Iterator<Counter> it = set.iterator();
// int i = 0;
// while(it.hasNext() && numberOfItems>i) {
// Counter next = it.next();
// ret.put(next.id, next.get());
// i++;
// }
// return ret;
// }
//
//
// @Override
// public Iterator<Counter> iterator() {
// return set.iterator();
// }
//
// @Override
// public void writeData(ObjectDataOutput out) throws IOException {
// out.writeInt(set.size());
// Iterator<Counter> it = set.iterator();
// while(it.hasNext()) {
// out.writeObject(it.next());
// }
// }
//
// @Override
// public void readData(ObjectDataInput in) throws IOException {
// int len = in.readInt();
// for(int i=0; i<len; i++) {
// Counter c = in.readObject();
// map.put(c.id, c);
// set.add(c);
// }
// }
// }
//
// Path: src/main/java/org/rakam/cache/hazelcast/treemap/TreeMapService.java
// public class TreeMapService implements ManagedService, RemoteService, MigrationAwareService {
//
// public static final String SERVICE_NAME = "rakam:treeMapService";
//
// private NodeEngine nodeEngine;
// private final ConcurrentMap<String, OrderedCounterMap> containers = new ConcurrentHashMap<String, OrderedCounterMap>();
// private final ConstructorFunction<String, OrderedCounterMap> CountersConstructorFunction =
// new ConstructorFunction<String, OrderedCounterMap>() {
// public OrderedCounterMap createNew(String key) {
// return new OrderedCounterMap();
// }
// };
//
// public TreeMapService() {
// }
//
// public OrderedCounterMap getHLL(String name) {
// return getOrPutIfAbsent(containers, name, CountersConstructorFunction);
// }
//
// public void setHLL(String name, OrderedCounterMap map) {
// containers.put(name, map);
// }
//
// @Override
// public void init(NodeEngine nodeEngine, Properties properties) {
// this.nodeEngine = nodeEngine;
// }
//
// @Override
// public void reset() {
// containers.clear();
// }
//
// @Override
// public void shutdown(boolean terminate) {
// reset();
// }
//
// @Override
// public TreeMapProxy createDistributedObject(String name) {
// return new TreeMapProxy(name, nodeEngine, this);
// }
//
// @Override
// public void destroyDistributedObject(String name) {
// containers.remove(name);
// }
//
// @Override
// public void beforeMigration(PartitionMigrationEvent partitionMigrationEvent) {
// }
//
// @Override
// public Operation prepareReplicationOperation(PartitionReplicationEvent event) {
// Map<String, OrderedCounterMap> data = new HashMap();
// int partitionId = event.getPartitionId();
// for (String name : containers.keySet()) {
// if (partitionId == getPartitionId(name)) {
// OrderedCounterMap number = containers.get(name);
// data.put(name, number);
// }
// }
// return data.isEmpty() ? null : new TreeMapReplicationOperation(data);
// }
//
// private int getPartitionId(String name) {
// InternalPartitionService partitionService = nodeEngine.getPartitionService();
// String partitionKey = getPartitionKey(name);
// return partitionService.getPartitionId(partitionKey);
// }
//
// @Override
// public void commitMigration(PartitionMigrationEvent partitionMigrationEvent) {
// if (partitionMigrationEvent.getMigrationEndpoint() == MigrationEndpoint.SOURCE) {
// removePartition(partitionMigrationEvent.getPartitionId());
// }
// }
//
// @Override
// public void rollbackMigration(PartitionMigrationEvent partitionMigrationEvent) {
// if (partitionMigrationEvent.getMigrationEndpoint() == MigrationEndpoint.DESTINATION) {
// removePartition(partitionMigrationEvent.getPartitionId());
// }
// }
//
// @Override
// public void clearPartitionReplica(int partitionId) {
// removePartition(partitionId);
// }
//
// public void removePartition(int partitionId) {
// final Iterator<String> iterator = containers.keySet().iterator();
// while (iterator.hasNext()) {
// String name = iterator.next();
// if (getPartitionId(name) == partitionId) {
// iterator.remove();
// }
// }
// }
// }
// Path: src/main/java/org/rakam/cache/hazelcast/treemap/operations/IncrementByBackupOperation.java
import com.hazelcast.spi.BackupOperation;
import org.rakam.cache.hazelcast.treemap.OrderedCounterMap;
import org.rakam.cache.hazelcast.treemap.TreeMapService;
package org.rakam.cache.hazelcast.treemap.operations;
/**
* Created by buremba <Burak Emre Kabakcı> on 15/07/14 03:23.
*/
public class IncrementByBackupOperation extends TreeMapBaseOperation implements BackupOperation {
private long by;
public IncrementByBackupOperation(String name, long by) {
super(name);
this.by = by;
}
public IncrementByBackupOperation() {
}
@Override
public void run() throws Exception { | OrderedCounterMap map = ((TreeMapService) getService()).getHLL(name); |
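Two things in this row deserve a closer look. First, OrderedCounterMap.increment drops the by argument whenever the key already exists, since Counter.increment() takes no delta (flagged in the comment above). Second, IncrementByOperation.getBackupOperation() returns an IncrementByBackupOperation built with the no-arg constructor, so the replica receives neither the name, the key, nor the delta, and the backup class as shown has no key field and no writeInternal/readInternal. The sketch below shows what a state-carrying backup operation could look like; it assumes TreeMapBaseOperation serializes name (the primary's super calls suggest it does), assumes the primary is changed to return new IncrementByBackupOperation(name, key, by), and invents the ADD_BACKUP id for illustration.

package org.rakam.cache.hazelcast.treemap.operations;

import java.io.IOException;
import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.spi.BackupOperation;
import org.rakam.cache.hazelcast.treemap.OrderedCounterMap;
import org.rakam.cache.hazelcast.treemap.TreeMapService;

public class IncrementByBackupOperation extends TreeMapBaseOperation implements BackupOperation {
    private String key;
    private long by;

    public IncrementByBackupOperation() {
        // no-arg constructor used by the serializer before readInternal runs
    }

    public IncrementByBackupOperation(String name, String key, long by) {
        super(name);
        this.key = key;
        this.by = by;
    }

    @Override
    protected void writeInternal(ObjectDataOutput out) throws IOException {
        super.writeInternal(out); // writes name (assumed, mirroring the primary operation)
        out.writeUTF(key);
        out.writeLong(by);
    }

    @Override
    protected void readInternal(ObjectDataInput in) throws IOException {
        super.readInternal(in);
        key = in.readUTF();
        by = in.readLong();
    }

    @Override
    public void run() throws Exception {
        // Replay the same mutation against this replica's copy of the map.
        OrderedCounterMap map = ((TreeMapService) getService()).getHLL(name);
        map.increment(key, by);
    }

    @Override
    public int getId() {
        return TreeMapSerializerFactory.ADD_BACKUP; // assumed id; not shown in the excerpt
    }
}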
buremba/hazelcast-modules | src/main/java/org/rakam/cache/hazelcast/treemap/TreeMapProxy.java | // Path: src/main/java/org/rakam/cache/hazelcast/treemap/operations/GetOperation.java
// public class GetOperation extends TreeMapBaseOperation {
// private Integer numberOfElements = null;
// private Map<String, Long> returnValue;
//
// public GetOperation(String name) {
// super(name);
// }
//
// public GetOperation() {
//
// }
//
// public GetOperation(String name, Integer numberOfElements) {
// super(name);
// this.numberOfElements = numberOfElements;
// }
//
// @Override
// public int getId() {
// return TreeMapSerializerFactory.GET;
// }
//
// @Override
// public void run() throws Exception {
// TreeMapService service = getService();
// if(numberOfElements==null)
// this.returnValue = service.getHLL(name).getAll();
// else
// this.returnValue = service.getHLL(name).getTopItems(numberOfElements);
// }
//
// @Override
// public Object getResponse() {
// return returnValue;
// }
// }
//
// Path: src/main/java/org/rakam/cache/hazelcast/treemap/operations/IncrementByOperation.java
// public class IncrementByOperation extends TreeMapBackupAwareOperation {
// private String key;
// private long by;
//
// public IncrementByOperation(String name, String key, long by) {
// super(name);
// this.by = by;
// this.key = key;
// }
//
// public IncrementByOperation() {
//
// }
//
// @Override
// protected void writeInternal(ObjectDataOutput out) throws IOException {
// super.writeInternal(out);
// out.writeLong(by);
// out.writeUTF(key);
// }
//
// @Override
// protected void readInternal(ObjectDataInput in) throws IOException {
// super.readInternal(in);
// by = in.readLong();
// key = in.readUTF();
// }
//
// @Override
// public void run() throws Exception {
//
// OrderedCounterMap map = ((TreeMapService) getService()).getHLL(name);
// map.increment(key, by);
// }
//
// @Override
// public Operation getBackupOperation() {
// return new IncrementByBackupOperation();
// }
//
// @Override
// public int getId() {
// return TreeMapSerializerFactory.ADD;
// }
// }
| import com.hazelcast.spi.*;
import com.hazelcast.util.ExceptionUtil;
import org.rakam.cache.hazelcast.treemap.operations.GetOperation;
import org.rakam.cache.hazelcast.treemap.operations.IncrementByOperation;
import java.util.Map; | try {
OperationService operationService = getNodeEngine().getOperationService();
return (InternalCompletableFuture<E>) operationService.invokeOnPartition(
TreeMapService.SERVICE_NAME, operation, partitionId);
} catch (Throwable throwable) {
throw ExceptionUtil.rethrow(throwable);
}
}
@Override
public String getServiceName() {
return TreeMapService.SERVICE_NAME;
}
@Override
public String toString() {
return "Map{" + "name='" + name + '\'' + '}';
}
@Override
public String getName() {
return name;
}
@Override
public void increment(String key, long by) {
asyncIncrementBy(key, by).getSafely();
}
public InternalCompletableFuture<Void> asyncIncrementBy(String key, long by) { | // Path: src/main/java/org/rakam/cache/hazelcast/treemap/operations/GetOperation.java
// public class GetOperation extends TreeMapBaseOperation {
// private Integer numberOfElements = null;
// private Map<String, Long> returnValue;
//
// public GetOperation(String name) {
// super(name);
// }
//
// public GetOperation() {
//
// }
//
// public GetOperation(String name, Integer numberOfElements) {
// super(name);
// this.numberOfElements = numberOfElements;
// }
//
// @Override
// public int getId() {
// return TreeMapSerializerFactory.GET;
// }
//
// @Override
// public void run() throws Exception {
// TreeMapService service = getService();
// if(numberOfElements==null)
// this.returnValue = service.getHLL(name).getAll();
// else
// this.returnValue = service.getHLL(name).getTopItems(numberOfElements);
// }
//
// @Override
// public Object getResponse() {
// return returnValue;
// }
// }
//
// Path: src/main/java/org/rakam/cache/hazelcast/treemap/operations/IncrementByOperation.java
// public class IncrementByOperation extends TreeMapBackupAwareOperation {
// private String key;
// private long by;
//
// public IncrementByOperation(String name, String key, long by) {
// super(name);
// this.by = by;
// this.key = key;
// }
//
// public IncrementByOperation() {
//
// }
//
// @Override
// protected void writeInternal(ObjectDataOutput out) throws IOException {
// super.writeInternal(out);
// out.writeLong(by);
// out.writeUTF(key);
// }
//
// @Override
// protected void readInternal(ObjectDataInput in) throws IOException {
// super.readInternal(in);
// by = in.readLong();
// key = in.readUTF();
// }
//
// @Override
// public void run() throws Exception {
//
// OrderedCounterMap map = ((TreeMapService) getService()).getHLL(name);
// map.increment(key, by);
// }
//
// @Override
// public Operation getBackupOperation() {
// return new IncrementByBackupOperation();
// }
//
// @Override
// public int getId() {
// return TreeMapSerializerFactory.ADD;
// }
// }
// Path: src/main/java/org/rakam/cache/hazelcast/treemap/TreeMapProxy.java
import com.hazelcast.spi.*;
import com.hazelcast.util.ExceptionUtil;
import org.rakam.cache.hazelcast.treemap.operations.GetOperation;
import org.rakam.cache.hazelcast.treemap.operations.IncrementByOperation;
import java.util.Map;
try {
OperationService operationService = getNodeEngine().getOperationService();
return (InternalCompletableFuture<E>) operationService.invokeOnPartition(
TreeMapService.SERVICE_NAME, operation, partitionId);
} catch (Throwable throwable) {
throw ExceptionUtil.rethrow(throwable);
}
}
@Override
public String getServiceName() {
return TreeMapService.SERVICE_NAME;
}
@Override
public String toString() {
return "Map{" + "name='" + name + '\'' + '}';
}
@Override
public String getName() {
return name;
}
@Override
public void increment(String key, long by) {
asyncIncrementBy(key, by).getSafely();
}
public InternalCompletableFuture<Void> asyncIncrementBy(String key, long by) { | Operation operation = new IncrementByOperation(name, key, by); |
buremba/hazelcast-modules | src/main/java/org/rakam/cache/hazelcast/treemap/TreeMapProxy.java | // Path: src/main/java/org/rakam/cache/hazelcast/treemap/operations/GetOperation.java
// public class GetOperation extends TreeMapBaseOperation {
// private Integer numberOfElements = null;
// private Map<String, Long> returnValue;
//
// public GetOperation(String name) {
// super(name);
// }
//
// public GetOperation() {
//
// }
//
// public GetOperation(String name, Integer numberOfElements) {
// super(name);
// this.numberOfElements = numberOfElements;
// }
//
// @Override
// public int getId() {
// return TreeMapSerializerFactory.GET;
// }
//
// @Override
// public void run() throws Exception {
// TreeMapService service = getService();
// if(numberOfElements==null)
// this.returnValue = service.getHLL(name).getAll();
// else
// this.returnValue = service.getHLL(name).getTopItems(numberOfElements);
// }
//
// @Override
// public Object getResponse() {
// return returnValue;
// }
// }
//
// Path: src/main/java/org/rakam/cache/hazelcast/treemap/operations/IncrementByOperation.java
// public class IncrementByOperation extends TreeMapBackupAwareOperation {
// private String key;
// private long by;
//
// public IncrementByOperation(String name, String key, long by) {
// super(name);
// this.by = by;
// this.key = key;
// }
//
// public IncrementByOperation() {
//
// }
//
// @Override
// protected void writeInternal(ObjectDataOutput out) throws IOException {
// super.writeInternal(out);
// out.writeLong(by);
// out.writeUTF(key);
// }
//
// @Override
// protected void readInternal(ObjectDataInput in) throws IOException {
// super.readInternal(in);
// by = in.readLong();
// key = in.readUTF();
// }
//
// @Override
// public void run() throws Exception {
//
// OrderedCounterMap map = ((TreeMapService) getService()).getHLL(name);
// map.increment(key, by);
// }
//
// @Override
// public Operation getBackupOperation() {
// return new IncrementByBackupOperation();
// }
//
// @Override
// public int getId() {
// return TreeMapSerializerFactory.ADD;
// }
// }
| import com.hazelcast.spi.*;
import com.hazelcast.util.ExceptionUtil;
import org.rakam.cache.hazelcast.treemap.operations.GetOperation;
import org.rakam.cache.hazelcast.treemap.operations.IncrementByOperation;
import java.util.Map; | }
}
@Override
public String getServiceName() {
return TreeMapService.SERVICE_NAME;
}
@Override
public String toString() {
return "Map{" + "name='" + name + '\'' + '}';
}
@Override
public String getName() {
return name;
}
@Override
public void increment(String key, long by) {
asyncIncrementBy(key, by).getSafely();
}
public InternalCompletableFuture<Void> asyncIncrementBy(String key, long by) {
Operation operation = new IncrementByOperation(name, key, by);
return asyncInvoke(operation);
}
@Override
public Map<String, Long> getAll() { | // Path: src/main/java/org/rakam/cache/hazelcast/treemap/operations/GetOperation.java
// public class GetOperation extends TreeMapBaseOperation {
// private Integer numberOfElements = null;
// private Map<String, Long> returnValue;
//
// public GetOperation(String name) {
// super(name);
// }
//
// public GetOperation() {
//
// }
//
// public GetOperation(String name, Integer numberOfElements) {
// super(name);
// this.numberOfElements = numberOfElements;
// }
//
// @Override
// public int getId() {
// return TreeMapSerializerFactory.GET;
// }
//
// @Override
// public void run() throws Exception {
// TreeMapService service = getService();
// if(numberOfElements==null)
// this.returnValue = service.getHLL(name).getAll();
// else
// this.returnValue = service.getHLL(name).getTopItems(numberOfElements);
// }
//
// @Override
// public Object getResponse() {
// return returnValue;
// }
// }
//
// Path: src/main/java/org/rakam/cache/hazelcast/treemap/operations/IncrementByOperation.java
// public class IncrementByOperation extends TreeMapBackupAwareOperation {
// private String key;
// private long by;
//
// public IncrementByOperation(String name, String key, long by) {
// super(name);
// this.by = by;
// this.key = key;
// }
//
// public IncrementByOperation() {
//
// }
//
// @Override
// protected void writeInternal(ObjectDataOutput out) throws IOException {
// super.writeInternal(out);
// out.writeLong(by);
// out.writeUTF(key);
// }
//
// @Override
// protected void readInternal(ObjectDataInput in) throws IOException {
// super.readInternal(in);
// by = in.readLong();
// key = in.readUTF();
// }
//
// @Override
// public void run() throws Exception {
//
// OrderedCounterMap map = ((TreeMapService) getService()).getHLL(name);
// map.increment(key, by);
// }
//
// @Override
// public Operation getBackupOperation() {
// return new IncrementByBackupOperation();
// }
//
// @Override
// public int getId() {
// return TreeMapSerializerFactory.ADD;
// }
// }
// Path: src/main/java/org/rakam/cache/hazelcast/treemap/TreeMapProxy.java
import com.hazelcast.spi.*;
import com.hazelcast.util.ExceptionUtil;
import org.rakam.cache.hazelcast.treemap.operations.GetOperation;
import org.rakam.cache.hazelcast.treemap.operations.IncrementByOperation;
import java.util.Map;
}
}
@Override
public String getServiceName() {
return TreeMapService.SERVICE_NAME;
}
@Override
public String toString() {
return "Map{" + "name='" + name + '\'' + '}';
}
@Override
public String getName() {
return name;
}
@Override
public void increment(String key, long by) {
asyncIncrementBy(key, by).getSafely();
}
public InternalCompletableFuture<Void> asyncIncrementBy(String key, long by) {
Operation operation = new IncrementByOperation(name, key, by);
return asyncInvoke(operation);
}
@Override
public Map<String, Long> getAll() { | Operation operation = new GetOperation(name); |
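Taken together, the two TreeMapProxy rows show matching read and write paths: increment wraps an IncrementByOperation, getAll wraps a GetOperation, and every invocation is routed to the partition that owns the proxy's name. A hypothetical caller follows; the public interface name TreeMapCounter is an assumption, as is its shape beyond the methods the proxy overrides above.

import java.util.Map;
import com.hazelcast.core.Hazelcast;
import com.hazelcast.core.HazelcastInstance;
import org.rakam.cache.hazelcast.treemap.TreeMapCounter; // assumed interface
import org.rakam.cache.hazelcast.treemap.TreeMapService;

public class TreeMapClientSketch {
    public static void main(String[] args) {
        HazelcastInstance hz = Hazelcast.newHazelcastInstance();
        TreeMapCounter views = hz.getDistributedObject(TreeMapService.SERVICE_NAME, "page-views");
        views.increment("/home", 1);            // IncrementByOperation plus one sync backup
        views.increment("/about", 1);
        Map<String, Long> all = views.getAll(); // GetOperation with numberOfElements == null
        System.out.println(all);
        hz.shutdown();
    }
}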
buremba/hazelcast-modules | src/main/java/org/rakam/cache/hazelcast/hyperloglog/operations/AddAllOperation.java | // Path: src/main/java/org/rakam/cache/hazelcast/hyperloglog/HyperLogLogBackupAwareOperation.java
// public abstract class HyperLogLogBackupAwareOperation extends HyperLogLogBaseOperation implements BackupAwareOperation, BackupOperation {
//
// protected boolean shouldBackup = true;
//
// public HyperLogLogBackupAwareOperation() {
// }
//
// public HyperLogLogBackupAwareOperation(String name) {
// super(name);
// }
//
// @Override
// public boolean shouldBackup() {
// return shouldBackup;
// }
//
// @Override
// public int getSyncBackupCount() {
// return 1;
// }
//
// @Override
// public int getAsyncBackupCount() {
// return 0;
// }
// }
//
// Path: src/main/java/org/rakam/util/HLLWrapper.java
// public class HLLWrapper {
// final private static int SEED = 123456;
// private HLL hll;
//
// public HLLWrapper() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
//
// public HLLWrapper(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public long cardinality() {
// return hll.cardinality();
// }
//
// public void union(HLLWrapper hll) {
// this.hll.union(hll.hll);
// }
//
// public void addAll(Collection<String> coll) {
// for (String a : coll) {
// byte[] s = a.getBytes();
// hll.addRaw(MurmurHash3.murmurhash3x8632(s, 0, s.length, SEED));
// }
// }
//
// public void add(String obj) {
// if (obj == null)
// throw new IllegalArgumentException();
// byte[] s = obj.getBytes();
//
// hll.addRaw(MurmurHash3.murmurhash3x8632(s, 0, s.length, SEED));
// }
//
// public void set(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public byte[] bytes() {
// return hll.toBytes();
// }
//
// public void reset() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
// }
| import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.spi.Operation;
import org.rakam.cache.hazelcast.hyperloglog.HyperLogLogBackupAwareOperation;
import org.rakam.util.HLLWrapper;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection; | /**
* Created by buremba on 10/07/14.
*/
package org.rakam.cache.hazelcast.hyperloglog.operations;
public class AddAllOperation extends HyperLogLogBackupAwareOperation {
private Collection<String> items;
public AddAllOperation() {
}
public AddAllOperation(String name, Collection<String> item) {
super(name);
this.items = item;
}
@Override
public void run() throws Exception { | // Path: src/main/java/org/rakam/cache/hazelcast/hyperloglog/HyperLogLogBackupAwareOperation.java
// public abstract class HyperLogLogBackupAwareOperation extends HyperLogLogBaseOperation implements BackupAwareOperation, BackupOperation {
//
// protected boolean shouldBackup = true;
//
// public HyperLogLogBackupAwareOperation() {
// }
//
// public HyperLogLogBackupAwareOperation(String name) {
// super(name);
// }
//
// @Override
// public boolean shouldBackup() {
// return shouldBackup;
// }
//
// @Override
// public int getSyncBackupCount() {
// return 1;
// }
//
// @Override
// public int getAsyncBackupCount() {
// return 0;
// }
// }
//
// Path: src/main/java/org/rakam/util/HLLWrapper.java
// public class HLLWrapper {
// final private static int SEED = 123456;
// private HLL hll;
//
// public HLLWrapper() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
//
// public HLLWrapper(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public long cardinality() {
// return hll.cardinality();
// }
//
// public void union(HLLWrapper hll) {
// this.hll.union(hll.hll);
// }
//
// public void addAll(Collection<String> coll) {
// for (String a : coll) {
// byte[] s = a.getBytes();
// hll.addRaw(MurmurHash3.murmurhash3x8632(s, 0, s.length, SEED));
// }
// }
//
// public void add(String obj) {
// if (obj == null)
// throw new IllegalArgumentException();
// byte[] s = obj.getBytes();
//
// hll.addRaw(MurmurHash3.murmurhash3x8632(s, 0, s.length, SEED));
// }
//
// public void set(byte[] bytes) {
// hll = HLL.fromBytes(bytes);
// }
//
// public byte[] bytes() {
// return hll.toBytes();
// }
//
// public void reset() {
// hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
// }
// }
// Path: src/main/java/org/rakam/cache/hazelcast/hyperloglog/operations/AddAllOperation.java
import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.spi.Operation;
import org.rakam.cache.hazelcast.hyperloglog.HyperLogLogBackupAwareOperation;
import org.rakam.util.HLLWrapper;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
/**
* Created by buremba on 10/07/14.
*/
package org.rakam.cache.hazelcast.hyperloglog.operations;
public class AddAllOperation extends HyperLogLogBackupAwareOperation {
private Collection<String> items;
public AddAllOperation() {
}
public AddAllOperation(String name, Collection<String> item) {
super(name);
this.items = item;
}
@Override
public void run() throws Exception { | HLLWrapper hll = getHLL(); |
buremba/hazelcast-modules | src/main/java/org/rakam/util/HLLWrapper.java | // Path: src/main/java/org/rakam/cache/hazelcast/hyperloglog/MurmurHash3.java
// public final class MurmurHash3 {
//
// private MurmurHash3() {
// }
//
// /**
// * Returns the MurmurHash3_x86_32 hash.
// */
// public static int murmurhash3x8632(byte[] data, int offset, int len, int seed) {
//
// int c1 = 0xcc9e2d51;
// int c2 = 0x1b873593;
//
// int h1 = seed;
// int roundedEnd = offset + (len & 0xfffffffc); // round down to 4 byte block
//
// for (int i = offset; i < roundedEnd; i += 4) {
// // little endian load order
// int k1 = (data[i] & 0xff) | ((data[i + 1] & 0xff) << 8) | ((data[i + 2] & 0xff) << 16) | (data[i + 3] << 24);
// k1 *= c1;
// k1 = (k1 << 15) | (k1 >>> 17); // ROTL32(k1,15);
// k1 *= c2;
//
// h1 ^= k1;
// h1 = (h1 << 13) | (h1 >>> 19); // ROTL32(h1,13);
// h1 = h1 * 5 + 0xe6546b64;
// }
//
// // tail
// int k1 = 0;
//
// switch (len & 0x03) {
// case 3:
// k1 = (data[roundedEnd + 2] & 0xff) << 16;
// // fallthrough
// case 2:
// k1 |= (data[roundedEnd + 1] & 0xff) << 8;
// // fallthrough
// case 1:
// k1 |= data[roundedEnd] & 0xff;
// k1 *= c1;
// k1 = (k1 << 15) | (k1 >>> 17); // ROTL32(k1,15);
// k1 *= c2;
// h1 ^= k1;
// default:
// }
//
// // finalization
// h1 ^= len;
//
// // fmix(h1);
// h1 ^= h1 >>> 16;
// h1 *= 0x85ebca6b;
// h1 ^= h1 >>> 13;
// h1 *= 0xc2b2ae35;
// h1 ^= h1 >>> 16;
//
// return h1;
// }
//
// }
| import java.util.Collection;
import net.agkn.hll.HLL;
import org.rakam.cache.hazelcast.hyperloglog.MurmurHash3; | /**
* Created by buremba <Burak Emre Kabakcı> on 10/07/14.
*/
package org.rakam.util;
public class HLLWrapper {
final private static int SEED = 123456;
private HLL hll;
public HLLWrapper() {
hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
}
public HLLWrapper(byte[] bytes) {
hll = HLL.fromBytes(bytes);
}
public long cardinality() {
return hll.cardinality();
}
public void union(HLLWrapper hll) {
this.hll.union(hll.hll);
}
public void addAll(Collection<String> coll) {
for (String a : coll) {
byte[] s = a.getBytes(); | // Path: src/main/java/org/rakam/cache/hazelcast/hyperloglog/MurmurHash3.java
// public final class MurmurHash3 {
//
// private MurmurHash3() {
// }
//
// /**
// * Returns the MurmurHash3_x86_32 hash.
// */
// public static int murmurhash3x8632(byte[] data, int offset, int len, int seed) {
//
// int c1 = 0xcc9e2d51;
// int c2 = 0x1b873593;
//
// int h1 = seed;
// int roundedEnd = offset + (len & 0xfffffffc); // round down to 4 byte block
//
// for (int i = offset; i < roundedEnd; i += 4) {
// // little endian load order
// int k1 = (data[i] & 0xff) | ((data[i + 1] & 0xff) << 8) | ((data[i + 2] & 0xff) << 16) | (data[i + 3] << 24);
// k1 *= c1;
// k1 = (k1 << 15) | (k1 >>> 17); // ROTL32(k1,15);
// k1 *= c2;
//
// h1 ^= k1;
// h1 = (h1 << 13) | (h1 >>> 19); // ROTL32(h1,13);
// h1 = h1 * 5 + 0xe6546b64;
// }
//
// // tail
// int k1 = 0;
//
// switch (len & 0x03) {
// case 3:
// k1 = (data[roundedEnd + 2] & 0xff) << 16;
// // fallthrough
// case 2:
// k1 |= (data[roundedEnd + 1] & 0xff) << 8;
// // fallthrough
// case 1:
// k1 |= data[roundedEnd] & 0xff;
// k1 *= c1;
// k1 = (k1 << 15) | (k1 >>> 17); // ROTL32(k1,15);
// k1 *= c2;
// h1 ^= k1;
// default:
// }
//
// // finalization
// h1 ^= len;
//
// // fmix(h1);
// h1 ^= h1 >>> 16;
// h1 *= 0x85ebca6b;
// h1 ^= h1 >>> 13;
// h1 *= 0xc2b2ae35;
// h1 ^= h1 >>> 16;
//
// return h1;
// }
//
// }
// Path: src/main/java/org/rakam/util/HLLWrapper.java
import java.util.Collection;
import net.agkn.hll.HLL;
import org.rakam.cache.hazelcast.hyperloglog.MurmurHash3;
/**
* Created by buremba <Burak Emre Kabakcı> on 10/07/14.
*/
package org.rakam.util;
public class HLLWrapper {
final private static int SEED = 123456;
private HLL hll;
public HLLWrapper() {
hll = new HLL(13/*log2m*/, 5/*registerWidth*/);
}
public HLLWrapper(byte[] bytes) {
hll = HLL.fromBytes(bytes);
}
public long cardinality() {
return hll.cardinality();
}
public void union(HLLWrapper hll) {
this.hll.union(hll.hll);
}
public void addAll(Collection<String> coll) {
for (String a : coll) {
byte[] s = a.getBytes(); | hll.addRaw(MurmurHash3.murmurhash3x8632(s, 0, s.length, SEED)); |
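HLLWrapper pins every probabilistic parameter in one place: a fixed seed so all members hash identically (a requirement for union to be meaningful), MurmurHash3_x86_32 over the string's bytes, and an HLL with log2m = 13 and 5-bit registers, which gives roughly 1.1% standard error (1.04 / sqrt(2^13)). A standalone sketch of the same pipeline against the net.agkn library follows; note that java-hll's addRaw is documented around 64-bit hash inputs, so feeding it the 32-bit murmur value is a fidelity trade-off this repo accepts.

import java.nio.charset.StandardCharsets;
import net.agkn.hll.HLL;
import org.rakam.cache.hazelcast.hyperloglog.MurmurHash3;

public class HllPipelineSketch {
    private static final int SEED = 123456; // must match on every member, as in HLLWrapper

    public static void main(String[] args) {
        HLL hll = new HLL(13 /*log2m*/, 5 /*registerWidth*/);
        for (int i = 0; i < 100000; i++) {
            byte[] s = ("user-" + i).getBytes(StandardCharsets.UTF_8);
            hll.addRaw(MurmurHash3.murmurhash3x8632(s, 0, s.length, SEED));
        }
        // The estimate should land within a few percent of 100000.
        System.out.println(hll.cardinality());
    }
}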
zhanjiashu/ZhihuDialyM | app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/activity/BaseActivity.java | // Path: common/src/main/java/io/gitcafe/zhanjiashu/common/util/DisplayUtils.java
// public class DisplayUtils {
//
// public static int getStatusBarHeight(Context context) {
// int result = 0;
// int resourceId = context.getResources().getIdentifier("status_bar_height", "dimen", "android");
// if (resourceId > 0) {
// result = context.getResources().getDimensionPixelSize(resourceId);
// }
// return result;
// }
// }
//
// Path: app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/util/VolleyUtils.java
// public class VolleyUtils {
//
// private static VolleyUtils mInstance;
// private RequestQueue mRequestQueue;
// private ImageLoader mImageLoader;
// private VolleyUtils(Context context) {
// mRequestQueue = getRequestQueue();
// mImageLoader = new ImageLoader(mRequestQueue, new VolleyLruImageCache(context));
// }
//
//
// public static VolleyUtils getInstance(Context context) {
// if (mInstance == null) {
// synchronized (VolleyUtils.class) {
// if (mInstance == null) {
// mInstance = new VolleyUtils(context.getApplicationContext());
// }
// }
// }
// return mInstance;
// }
//
// private RequestQueue getRequestQueue() {
// if (mRequestQueue == null) {
// mRequestQueue = Volley.newRequestQueue(App.getContext());
// }
// return mRequestQueue;
// }
//
// public <T> void addRequest(Request<T> request) {
// mRequestQueue.add(request);
// }
// }
| import android.os.Build;
import android.os.Bundle;
import android.support.v4.app.FragmentManager;
import android.support.v7.app.AppCompatActivity;
import android.view.View;
import android.view.ViewGroup;
import android.view.WindowManager;
import android.widget.LinearLayout;
import android.widget.TextView;
import de.greenrobot.event.EventBus;
import io.gitcafe.zhanjiashu.newzhihudialy.R;
import io.gitcafe.zhanjiashu.common.util.DisplayUtils;
import io.gitcafe.zhanjiashu.newzhihudialy.util.VolleyUtils; | package io.gitcafe.zhanjiashu.newzhihudialy.activity;
/**
* Created by Jiashu on 2015/5/31.
*/
public class BaseActivity extends AppCompatActivity {
EventBus mEventBus; | // Path: common/src/main/java/io/gitcafe/zhanjiashu/common/util/DisplayUtils.java
// public class DisplayUtils {
//
// public static int getStatusBarHeight(Context context) {
// int result = 0;
// int resourceId = context.getResources().getIdentifier("status_bar_height", "dimen", "android");
// if (resourceId > 0) {
// result = context.getResources().getDimensionPixelSize(resourceId);
// }
// return result;
// }
// }
//
// Path: app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/util/VolleyUtils.java
// public class VolleyUtils {
//
// private static VolleyUtils mInstance;
// private RequestQueue mRequestQueue;
// private ImageLoader mImageLoader;
// private VolleyUtils(Context context) {
// mRequestQueue = getRequestQueue();
// mImageLoader = new ImageLoader(mRequestQueue, new VolleyLruImageCache(context));
// }
//
//
// public static VolleyUtils getInstance(Context context) {
// if (mInstance == null) {
// synchronized (VolleyUtils.class) {
// if (mInstance == null) {
// mInstance = new VolleyUtils(context.getApplicationContext());
// }
// }
// }
// return mInstance;
// }
//
// private RequestQueue getRequestQueue() {
// if (mRequestQueue == null) {
// mRequestQueue = Volley.newRequestQueue(App.getContext());
// }
// return mRequestQueue;
// }
//
// public <T> void addRequest(Request<T> request) {
// mRequestQueue.add(request);
// }
// }
// Path: app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/activity/BaseActivity.java
import android.os.Build;
import android.os.Bundle;
import android.support.v4.app.FragmentManager;
import android.support.v7.app.AppCompatActivity;
import android.view.View;
import android.view.ViewGroup;
import android.view.WindowManager;
import android.widget.LinearLayout;
import android.widget.TextView;
import de.greenrobot.event.EventBus;
import io.gitcafe.zhanjiashu.newzhihudialy.R;
import io.gitcafe.zhanjiashu.common.util.DisplayUtils;
import io.gitcafe.zhanjiashu.newzhihudialy.util.VolleyUtils;
package io.gitcafe.zhanjiashu.newzhihudialy.activity;
/**
* Created by Jiashu on 2015/5/31.
*/
public class BaseActivity extends AppCompatActivity {
EventBus mEventBus; | VolleyUtils mVolleyUtils; |
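One detail worth flagging in the VolleyUtils singleton shown in this row's context: it uses double-checked locking, but the mInstance field is not declared volatile, and under the Java memory model the idiom is only safe when the field is volatile (otherwise a thread can observe a non-null reference to a partially constructed object). The corrected shape is:

public class SafeLazySingleton {
    private static volatile SafeLazySingleton instance; // volatile is what makes DCL safe

    private SafeLazySingleton() {
    }

    public static SafeLazySingleton getInstance() {
        if (instance == null) {                    // first check avoids locking on the hot path
            synchronized (SafeLazySingleton.class) {
                if (instance == null) {            // second check under the lock
                    instance = new SafeLazySingleton();
                }
            }
        }
        return instance;
    }
}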
zhanjiashu/ZhihuDialyM | app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/activity/BaseActivity.java | // Path: common/src/main/java/io/gitcafe/zhanjiashu/common/util/DisplayUtils.java
// public class DisplayUtils {
//
// public static int getStatusBarHeight(Context context) {
// int result = 0;
// int resourceId = context.getResources().getIdentifier("status_bar_height", "dimen", "android");
// if (resourceId > 0) {
// result = context.getResources().getDimensionPixelSize(resourceId);
// }
// return result;
// }
// }
//
// Path: app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/util/VolleyUtils.java
// public class VolleyUtils {
//
// private static VolleyUtils mInstance;
// private RequestQueue mRequestQueue;
// private ImageLoader mImageLoader;
// private VolleyUtils(Context context) {
// mRequestQueue = getRequestQueue();
// mImageLoader = new ImageLoader(mRequestQueue, new VolleyLruImageCache(context));
// }
//
//
// public static VolleyUtils getInstance(Context context) {
// if (mInstance == null) {
// synchronized (VolleyUtils.class) {
// if (mInstance == null) {
// mInstance = new VolleyUtils(context.getApplicationContext());
// }
// }
// }
// return mInstance;
// }
//
// private RequestQueue getRequestQueue() {
// if (mRequestQueue == null) {
// mRequestQueue = Volley.newRequestQueue(App.getContext());
// }
// return mRequestQueue;
// }
//
// public <T> void addRequest(Request<T> request) {
// mRequestQueue.add(request);
// }
// }
| import android.os.Build;
import android.os.Bundle;
import android.support.v4.app.FragmentManager;
import android.support.v7.app.AppCompatActivity;
import android.view.View;
import android.view.ViewGroup;
import android.view.WindowManager;
import android.widget.LinearLayout;
import android.widget.TextView;
import de.greenrobot.event.EventBus;
import io.gitcafe.zhanjiashu.newzhihudialy.R;
import io.gitcafe.zhanjiashu.common.util.DisplayUtils;
import io.gitcafe.zhanjiashu.newzhihudialy.util.VolleyUtils; | package io.gitcafe.zhanjiashu.newzhihudialy.activity;
/**
* Created by Jiashu on 2015/5/31.
*/
public class BaseActivity extends AppCompatActivity {
EventBus mEventBus;
VolleyUtils mVolleyUtils;
FragmentManager mFragmentManager;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
mEventBus = EventBus.getDefault();
mVolleyUtils = VolleyUtils.getInstance(this);
mFragmentManager = getSupportFragmentManager();
// Implement an immersive (translucent) status bar on Android 4.4 and above
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.KITKAT) {
getWindow().addFlags(WindowManager.LayoutParams.FLAG_TRANSLUCENT_STATUS);
View view = new TextView(this);
LinearLayout.LayoutParams params = new LinearLayout.LayoutParams(
LinearLayout.LayoutParams.MATCH_PARENT, | // Path: common/src/main/java/io/gitcafe/zhanjiashu/common/util/DisplayUtils.java
// public class DisplayUtils {
//
// public static int getStatusBarHeight(Context context) {
// int result = 0;
// int resourceId = context.getResources().getIdentifier("status_bar_height", "dimen", "android");
// if (resourceId > 0) {
// result = context.getResources().getDimensionPixelSize(resourceId);
// }
// return result;
// }
// }
//
// Path: app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/util/VolleyUtils.java
// public class VolleyUtils {
//
// private static VolleyUtils mInstance;
// private RequestQueue mRequestQueue;
// private ImageLoader mImageLoader;
// private VolleyUtils(Context context) {
// mRequestQueue = getRequestQueue();
// mImageLoader = new ImageLoader(mRequestQueue, new VolleyLruImageCache(context));
// }
//
//
// public static VolleyUtils getInstance(Context context) {
// if (mInstance == null) {
// synchronized (VolleyUtils.class) {
// if (mInstance == null) {
// mInstance = new VolleyUtils(context.getApplicationContext());
// }
// }
// }
// return mInstance;
// }
//
// private RequestQueue getRequestQueue() {
// if (mRequestQueue == null) {
// mRequestQueue = Volley.newRequestQueue(App.getContext());
// }
// return mRequestQueue;
// }
//
// public <T> void addRequest(Request<T> request) {
// mRequestQueue.add(request);
// }
// }
// Path: app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/activity/BaseActivity.java
import android.os.Build;
import android.os.Bundle;
import android.support.v4.app.FragmentManager;
import android.support.v7.app.AppCompatActivity;
import android.view.View;
import android.view.ViewGroup;
import android.view.WindowManager;
import android.widget.LinearLayout;
import android.widget.TextView;
import de.greenrobot.event.EventBus;
import io.gitcafe.zhanjiashu.newzhihudialy.R;
import io.gitcafe.zhanjiashu.common.util.DisplayUtils;
import io.gitcafe.zhanjiashu.newzhihudialy.util.VolleyUtils;
package io.gitcafe.zhanjiashu.newzhihudialy.activity;
/**
* Created by Jiashu on 2015/5/31.
*/
public class BaseActivity extends AppCompatActivity {
EventBus mEventBus;
VolleyUtils mVolleyUtils;
FragmentManager mFragmentManager;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
mEventBus = EventBus.getDefault();
mVolleyUtils = VolleyUtils.getInstance(this);
mFragmentManager = getSupportFragmentManager();
// Implement an immersive (translucent) status bar on Android 4.4 and above
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.KITKAT) {
getWindow().addFlags(WindowManager.LayoutParams.FLAG_TRANSLUCENT_STATUS);
View view = new TextView(this);
LinearLayout.LayoutParams params = new LinearLayout.LayoutParams(
LinearLayout.LayoutParams.MATCH_PARENT, | DisplayUtils.getStatusBarHeight(BaseActivity.this)); |
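The KitKat branch completed in this row implements the translucent status bar trick: once FLAG_TRANSLUCENT_STATUS lets content draw under the status bar, a spacer view sized by DisplayUtils.getStatusBarHeight keeps the real UI from sliding underneath it. The row is cut off before the spacer is attached, so the sketch below fills in that step with assumptions: the attachment point (the window's content view) and the tint color are not from the source.

// Intended to live in an Activity; call from onCreate after setContentView.
private void addStatusBarSpacer() {
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.KITKAT) {
        getWindow().addFlags(WindowManager.LayoutParams.FLAG_TRANSLUCENT_STATUS);
        View spacer = new View(this);
        spacer.setBackgroundColor(0xFF3F51B5); // assumed primary color
        ViewGroup content = (ViewGroup) findViewById(android.R.id.content);
        content.addView(spacer, new ViewGroup.LayoutParams(
                ViewGroup.LayoutParams.MATCH_PARENT,
                DisplayUtils.getStatusBarHeight(this)));
    }
}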
zhanjiashu/ZhihuDialyM | app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/util/ZHStorageUtils.java | // Path: app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/app/App.java
// public class App extends Application {
//
// private static final String TAG = "AppApplication";
// private static Context mContext;
//
// @Override
// public void onCreate() {
// super.onCreate();
// mContext = this;
// initImageLoader();
// LogUtil.d(TAG, "App reset");
// }
//
// public static Context getContext() {
// return mContext;
// }
//
// private void initImageLoader() {
//
// File cacheDir = StorageUtils.getIndividualCacheDirectory(this);
//
// ImageLoaderConfiguration config = new ImageLoaderConfiguration.Builder(this)
// .diskCache(new UnlimitedDiskCache(cacheDir))
// .build();
// ImageLoader.getInstance().init(config);
// }
//
// public static int getAppVersion(Context context) {
// try {
// PackageInfo info = context.getPackageManager().getPackageInfo(context.getPackageName(), 0);
// return info.versionCode;
// } catch (PackageManager.NameNotFoundException e) {
// e.printStackTrace();
// }
// return 1;
// }
//
// public static String getVersionName() {
// PackageManager packageManager = getContext().getPackageManager();
//
// PackageInfo packInfo = null;
// try {
// packInfo = packageManager.getPackageInfo(getContext().getPackageName(), 0);
// String version = packInfo.versionName;
// return version;
// } catch (PackageManager.NameNotFoundException e) {
// e.printStackTrace();
// return "0";
// }
// }
// }
| import android.content.Context;
import android.os.Environment;
import android.text.TextUtils;
import com.jakewharton.disklrucache.DiskLruCache;
import com.nostra13.universalimageloader.utils.L;
import java.io.File;
import java.io.IOException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import io.gitcafe.zhanjiashu.newzhihudialy.app.App; | package io.gitcafe.zhanjiashu.newzhihudialy.util;
/**
* Created by Jiashu on 2015/6/3.
*/
public class ZHStorageUtils {
private static DiskLruCache mFilesDiskCache;
public static File getDiskCacheDir(Context context, String dir) {
if (Environment.MEDIA_MOUNTED.equals(Environment.getExternalStorageState())
|| !Environment.isExternalStorageRemovable()) {
return new File(getExternalCacheDir(context), dir);
} else {
return new File(context.getCacheDir(), dir);
}
}
public static DiskLruCache getFilesDiskCache(Context context) {
if (mFilesDiskCache == null || mFilesDiskCache.isClosed()) {
try {
File cacheDir = getDiskCacheDir(context, "files");
if (!cacheDir.exists()) {
cacheDir.mkdirs();
} | // Path: app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/app/App.java
// public class App extends Application {
//
// private static final String TAG = "AppApplication";
// private static Context mContext;
//
// @Override
// public void onCreate() {
// super.onCreate();
// mContext = this;
// initImageLoader();
// LogUtil.d(TAG, "App reset");
// }
//
// public static Context getContext() {
// return mContext;
// }
//
// private void initImageLoader() {
//
// File cacheDir = StorageUtils.getIndividualCacheDirectory(this);
//
// ImageLoaderConfiguration config = new ImageLoaderConfiguration.Builder(this)
// .diskCache(new UnlimitedDiskCache(cacheDir))
// .build();
// ImageLoader.getInstance().init(config);
// }
//
// public static int getAppVersion(Context context) {
// try {
// PackageInfo info = context.getPackageManager().getPackageInfo(context.getPackageName(), 0);
// return info.versionCode;
// } catch (PackageManager.NameNotFoundException e) {
// e.printStackTrace();
// }
// return 1;
// }
//
// public static String getVersionName() {
// PackageManager packageManager = getContext().getPackageManager();
//
// PackageInfo packInfo = null;
// try {
// packInfo = packageManager.getPackageInfo(getContext().getPackageName(), 0);
// String version = packInfo.versionName;
// return version;
// } catch (PackageManager.NameNotFoundException e) {
// e.printStackTrace();
// return "0";
// }
// }
// }
// Path: app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/util/ZHStorageUtils.java
import android.content.Context;
import android.os.Environment;
import android.text.TextUtils;
import com.jakewharton.disklrucache.DiskLruCache;
import com.nostra13.universalimageloader.utils.L;
import java.io.File;
import java.io.IOException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import io.gitcafe.zhanjiashu.newzhihudialy.app.App;
package io.gitcafe.zhanjiashu.newzhihudialy.util;
/**
* Created by Jiashu on 2015/6/3.
*/
public class ZHStorageUtils {
private static DiskLruCache mFilesDiskCache;
public static File getDiskCacheDir(Context context, String dir) {
if (Environment.MEDIA_MOUNTED.equals(Environment.getExternalStorageState())
|| !Environment.isExternalStorageRemovable()) {
return new File(getExternalCacheDir(context), dir);
} else {
return new File(context.getCacheDir(), dir);
}
}
public static DiskLruCache getFilesDiskCache(Context context) {
if (mFilesDiskCache == null || mFilesDiskCache.isClosed()) {
try {
File cacheDir = getDiskCacheDir(context, "files");
if (!cacheDir.exists()) {
cacheDir.mkdirs();
} | mFilesDiskCache = DiskLruCache.open(cacheDir, App.getAppVersion(context), 1, 30 * 1024 * 1024); |
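The next_line opens a com.jakewharton.disklrucache.DiskLruCache with a 30 MB budget, one value slot per entry, and the app version as the cache version, so an upgrade invalidates stale entries. A minimal write/read sketch against that API; key and payload are placeholder names, and keys must be simple lowercase strings, which is why the surrounding utility imports MessageDigest to hash them first:

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import com.jakewharton.disklrucache.DiskLruCache;

// Sketch of one round trip, assuming the same open(...) call as the next_line.
static void roundTrip(File cacheDir, int appVersion, String key, byte[] payload) throws IOException {
    DiskLruCache cache = DiskLruCache.open(cacheDir, appVersion, 1, 30 * 1024 * 1024);
    DiskLruCache.Editor editor = cache.edit(key);      // null while another edit is in flight
    if (editor != null) {
        OutputStream out = editor.newOutputStream(0);  // index 0: the single value slot
        out.write(payload);
        out.close();
        editor.commit();                               // publish the entry
    }
    DiskLruCache.Snapshot snapshot = cache.get(key);   // null on a miss
    if (snapshot != null) {
        InputStream in = snapshot.getInputStream(0);
        in.close();                                    // real code would read first
    }
    cache.close();
}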
zhanjiashu/ZhihuDialyM | app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/util/VolleyUtils.java | // Path: app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/app/App.java
// public class App extends Application {
//
// private static final String TAG = "AppApplication";
// private static Context mContext;
//
// @Override
// public void onCreate() {
// super.onCreate();
// mContext = this;
// initImageLoader();
// LogUtil.d(TAG, "App reset");
// }
//
// public static Context getContext() {
// return mContext;
// }
//
// private void initImageLoader() {
//
// File cacheDir = StorageUtils.getIndividualCacheDirectory(this);
//
// ImageLoaderConfiguration config = new ImageLoaderConfiguration.Builder(this)
// .diskCache(new UnlimitedDiskCache(cacheDir))
// .build();
// ImageLoader.getInstance().init(config);
// }
//
// public static int getAppVersion(Context context) {
// try {
// PackageInfo info = context.getPackageManager().getPackageInfo(context.getPackageName(), 0);
// return info.versionCode;
// } catch (PackageManager.NameNotFoundException e) {
// e.printStackTrace();
// }
// return 1;
// }
//
// public static String getVersionName() {
// PackageManager packageManager = getContext().getPackageManager();
//
// PackageInfo packInfo = null;
// try {
// packInfo = packageManager.getPackageInfo(getContext().getPackageName(), 0);
// String version = packInfo.versionName;
// return version;
// } catch (PackageManager.NameNotFoundException e) {
// e.printStackTrace();
// return "0";
// }
// }
// }
//
// Path: app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/cache/VolleyLruImageCache.java
// public class VolleyLruImageCache extends LruCache<String, Bitmap> implements ImageLoader.ImageCache {
//
// /**
// * @param maxSize for caches that do not override {@link #sizeOf}, this is
// * the maximum number of entries in the cache. For all other caches,
// * this is the maximum sum of the sizes of the entries in this cache.
// */
// public VolleyLruImageCache(int maxSize) {
// super(maxSize);
// }
//
// public VolleyLruImageCache(Context context) {
// this(getDefaultCacheSize(context));
// }
//
// private static int getDefaultCacheSize(Context context) {
// int maxMemory = (int) (Runtime.getRuntime().maxMemory() / 1024);
// return maxMemory / 8;
// }
//
// @Override
// protected int sizeOf(String key, Bitmap value) {
// return value.getRowBytes() * value.getHeight();
// }
//
// @Override
// public Bitmap getBitmap(String s) {
// return this.get(s);
// }
//
// @Override
// public void putBitmap(String s, Bitmap bitmap) {
// this.put(s, bitmap);
// }
// }
| import android.content.Context;
import android.graphics.Bitmap;
import android.util.LruCache;
import com.android.volley.Request;
import com.android.volley.RequestQueue;
import com.android.volley.toolbox.ImageLoader;
import com.android.volley.toolbox.Volley;
import io.gitcafe.zhanjiashu.newzhihudialy.app.App;
import io.gitcafe.zhanjiashu.newzhihudialy.cache.VolleyLruImageCache; | package io.gitcafe.zhanjiashu.newzhihudialy.util;
/**
* Created by Jiashu on 2015/5/31.
*/
public class VolleyUtils {
private static VolleyUtils mInstance;
private RequestQueue mRequestQueue;
private ImageLoader mImageLoader;
private VolleyUtils(Context context) {
mRequestQueue = getRequestQueue(); | // Path: app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/app/App.java
// public class App extends Application {
//
// private static final String TAG = "AppApplication";
// private static Context mContext;
//
// @Override
// public void onCreate() {
// super.onCreate();
// mContext = this;
// initImageLoader();
// LogUtil.d(TAG, "App reset");
// }
//
// public static Context getContext() {
// return mContext;
// }
//
// private void initImageLoader() {
//
// File cacheDir = StorageUtils.getIndividualCacheDirectory(this);
//
// ImageLoaderConfiguration config = new ImageLoaderConfiguration.Builder(this)
// .diskCache(new UnlimitedDiskCache(cacheDir))
// .build();
// ImageLoader.getInstance().init(config);
// }
//
// public static int getAppVersion(Context context) {
// try {
// PackageInfo info = context.getPackageManager().getPackageInfo(context.getPackageName(), 0);
// return info.versionCode;
// } catch (PackageManager.NameNotFoundException e) {
// e.printStackTrace();
// }
// return 1;
// }
//
// public static String getVersionName() {
// PackageManager packageManager = getContext().getPackageManager();
//
// PackageInfo packInfo = null;
// try {
// packInfo = packageManager.getPackageInfo(getContext().getPackageName(), 0);
// String version = packInfo.versionName;
// return version;
// } catch (PackageManager.NameNotFoundException e) {
// e.printStackTrace();
// return "0";
// }
// }
// }
//
// Path: app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/cache/VolleyLruImageCache.java
// public class VolleyLruImageCache extends LruCache<String, Bitmap> implements ImageLoader.ImageCache {
//
// /**
// * @param maxSize for caches that do not override {@link #sizeOf}, this is
// * the maximum number of entries in the cache. For all other caches,
// * this is the maximum sum of the sizes of the entries in this cache.
// */
// public VolleyLruImageCache(int maxSize) {
// super(maxSize);
// }
//
// public VolleyLruImageCache(Context context) {
// this(getDefaultCacheSize(context));
// }
//
// private static int getDefaultCacheSize(Context context) {
// int maxMemory = (int) (Runtime.getRuntime().maxMemory() / 1024);
// return maxMemory / 8;
// }
//
// @Override
// protected int sizeOf(String key, Bitmap value) {
// return value.getRowBytes() * value.getHeight();
// }
//
// @Override
// public Bitmap getBitmap(String s) {
// return this.get(s);
// }
//
// @Override
// public void putBitmap(String s, Bitmap bitmap) {
// this.put(s, bitmap);
// }
// }
// Path: app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/util/VolleyUtils.java
import android.content.Context;
import android.graphics.Bitmap;
import android.util.LruCache;
import com.android.volley.Request;
import com.android.volley.RequestQueue;
import com.android.volley.toolbox.ImageLoader;
import com.android.volley.toolbox.Volley;
import io.gitcafe.zhanjiashu.newzhihudialy.app.App;
import io.gitcafe.zhanjiashu.newzhihudialy.cache.VolleyLruImageCache;
package io.gitcafe.zhanjiashu.newzhihudialy.util;
/**
* Created by Jiashu on 2015/5/31.
*/
public class VolleyUtils {
private static VolleyUtils mInstance;
private RequestQueue mRequestQueue;
private ImageLoader mImageLoader;
private VolleyUtils(Context context) {
mRequestQueue = getRequestQueue(); | mImageLoader = new ImageLoader(mRequestQueue, new VolleyLruImageCache(context)); |
zhanjiashu/ZhihuDialyM | app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/util/VolleyUtils.java | // Path: app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/app/App.java
// public class App extends Application {
//
// private static final String TAG = "AppApplication";
// private static Context mContext;
//
// @Override
// public void onCreate() {
// super.onCreate();
// mContext = this;
// initImageLoader();
// LogUtil.d(TAG, "App reset");
// }
//
// public static Context getContext() {
// return mContext;
// }
//
// private void initImageLoader() {
//
// File cacheDir = StorageUtils.getIndividualCacheDirectory(this);
//
// ImageLoaderConfiguration config = new ImageLoaderConfiguration.Builder(this)
// .diskCache(new UnlimitedDiskCache(cacheDir))
// .build();
// ImageLoader.getInstance().init(config);
// }
//
// public static int getAppVersion(Context context) {
// try {
// PackageInfo info = context.getPackageManager().getPackageInfo(context.getPackageName(), 0);
// return info.versionCode;
// } catch (PackageManager.NameNotFoundException e) {
// e.printStackTrace();
// }
// return 1;
// }
//
// public static String getVersionName() {
// PackageManager packageManager = getContext().getPackageManager();
//
// PackageInfo packInfo = null;
// try {
// packInfo = packageManager.getPackageInfo(getContext().getPackageName(), 0);
// String version = packInfo.versionName;
// return version;
// } catch (PackageManager.NameNotFoundException e) {
// e.printStackTrace();
// return "0";
// }
// }
// }
//
// Path: app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/cache/VolleyLruImageCache.java
// public class VolleyLruImageCache extends LruCache<String, Bitmap> implements ImageLoader.ImageCache {
//
// /**
// * @param maxSize for caches that do not override {@link #sizeOf}, this is
// * the maximum number of entries in the cache. For all other caches,
// * this is the maximum sum of the sizes of the entries in this cache.
// */
// public VolleyLruImageCache(int maxSize) {
// super(maxSize);
// }
//
// public VolleyLruImageCache(Context context) {
// this(getDefaultCacheSize(context));
// }
//
// private static int getDefaultCacheSize(Context context) {
// int maxMemory = (int) (Runtime.getRuntime().maxMemory() / 1024);
// return maxMemory / 8;
// }
//
// @Override
// protected int sizeOf(String key, Bitmap value) {
// return value.getRowBytes() * value.getHeight();
// }
//
// @Override
// public Bitmap getBitmap(String s) {
// return this.get(s);
// }
//
// @Override
// public void putBitmap(String s, Bitmap bitmap) {
// this.put(s, bitmap);
// }
// }
| import android.content.Context;
import android.graphics.Bitmap;
import android.util.LruCache;
import com.android.volley.Request;
import com.android.volley.RequestQueue;
import com.android.volley.toolbox.ImageLoader;
import com.android.volley.toolbox.Volley;
import io.gitcafe.zhanjiashu.newzhihudialy.app.App;
import io.gitcafe.zhanjiashu.newzhihudialy.cache.VolleyLruImageCache; | package io.gitcafe.zhanjiashu.newzhihudialy.util;
/**
* Created by Jiashu on 2015/5/31.
*/
public class VolleyUtils {
private static VolleyUtils mInstance;
private RequestQueue mRequestQueue;
private ImageLoader mImageLoader;
private VolleyUtils(Context context) {
mRequestQueue = getRequestQueue();
mImageLoader = new ImageLoader(mRequestQueue, new VolleyLruImageCache(context));
}
public static VolleyUtils getInstance(Context context) {
if (mInstance == null) {
synchronized (VolleyUtils.class) {
if (mInstance == null) {
mInstance = new VolleyUtils(context.getApplicationContext());
}
}
}
return mInstance;
}
private RequestQueue getRequestQueue() {
if (mRequestQueue == null) { | // Path: app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/app/App.java
// public class App extends Application {
//
// private static final String TAG = "AppApplication";
// private static Context mContext;
//
// @Override
// public void onCreate() {
// super.onCreate();
// mContext = this;
// initImageLoader();
// LogUtil.d(TAG, "App reset");
// }
//
// public static Context getContext() {
// return mContext;
// }
//
// private void initImageLoader() {
//
// File cacheDir = StorageUtils.getIndividualCacheDirectory(this);
//
// ImageLoaderConfiguration config = new ImageLoaderConfiguration.Builder(this)
// .diskCache(new UnlimitedDiskCache(cacheDir))
// .build();
// ImageLoader.getInstance().init(config);
// }
//
// public static int getAppVersion(Context context) {
// try {
// PackageInfo info = context.getPackageManager().getPackageInfo(context.getPackageName(), 0);
// return info.versionCode;
// } catch (PackageManager.NameNotFoundException e) {
// e.printStackTrace();
// }
// return 1;
// }
//
// public static String getVersionName() {
// PackageManager packageManager = getContext().getPackageManager();
//
// PackageInfo packInfo = null;
// try {
// packInfo = packageManager.getPackageInfo(getContext().getPackageName(), 0);
// String version = packInfo.versionName;
// return version;
// } catch (PackageManager.NameNotFoundException e) {
// e.printStackTrace();
// return "0";
// }
// }
// }
//
// Path: app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/cache/VolleyLruImageCache.java
// public class VolleyLruImageCache extends LruCache<String, Bitmap> implements ImageLoader.ImageCache {
//
// /**
// * @param maxSize for caches that do not override {@link #sizeOf}, this is
// * the maximum number of entries in the cache. For all other caches,
// * this is the maximum sum of the sizes of the entries in this cache.
// */
// public VolleyLruImageCache(int maxSize) {
// super(maxSize);
// }
//
// public VolleyLruImageCache(Context context) {
// this(getDefaultCacheSize(context));
// }
//
// private static int getDefaultCacheSize(Context context) {
// int maxMemory = (int) (Runtime.getRuntime().maxMemory() / 1024);
// return maxMemory / 8;
// }
//
// @Override
// protected int sizeOf(String key, Bitmap value) {
// return value.getRowBytes() * value.getHeight();
// }
//
// @Override
// public Bitmap getBitmap(String s) {
// return this.get(s);
// }
//
// @Override
// public void putBitmap(String s, Bitmap bitmap) {
// this.put(s, bitmap);
// }
// }
// Path: app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/util/VolleyUtils.java
import android.content.Context;
import android.graphics.Bitmap;
import android.util.LruCache;
import com.android.volley.Request;
import com.android.volley.RequestQueue;
import com.android.volley.toolbox.ImageLoader;
import com.android.volley.toolbox.Volley;
import io.gitcafe.zhanjiashu.newzhihudialy.app.App;
import io.gitcafe.zhanjiashu.newzhihudialy.cache.VolleyLruImageCache;
package io.gitcafe.zhanjiashu.newzhihudialy.util;
/**
* Created by Jiashu on 2015/5/31.
*/
public class VolleyUtils {
private static VolleyUtils mInstance;
private RequestQueue mRequestQueue;
private ImageLoader mImageLoader;
private VolleyUtils(Context context) {
mRequestQueue = getRequestQueue();
mImageLoader = new ImageLoader(mRequestQueue, new VolleyLruImageCache(context));
}
public static VolleyUtils getInstance(Context context) {
if (mInstance == null) {
synchronized (VolleyUtils.class) {
if (mInstance == null) {
mInstance = new VolleyUtils(context.getApplicationContext());
}
}
}
return mInstance;
}
private RequestQueue getRequestQueue() {
if (mRequestQueue == null) { | mRequestQueue = Volley.newRequestQueue(App.getContext()); |
zhanjiashu/ZhihuDialyM | app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/app/App.java | // Path: common/src/main/java/io/gitcafe/zhanjiashu/common/util/LogUtil.java
// public class LogUtil {
// public static final int VERBOSE = 1;
// public static final int DEBUG = 2;
// public static final int INFO = 3;
// public static final int WARN = 4;
// public static final int ERROR = 5;
// public static final int NOTHING = 6;
// public static final int LEVEL = VERBOSE;
//
// public static void v(String tag, String msg) {
// if (LEVEL <= VERBOSE) {
// Log.v(tag, msg);
// }
// }
//
// public static void d(String tag, String msg) {
// if (LEVEL <= DEBUG) {
// Log.d(tag, msg);
// }
// }
//
// public static void i(String tag, String msg) {
// if (LEVEL <= INFO) {
// Log.i(tag, msg);
// }
// }
//
// public static void w(String tag, String msg) {
// if (LEVEL <= WARN) {
// Log.w(tag, msg);
// }
// }
//
// public static void e(String tag, String msg) {
// if (LEVEL <= ERROR) {
// Log.e(tag, msg);
// }
// }
// }
| import android.app.Application;
import android.content.Context;
import android.content.pm.PackageInfo;
import android.content.pm.PackageManager;
import com.nostra13.universalimageloader.cache.disc.impl.UnlimitedDiskCache;
import com.nostra13.universalimageloader.core.ImageLoader;
import com.nostra13.universalimageloader.core.ImageLoaderConfiguration;
import com.nostra13.universalimageloader.utils.StorageUtils;
import java.io.File;
import io.gitcafe.zhanjiashu.common.util.LogUtil; | package io.gitcafe.zhanjiashu.newzhihudialy.app;
/**
* Created by Jiashu on 2015/5/31.
*/
public class App extends Application {
private static final String TAG = "AppApplication";
private static Context mContext;
@Override
public void onCreate() {
super.onCreate();
mContext = this;
initImageLoader(); | // Path: common/src/main/java/io/gitcafe/zhanjiashu/common/util/LogUtil.java
// public class LogUtil {
// public static final int VERBOSE = 1;
// public static final int DEBUG = 2;
// public static final int INFO = 3;
// public static final int WARN = 4;
// public static final int ERROR = 5;
// public static final int NOTHING = 6;
// public static final int LEVEL = VERBOSE;
//
// public static void v(String tag, String msg) {
// if (LEVEL <= VERBOSE) {
// Log.v(tag, msg);
// }
// }
//
// public static void d(String tag, String msg) {
// if (LEVEL <= DEBUG) {
// Log.d(tag, msg);
// }
// }
//
// public static void i(String tag, String msg) {
// if (LEVEL <= INFO) {
// Log.i(tag, msg);
// }
// }
//
// public static void w(String tag, String msg) {
// if (LEVEL <= WARN) {
// Log.w(tag, msg);
// }
// }
//
// public static void e(String tag, String msg) {
// if (LEVEL <= ERROR) {
// Log.e(tag, msg);
// }
// }
// }
// Path: app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/app/App.java
import android.app.Application;
import android.content.Context;
import android.content.pm.PackageInfo;
import android.content.pm.PackageManager;
import com.nostra13.universalimageloader.cache.disc.impl.UnlimitedDiskCache;
import com.nostra13.universalimageloader.core.ImageLoader;
import com.nostra13.universalimageloader.core.ImageLoaderConfiguration;
import com.nostra13.universalimageloader.utils.StorageUtils;
import java.io.File;
import io.gitcafe.zhanjiashu.common.util.LogUtil;
package io.gitcafe.zhanjiashu.newzhihudialy.app;
/**
* Created by Jiashu on 2015/5/31.
*/
public class App extends Application {
private static final String TAG = "AppApplication";
private static Context mContext;
@Override
public void onCreate() {
super.onCreate();
mContext = this;
initImageLoader(); | LogUtil.d(TAG, "App reset"); |
zhanjiashu/ZhihuDialyM | app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/task/FetchLatestDialyTask.java | // Path: app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/model/DialyEntity.java
// public class DialyEntity {
//
// private String date;
// private List<StoryEntity> stories;
//
// public void setDate(String date) {
// this.date = date;
// }
//
// public void setStories(List<StoryEntity> stories) {
// this.stories = stories;
// }
//
// public String getDate() {
// return date;
// }
//
// public List<StoryEntity> getStories() {
// return stories;
// }
// }
//
// Path: app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/model/LatestDialyEntity.java
// public class LatestDialyEntity extends DialyEntity{
//
// private List<TopStoryEntity> top_stories;
//
// @Override
// public void setDate(String date) {
// super.setDate(date);
// }
//
// public void setTopStories(List<TopStoryEntity> top_stories) {
// this.top_stories = top_stories;
// }
//
// @Override
// public void setStories(List<StoryEntity> stories) {
// super.setStories(stories);
// }
//
// public String getDate() {
// return super.getDate();
// }
//
// public List<TopStoryEntity> getTopStories() {
// return top_stories;
// }
//
// public List<StoryEntity> getStories() {
// return super.getStories();
// }
// }
| import android.content.Context;
import com.google.gson.Gson;
import io.gitcafe.zhanjiashu.newzhihudialy.model.DialyEntity;
import io.gitcafe.zhanjiashu.newzhihudialy.model.LatestDialyEntity; | package io.gitcafe.zhanjiashu.newzhihudialy.task;
/**
* Created by Jiashu on 2015/6/7.
*/
public class FetchLatestDialyTask extends FetchTask<DialyEntity> {
public static final String URL_LATEST = "http://news-at.zhihu.com/api/4/stories/latest";
public FetchLatestDialyTask(Context context) {
this(context, true);
}
public FetchLatestDialyTask(Context context, boolean fetchFromNetwork) {
super(context, URL_LATEST, fetchFromNetwork);
}
@Override
protected void parseResponse(String response, FetchCallback<DialyEntity> callback) { | // Path: app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/model/DialyEntity.java
// public class DialyEntity {
//
// private String date;
// private List<StoryEntity> stories;
//
// public void setDate(String date) {
// this.date = date;
// }
//
// public void setStories(List<StoryEntity> stories) {
// this.stories = stories;
// }
//
// public String getDate() {
// return date;
// }
//
// public List<StoryEntity> getStories() {
// return stories;
// }
// }
//
// Path: app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/model/LatestDialyEntity.java
// public class LatestDialyEntity extends DialyEntity{
//
// private List<TopStoryEntity> top_stories;
//
// @Override
// public void setDate(String date) {
// super.setDate(date);
// }
//
// public void setTopStories(List<TopStoryEntity> top_stories) {
// this.top_stories = top_stories;
// }
//
// @Override
// public void setStories(List<StoryEntity> stories) {
// super.setStories(stories);
// }
//
// public String getDate() {
// return super.getDate();
// }
//
// public List<TopStoryEntity> getTopStories() {
// return top_stories;
// }
//
// public List<StoryEntity> getStories() {
// return super.getStories();
// }
// }
// Path: app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/task/FetchLatestDialyTask.java
import android.content.Context;
import com.google.gson.Gson;
import io.gitcafe.zhanjiashu.newzhihudialy.model.DialyEntity;
import io.gitcafe.zhanjiashu.newzhihudialy.model.LatestDialyEntity;
package io.gitcafe.zhanjiashu.newzhihudialy.task;
/**
* Created by Jiashu on 2015/6/7.
*/
public class FetchLatestDialyTask extends FetchTask<DialyEntity> {
public static final String URL_LATEST = "http://news-at.zhihu.com/api/4/stories/latest";
public FetchLatestDialyTask(Context context) {
this(context, true);
}
public FetchLatestDialyTask(Context context, boolean fetchFromNetwork) {
super(context, URL_LATEST, fetchFromNetwork);
}
@Override
protected void parseResponse(String response, FetchCallback<DialyEntity> callback) { | LatestDialyEntity entity = new Gson().fromJson(response, LatestDialyEntity.class); |
zhanjiashu/ZhihuDialyM | app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/activity/SettingsActivity.java | // Path: app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/fragment/SettingsFragment.java
// public class SettingsFragment extends PreferenceFragment implements Preference.OnPreferenceClickListener {
//
// private final String TAG = getClass().getSimpleName();
//
// public static final String CLEAR_CACHE = "clear_cache";
// public static final String ABOUT_APP = "about_app";
// public static final String APP_VERSION = "app_version";
// public static final String ENABLE_SISTER = "enable_sister";
// public static final String ENABLE_FRESH_BIG = "enable_fresh_big";
//
// Preference mClearCachePref;
// Preference mAppVersionPref;
//
// @Override
// public void onCreate(Bundle savedInstanceState) {
// super.onCreate(savedInstanceState);
// addPreferencesFromResource(R.xml.preferences);
//
// mClearCachePref = findPreference(CLEAR_CACHE);
// mAppVersionPref = findPreference(APP_VERSION);
//
// mAppVersionPref.setSummary("Current version: " + App.getVersionName());
//
// double cacheSize = calculateCacheSize();
// DecimalFormat decimalFormat = new DecimalFormat("#0.00");
// mClearCachePref.setSummary("Cache size: " + decimalFormat.format(cacheSize) + " M");
// mClearCachePref.setOnPreferenceClickListener(this);
// }
//
// @Override
// public boolean onPreferenceClick(Preference preference) {
// String key = preference.getKey();
// if (CLEAR_CACHE.equals(key)) {
// Snackbar.make(getView(), "Are you sure you want to clear the cache?", Snackbar.LENGTH_LONG)
// .setActionTextColor(ColorStateList.valueOf(getResources().getColor(R.color.material_colorPrimary)))
// .setAction("Yes", new View.OnClickListener() {
// @Override
// public void onClick(View view) {
// clearCache();
// }
// })
// .show();
// }
// return true;
// }
//
// private double calculateCacheSize() {
// File uilCacheFile = ImageLoader.getInstance().getDiskCache().getDirectory();
// return ZHStorageUtils.getDirSize(uilCacheFile) +
// ZHStorageUtils.getFilesDiskCache(getActivity()).size() / (1024 * 1024);
// }
//
// private void clearCache() {
// try {
// ImageLoader.getInstance().getDiskCache().clear();
// ZHStorageUtils.getFilesDiskCache(getActivity()).delete();
// new WebView(getActivity()).clearCache(true);
// } catch (IOException e) {
// e.printStackTrace();
// }
// if (calculateCacheSize() == 0.0) {
// Snackbar.make(getView(), "Cache cleared", Snackbar.LENGTH_SHORT).show();
// }
// mClearCachePref.setSummary("Cache size: 0.00 M");
// }
// }
| import android.os.Bundle;
import android.support.v7.app.ActionBar;
import android.support.v7.widget.Toolbar;
import android.view.MenuItem;
import butterknife.ButterKnife;
import butterknife.InjectView;
import io.gitcafe.zhanjiashu.newzhihudialy.R;
import io.gitcafe.zhanjiashu.newzhihudialy.fragment.SettingsFragment; | package io.gitcafe.zhanjiashu.newzhihudialy.activity;
public class SettingsActivity extends BaseActivity {
@InjectView(R.id.toolbar)
Toolbar mToolbar;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_settings);
ButterKnife.inject(this);
setSupportActionBar(mToolbar);
ActionBar actionBar = getSupportActionBar();
if (actionBar != null) {
actionBar.setDisplayHomeAsUpEnabled(true);
}
if (savedInstanceState == null && findViewById(R.id.fl_container) != null) {
getFragmentManager()
.beginTransaction() | // Path: app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/fragment/SettingsFragment.java
// public class SettingsFragment extends PreferenceFragment implements Preference.OnPreferenceClickListener {
//
// private final String TAG = getClass().getSimpleName();
//
// public static final String CLEAR_CACHE = "clear_cache";
// public static final String ABOUT_APP = "about_app";
// public static final String APP_VERSION = "app_version";
// public static final String ENABLE_SISTER = "enable_sister";
// public static final String ENABLE_FRESH_BIG = "enable_fresh_big";
//
// Preference mClearCachePref;
// Preference mAppVersionPref;
//
// @Override
// public void onCreate(Bundle savedInstanceState) {
// super.onCreate(savedInstanceState);
// addPreferencesFromResource(R.xml.preferences);
//
// mClearCachePref = findPreference(CLEAR_CACHE);
// mAppVersionPref = findPreference(APP_VERSION);
//
// mAppVersionPref.setSummary("Current version: " + App.getVersionName());
//
// double cacheSize = calculateCacheSize();
// DecimalFormat decimalFormat = new DecimalFormat("#0.00");
// mClearCachePref.setSummary("Cache size: " + decimalFormat.format(cacheSize) + " M");
// mClearCachePref.setOnPreferenceClickListener(this);
// }
//
// @Override
// public boolean onPreferenceClick(Preference preference) {
// String key = preference.getKey();
// if (CLEAR_CACHE.equals(key)) {
// Snackbar.make(getView(), "Are you sure you want to clear the cache?", Snackbar.LENGTH_LONG)
// .setActionTextColor(ColorStateList.valueOf(getResources().getColor(R.color.material_colorPrimary)))
// .setAction("Yes", new View.OnClickListener() {
// @Override
// public void onClick(View view) {
// clearCache();
// }
// })
// .show();
// }
// return true;
// }
//
// private double calculateCacheSize() {
// File uilCacheFile = ImageLoader.getInstance().getDiskCache().getDirectory();
// return ZHStorageUtils.getDirSize(uilCacheFile) +
// ZHStorageUtils.getFilesDiskCache(getActivity()).size() / (1024 * 1024);
// }
//
// private void clearCache() {
// try {
// ImageLoader.getInstance().getDiskCache().clear();
// ZHStorageUtils.getFilesDiskCache(getActivity()).delete();
// new WebView(getActivity()).clearCache(true);
// } catch (IOException e) {
// e.printStackTrace();
// }
// if (calculateCacheSize() == 0.0) {
// Snackbar.make(getView(), "Cache cleared", Snackbar.LENGTH_SHORT).show();
// }
// mClearCachePref.setSummary("Cache size: 0.00 M");
// }
// }
// Path: app/src/main/java/io/gitcafe/zhanjiashu/newzhihudialy/activity/SettingsActivity.java
import android.os.Bundle;
import android.support.v7.app.ActionBar;
import android.support.v7.widget.Toolbar;
import android.view.MenuItem;
import butterknife.ButterKnife;
import butterknife.InjectView;
import io.gitcafe.zhanjiashu.newzhihudialy.R;
import io.gitcafe.zhanjiashu.newzhihudialy.fragment.SettingsFragment;
package io.gitcafe.zhanjiashu.newzhihudialy.activity;
public class SettingsActivity extends BaseActivity {
@InjectView(R.id.toolbar)
Toolbar mToolbar;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_settings);
ButterKnife.inject(this);
setSupportActionBar(mToolbar);
ActionBar actionBar = getSupportActionBar();
if (actionBar != null) {
actionBar.setDisplayHomeAsUpEnabled(true);
}
if (savedInstanceState == null && findViewById(R.id.fl_container) != null) {
getFragmentManager()
.beginTransaction() | .replace(R.id.fl_container, new SettingsFragment()) |
numenta/htm.java | src/test/java/org/numenta/nupic/network/ManualInputTest.java | // Path: src/main/java/org/numenta/nupic/model/Cell.java
// public class Cell implements Comparable<Cell>, Serializable {
// /** keep it simple */
// private static final long serialVersionUID = 1L;
//
// /** This cell's index */
// private final int index;
// /** Remove boxing where necessary */
// final Integer boxedIndex;
// /** The owning {@link Column} */
// private final Column column;
// /** Cache this because Cells are immutable */
// private final int hashcode;
//
//
// /**
// * Constructs a new {@code Cell} object
// * @param column the containing {@link Column}
// * @param colSeq the index of this {@code Cell} within its column
// */
// public Cell(Column column, int colSeq) {
// this.column = column;
// this.index = column.getIndex() * column.getNumCellsPerColumn() + colSeq;
// this.boxedIndex = new Integer(index);
// this.hashcode = hashCode();
// }
//
// /**
// * Returns this {@code Cell}'s index.
// * @return
// */
// public int getIndex() {
// return index;
// }
//
// /**
// * Returns the column within which this cell resides
// * @return
// */
// public Column getColumn() {
// return column;
// }
//
// /**
// * Returns the Set of {@link Synapse}s which have this cell
// * as their source cells.
// *
// * @param c the connections state of the temporal memory
// * @return the Set of {@link Synapse}s which have this cell
// * as their source cells.
// */
// public Set<Synapse> getReceptorSynapses(Connections c) {
// return getReceptorSynapses(c, false);
// }
//
// /**
// * Returns the Set of {@link Synapse}s which have this cell
// * as their source cells.
// *
// * @param c the connections state of the temporal memory
// * @param doLazyCreate create a container for future use if true, if false
// * return an orphaned empty set.
// * @return the Set of {@link Synapse}s which have this cell
// * as their source cells.
// */
// public Set<Synapse> getReceptorSynapses(Connections c, boolean doLazyCreate) {
// return c.getReceptorSynapses(this, doLazyCreate);
// }
//
// /**
// * Returns a {@link List} of this {@code Cell}'s {@link DistalDendrite}s
// *
// * @param c the connections state of the temporal memory
// * @param doLazyCreate create a container for future use if true, if false
// * return an orphaned empty set.
// * @return a {@link List} of this {@code Cell}'s {@link DistalDendrite}s
// */
// public List<DistalDendrite> getSegments(Connections c) {
// return getSegments(c, false);
// }
//
// /**
// * Returns a {@link List} of this {@code Cell}'s {@link DistalDendrite}s
// *
// * @param c the connections state of the temporal memory
// * @param doLazyCreate create a container for future use if true, if false
// * return an orphaned empty set.
// * @return a {@link List} of this {@code Cell}'s {@link DistalDendrite}s
// */
// public List<DistalDendrite> getSegments(Connections c, boolean doLazyCreate) {
// return c.getSegments(this, doLazyCreate);
// }
//
// /**
// * {@inheritDoc}
// */
// public String toString() {
// return String.valueOf(index);
// }
//
// /**
// * {@inheritDoc}
// *
// * <em> Note: All comparisons use the cell's index only </em>
// */
// @Override
// public int compareTo(Cell arg0) {
// return boxedIndex.compareTo(arg0.boxedIndex);
// }
//
// @Override
// public int hashCode() {
// if(hashcode == 0) {
// final int prime = 31;
// int result = 1;
// result = prime * result + index;
// return result;
// }
// return hashcode;
// }
//
// @Override
// public boolean equals(Object obj) {
// if(this == obj)
// return true;
// if(obj == null)
// return false;
// if(getClass() != obj.getClass())
// return false;
// Cell other = (Cell)obj;
// if(index != other.index)
// return false;
// return true;
// }
//
// }
| import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.Set;
import org.junit.Test;
import org.numenta.nupic.algorithms.Classification;
import org.numenta.nupic.model.Cell;
import org.numenta.nupic.model.Column;
import org.numenta.nupic.model.ComputeCycle;
import org.numenta.nupic.util.NamedTuple; | package org.numenta.nupic.network;
public class ManualInputTest {
/**
* ManualInput retVal = new ManualInput();
* retVal.classifierInput = new HashMap<String, NamedTuple>(this.classifierInput);
* retVal.classifiers = this.classifiers;
* retVal.layerInput = this.layerInput;
* retVal.sdr = this.sdr;
* retVal.encoding = this.encoding;
* retVal.activeColumns = this.activeColumns;
* retVal.sparseActives = this.sparseActives;
* retVal.previousPrediction = this.previousPrediction;
* retVal.currentPrediction = this.currentPrediction;
* retVal.classification = this.classification;
* retVal.anomalyScore = this.anomalyScore;
* retVal.customObject = this.customObject;
*/
@Test
public void testCopy() {
HashMap<String, NamedTuple> classifierInput = new HashMap<>();
NamedTuple classifiers = new NamedTuple(new String[] { "one", "two" }, 1, 2);
Object layerInput = new Object();
int[] sdr = new int[] { 20 };
int[] encoding = new int[40];
int[] activeColumns = new int[25];
int[] sparseActives = new int[2]; | // Path: src/main/java/org/numenta/nupic/model/Cell.java
// public class Cell implements Comparable<Cell>, Serializable {
// /** keep it simple */
// private static final long serialVersionUID = 1L;
//
// /** This cell's index */
// private final int index;
// /** Remove boxing where necessary */
// final Integer boxedIndex;
// /** The owning {@link Column} */
// private final Column column;
// /** Cache this because Cells are immutable */
// private final int hashcode;
//
//
// /**
// * Constructs a new {@code Cell} object
// * @param column the containing {@link Column}
// * @param colSeq the index of this {@code Cell} within its column
// */
// public Cell(Column column, int colSeq) {
// this.column = column;
// this.index = column.getIndex() * column.getNumCellsPerColumn() + colSeq;
// this.boxedIndex = new Integer(index);
// this.hashcode = hashCode();
// }
//
// /**
// * Returns this {@code Cell}'s index.
// * @return
// */
// public int getIndex() {
// return index;
// }
//
// /**
// * Returns the column within which this cell resides
// * @return
// */
// public Column getColumn() {
// return column;
// }
//
// /**
// * Returns the Set of {@link Synapse}s which have this cell
// * as their source cells.
// *
// * @param c the connections state of the temporal memory
// * @return the Set of {@link Synapse}s which have this cell
// * as their source cells.
// */
// public Set<Synapse> getReceptorSynapses(Connections c) {
// return getReceptorSynapses(c, false);
// }
//
// /**
// * Returns the Set of {@link Synapse}s which have this cell
// * as their source cells.
// *
// * @param c the connections state of the temporal memory
// * @param doLazyCreate create a container for future use if true, if false
// * return an orphaned empty set.
// * @return the Set of {@link Synapse}s which have this cell
// * as their source cells.
// */
// public Set<Synapse> getReceptorSynapses(Connections c, boolean doLazyCreate) {
// return c.getReceptorSynapses(this, doLazyCreate);
// }
//
// /**
// * Returns a {@link List} of this {@code Cell}'s {@link DistalDendrite}s
// *
// * @param c the connections state of the temporal memory
// * @param doLazyCreate create a container for future use if true, if false
// * return an orphaned empty set.
// * @return a {@link List} of this {@code Cell}'s {@link DistalDendrite}s
// */
// public List<DistalDendrite> getSegments(Connections c) {
// return getSegments(c, false);
// }
//
// /**
// * Returns a {@link List} of this {@code Cell}'s {@link DistalDendrite}s
// *
// * @param c the connections state of the temporal memory
// * @param doLazyCreate create a container for future use if true, if false
// * return an orphaned empty set.
// * @return a {@link List} of this {@code Cell}'s {@link DistalDendrite}s
// */
// public List<DistalDendrite> getSegments(Connections c, boolean doLazyCreate) {
// return c.getSegments(this, doLazyCreate);
// }
//
// /**
// * {@inheritDoc}
// */
// public String toString() {
// return String.valueOf(index);
// }
//
// /**
// * {@inheritDoc}
// *
// * <em> Note: All comparisons use the cell's index only </em>
// */
// @Override
// public int compareTo(Cell arg0) {
// return boxedIndex.compareTo(arg0.boxedIndex);
// }
//
// @Override
// public int hashCode() {
// if(hashcode == 0) {
// final int prime = 31;
// int result = 1;
// result = prime * result + index;
// return result;
// }
// return hashcode;
// }
//
// @Override
// public boolean equals(Object obj) {
// if(this == obj)
// return true;
// if(obj == null)
// return false;
// if(getClass() != obj.getClass())
// return false;
// Cell other = (Cell)obj;
// if(index != other.index)
// return false;
// return true;
// }
//
// }
// Path: src/test/java/org/numenta/nupic/network/ManualInputTest.java
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.Set;
import org.junit.Test;
import org.numenta.nupic.algorithms.Classification;
import org.numenta.nupic.model.Cell;
import org.numenta.nupic.model.Column;
import org.numenta.nupic.model.ComputeCycle;
import org.numenta.nupic.util.NamedTuple;
package org.numenta.nupic.network;
public class ManualInputTest {
/**
* ManualInput retVal = new ManualInput();
* retVal.classifierInput = new HashMap<String, NamedTuple>(this.classifierInput);
* retVal.classifiers = this.classifiers;
* retVal.layerInput = this.layerInput;
* retVal.sdr = this.sdr;
* retVal.encoding = this.encoding;
* retVal.activeColumns = this.activeColumns;
* retVal.sparseActives = this.sparseActives;
* retVal.previousPrediction = this.previousPrediction;
* retVal.currentPrediction = this.currentPrediction;
* retVal.classification = this.classification;
* retVal.anomalyScore = this.anomalyScore;
* retVal.customObject = this.customObject;
*/
@Test
public void testCopy() {
HashMap<String, NamedTuple> classifierInput = new HashMap<>();
NamedTuple classifiers = new NamedTuple(new String[] { "one", "two" }, 1, 2);
Object layerInput = new Object();
int[] sdr = new int[] { 20 };
int[] encoding = new int[40];
int[] activeColumns = new int[25];
int[] sparseActives = new int[2]; | Set<Cell> activeCells = new LinkedHashSet<>(); activeCells.add(new Cell(new Column(4, 0), 1)); |
mp911de/spinach | src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java | // Path: src/main/java/biz/paluch/spinach/api/DisqueConnection.java
// public interface DisqueConnection<K, V> extends StatefulConnection<K, V> {
//
// /**
// * Returns the {@link DisqueCommands} API for the current connection. Does not create a new connection.
// *
// * @return the synchronous API for the underlying connection.
// */
// DisqueCommands<K, V> sync();
//
// /**
// * Returns the {@link DisqueAsyncCommands} API for the current connection. Does not create a new connection.
// *
// * @return the asynchronous API for the underlying connection.
// */
// DisqueAsyncCommands<K, V> async();
//
// /**
// * Returns the {@link DisqueReactiveCommands} API for the current connection. Does not create a new connection.
// *
// * @return the reactive API for the underlying connection.
// */
// DisqueReactiveCommands<K, V> reactive();
//
// }
| import java.util.concurrent.TimeUnit;
import biz.paluch.spinach.api.DisqueConnection; | /*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach.api.sync;
/**
*
 * Synchronously executed commands for Disque. This API is thread-safe.
*
* @param <K> Key type.
* @param <V> Value type.
* @author Mark Paluch
*/
public interface DisqueCommands<K, V> extends DisqueJobCommands<K, V>, DisqueQueueCommands<K, V>, DisqueServerCommands<K, V>,
DisqueClusterCommands<K, V> {
/**
* Authenticate to the server.
*
* @param password the password
* @return String simple-string-reply
*/
String auth(String password);
/**
* Close the connection. The connection will become not usable anymore as soon as this method was called.
*/
void close();
/**
*
* @return the underlying connection.
*/ | // Path: src/main/java/biz/paluch/spinach/api/DisqueConnection.java
// public interface DisqueConnection<K, V> extends StatefulConnection<K, V> {
//
// /**
// * Returns the {@link DisqueCommands} API for the current connection. Does not create a new connection.
// *
// * @return the synchronous API for the underlying connection.
// */
// DisqueCommands<K, V> sync();
//
// /**
// * Returns the {@link DisqueAsyncCommands} API for the current connection. Does not create a new connection.
// *
// * @return the asynchronous API for the underlying connection.
// */
// DisqueAsyncCommands<K, V> async();
//
// /**
// * Returns the {@link DisqueReactiveCommands} API for the current connection. Does not create a new connection.
// *
// * @return the reactive API for the underlying connection.
// */
// DisqueReactiveCommands<K, V> reactive();
//
// }
// Path: src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java
import java.util.concurrent.TimeUnit;
import biz.paluch.spinach.api.DisqueConnection;
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach.api.sync;
/**
*
 * Synchronously executed commands for Disque. This API is thread-safe.
*
* @param <K> Key type.
* @param <V> Value type.
* @author Mark Paluch
*/
public interface DisqueCommands<K, V> extends DisqueJobCommands<K, V>, DisqueQueueCommands<K, V>, DisqueServerCommands<K, V>,
DisqueClusterCommands<K, V> {
/**
* Authenticate to the server.
*
* @param password the password
* @return String simple-string-reply
*/
String auth(String password);
/**
* Close the connection. The connection will become not usable anymore as soon as this method was called.
*/
void close();
/**
*
* @return the underlying connection.
*/ | DisqueConnection<K, V> getConnection(); |
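A hypothetical end-to-end use of this synchronous API; addjob/getjob/ackjob come from the DisqueJobCommands contract this interface extends, while the DisqueClient bootstrap and the Job type's location are assumptions not shown in this row:

import java.util.concurrent.TimeUnit;

// Hypothetical usage sketch, not a documented quick start.
DisqueClient client = new DisqueClient("localhost");           // assumed entry point
DisqueCommands<String, String> sync = client.connect().sync();
String jobId = sync.addjob("queue", "job body", 1, TimeUnit.SECONDS);
Job<String, String> job = sync.getjob("queue");                // Job type assumed from the api package
sync.ackjob(job.getId());
sync.close();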
mp911de/spinach | src/main/java/biz/paluch/spinach/api/async/DisqueServerAsyncCommands.java | // Path: src/main/java/biz/paluch/spinach/api/CommandType.java
// public enum CommandType implements ProtocolKeyword {
// // Jobs
// ADDJOB, ACKJOB, DELJOB, FASTACK, GETJOB, JSCAN, SHOW,
//
// // Queues
// ENQUEUE, DEQUEUE, NACK, PAUSE, QLEN, QPEEK, QSCAN, QSTAT, WORKING,
//
// // AOF
// BGREWRITEAOF,
//
// // Server commands
// AUTH, CONFIG, CLUSTER, CLIENT, COMMAND, DEBUG, INFO, /* LATENCY, */HELLO, PING, QUIT, SHUTDOWN, SLOWLOG, TIME;
//
// public final byte[] bytes;
//
// CommandType() {
// bytes = name().getBytes(LettuceCharsets.ASCII);
// }
//
// @Override
// public byte[] getBytes() {
// return bytes;
// }
//
// }
| import java.util.List;
import biz.paluch.spinach.api.CommandType;
import com.lambdaworks.redis.KillArgs;
import com.lambdaworks.redis.RedisFuture; | /*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach.api.async;
/**
 * Asynchronously executed commands related to Disque server control.
*
* @param <K> Key type.
* @param <V> Value type.
* @author Mark Paluch
*/
public interface DisqueServerAsyncCommands<K, V> {
/**
* Asynchronously rewrite the append-only file.
*
* @return String simple-string-reply always {@code OK}.
*/
RedisFuture<String> bgrewriteaof();
/**
* Get the current connection name.
*
* @return K bulk-string-reply The connection name, or a null bulk reply if no name is set.
*/
RedisFuture<K> clientGetname();
/**
* Kill connections of clients which are filtered by {@code killArgs}
*
* @param killArgs args for the kill operation
* @return Long integer-reply number of killed connections
*/
RedisFuture<Long> clientKill(KillArgs killArgs);
/**
* Kill the connection of a client identified by ip:port.
*
* @param addr ip:port
* @return String simple-string-reply {@code OK} if the connection exists and has been closed
*/
RedisFuture<String> clientKill(String addr);
/**
* Get the list of client connections.
*
* @return String bulk-string-reply a unique string, formatted as follows: One client connection per line (separated by LF),
* each line is composed of a succession of property=value fields separated by a space character.
*/
RedisFuture<String> clientList();
/**
* Stop processing commands from clients for some time.
*
* @param timeout the timeout value in milliseconds
* @return String simple-string-reply The command returns OK or an error if the timeout is invalid.
*/
RedisFuture<String> clientPause(long timeout);
/**
* Set the current connection name.
*
* @param name the client name
* @return simple-string-reply {@code OK} if the connection name was successfully set.
*/
RedisFuture<String> clientSetname(String name);
/**
* Returns an array reply of details about all Redis commands.
*
* @return List<Object> array-reply
*/
RedisFuture<List<Object>> command();
/**
* Get total number of Redis commands.
*
* @return Long integer-reply of number of total commands in this Redis server.
*/
RedisFuture<Long> commandCount();
/**
* Returns an array reply of details about the requested commands.
*
* @param commands the commands to query for
* @return List<Object> array-reply
*/ | // Path: src/main/java/biz/paluch/spinach/api/CommandType.java
// public enum CommandType implements ProtocolKeyword {
// // Jobs
// ADDJOB, ACKJOB, DELJOB, FASTACK, GETJOB, JSCAN, SHOW,
//
// // Queues
// ENQUEUE, DEQUEUE, NACK, PAUSE, QLEN, QPEEK, QSCAN, QSTAT, WORKING,
//
// // AOF
// BGREWRITEAOF,
//
// // Server commands
// AUTH, CONFIG, CLUSTER, CLIENT, COMMAND, DEBUG, INFO, /* LATENCY, */HELLO, PING, QUIT, SHUTDOWN, SLOWLOG, TIME;
//
// public final byte[] bytes;
//
// CommandType() {
// bytes = name().getBytes(LettuceCharsets.ASCII);
// }
//
// @Override
// public byte[] getBytes() {
// return bytes;
// }
//
// }
// Path: src/main/java/biz/paluch/spinach/api/async/DisqueServerAsyncCommands.java
import java.util.List;
import biz.paluch.spinach.api.CommandType;
import com.lambdaworks.redis.KillArgs;
import com.lambdaworks.redis.RedisFuture;
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach.api.async;
/**
 * Asynchronously executed commands related to Disque server control.
*
* @param <K> Key type.
* @param <V> Value type.
* @author Mark Paluch
*/
public interface DisqueServerAsyncCommands<K, V> {
/**
* Asynchronously rewrite the append-only file.
*
* @return String simple-string-reply always {@code OK}.
*/
RedisFuture<String> bgrewriteaof();
/**
* Get the current connection name.
*
* @return K bulk-string-reply The connection name, or a null bulk reply if no name is set.
*/
RedisFuture<K> clientGetname();
/**
* Kill connections of clients which are filtered by {@code killArgs}
*
* @param killArgs args for the kill operation
* @return Long integer-reply number of killed connections
*/
RedisFuture<Long> clientKill(KillArgs killArgs);
/**
* Kill the connection of a client identified by ip:port.
*
* @param addr ip:port
* @return String simple-string-reply {@code OK} if the connection exists and has been closed
*/
RedisFuture<String> clientKill(String addr);
/**
* Get the list of client connections.
*
* @return String bulk-string-reply a unique string, formatted as follows: One client connection per line (separated by LF),
* each line is composed of a succession of property=value fields separated by a space character.
*/
RedisFuture<String> clientList();
/**
* Stop processing commands from clients for some time.
*
* @param timeout the timeout value in milliseconds
* @return String simple-string-reply The command returns OK or an error if the timeout is invalid.
*/
RedisFuture<String> clientPause(long timeout);
/**
* Set the current connection name.
*
* @param name the client name
* @return simple-string-reply {@code OK} if the connection name was successfully set.
*/
RedisFuture<String> clientSetname(String name);
/**
* Returns an array reply of details about all Redis commands.
*
* @return List<Object> array-reply
*/
RedisFuture<List<Object>> command();
/**
* Get total number of Redis commands.
*
* @return Long integer-reply of number of total commands in this Redis server.
*/
RedisFuture<Long> commandCount();
/**
* Returns an array reply of details about the requested commands.
*
* @param commands the commands to query for
* @return List<Object> array-reply
*/ | RedisFuture<List<Object>> commandInfo(CommandType... commands); |
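These futures extend java.util.concurrent.Future, so a result can be consumed with a bounded blocking get; a minimal sketch, where async is assumed to be an already-obtained DisqueServerAsyncCommands handle:

import java.util.concurrent.TimeUnit;

// Sketch: bounded blocking on one async result; get(...) throws the usual
// checked Future exceptions, omitted here for brevity.
RedisFuture<Long> pending = async.commandCount();
Long totalCommands = pending.get(5, TimeUnit.SECONDS);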
mp911de/spinach | src/main/java/biz/paluch/spinach/impl/HelloClusterSocketAddressSupplier.java | // Path: src/main/java/biz/paluch/spinach/api/DisqueConnection.java
// public interface DisqueConnection<K, V> extends StatefulConnection<K, V> {
//
// /**
// * Returns the {@link DisqueCommands} API for the current connection. Does not create a new connection.
// *
// * @return the synchronous API for the underlying connection.
// */
// DisqueCommands<K, V> sync();
//
// /**
// * Returns the {@link DisqueAsyncCommands} API for the current connection. Does not create a new connection.
// *
// * @return the asynchronous API for the underlying connection.
// */
// DisqueAsyncCommands<K, V> async();
//
// /**
// * Returns the {@link DisqueReactiveCommands} API for the current connection. Does not create a new connection.
// *
// * @return the reactive API for the underlying connection.
// */
// DisqueReactiveCommands<K, V> reactive();
//
// }
//
// Path: src/main/java/biz/paluch/spinach/cluster/DisqueNode.java
// @SuppressWarnings("serial")
// public class DisqueNode implements Serializable {
// private String addr;
// private int port;
// private String nodeId;
//
// private boolean connected;
// private long pingSentTimestamp;
// private long pongReceivedTimestamp;
//
// private Set<NodeFlag> flags;
//
// public DisqueNode() {
//
// }
//
// public DisqueNode(String addr, int port, String nodeId, boolean connected, long pingSentTimestamp,
// long pongReceivedTimestamp, Set<NodeFlag> flags) {
// this.addr = addr;
// this.port = port;
// this.nodeId = nodeId;
// this.connected = connected;
// this.pingSentTimestamp = pingSentTimestamp;
// this.pongReceivedTimestamp = pongReceivedTimestamp;
// this.flags = flags;
// }
//
// public String getAddr() {
// return addr;
// }
//
// public void setAddr(String addr) {
// this.addr = addr;
// }
//
// public int getPort() {
// return port;
// }
//
// public void setPort(int port) {
// this.port = port;
// }
//
// public String getNodeId() {
// return nodeId;
// }
//
// public void setNodeId(String nodeId) {
// LettuceAssert.notNull(nodeId, "nodeId must not be null");
// this.nodeId = nodeId;
// }
//
// public boolean isConnected() {
// return connected;
// }
//
// public void setConnected(boolean connected) {
// this.connected = connected;
// }
//
// public long getPingSentTimestamp() {
// return pingSentTimestamp;
// }
//
// public void setPingSentTimestamp(long pingSentTimestamp) {
// this.pingSentTimestamp = pingSentTimestamp;
// }
//
// public long getPongReceivedTimestamp() {
// return pongReceivedTimestamp;
// }
//
// public void setPongReceivedTimestamp(long pongReceivedTimestamp) {
// this.pongReceivedTimestamp = pongReceivedTimestamp;
// }
//
// public Set<NodeFlag> getFlags() {
// return flags;
// }
//
// public void setFlags(Set<NodeFlag> flags) {
// this.flags = flags;
// }
//
// @Override
// public boolean equals(Object o) {
// if (this == o) {
// return true;
// }
// if (!(o instanceof DisqueNode)) {
// return false;
// }
//
// DisqueNode that = (DisqueNode) o;
//
// if (nodeId != null ? !nodeId.equals(that.nodeId) : that.nodeId != null) {
// return false;
// }
//
// return true;
// }
//
// @Override
// public int hashCode() {
// int result = 31 * (nodeId != null ? nodeId.hashCode() : 0);
// return result;
// }
//
// @Override
// public String toString() {
// final StringBuilder sb = new StringBuilder();
// sb.append(getClass().getSimpleName());
// sb.append(" [addr=").append(addr);
// sb.append(", port='").append(port).append('\'');
// sb.append(", nodeId='").append(nodeId).append('\'');
// sb.append(", connected=").append(connected);
// sb.append(", pingSentTimestamp=").append(pingSentTimestamp);
// sb.append(", pongReceivedTimestamp=").append(pongReceivedTimestamp);
// sb.append(", flags=").append(flags);
// sb.append(']');
// return sb.toString();
// }
//
// public enum NodeFlag {
// NOFLAGS, MYSELF, EVENTUAL_FAIL, FAIL, HANDSHAKE, NOADDR;
// }
// }
| import java.net.InetSocketAddress;
import java.net.SocketAddress;
import biz.paluch.spinach.api.DisqueConnection;
import biz.paluch.spinach.cluster.DisqueNode; | /*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach.impl;
/**
* Supplier for {@link SocketAddress addresses} that is aware of the cluster nodes.
* <p>
* This class performs a {@code HELLO} command handshake upon connection and retrieves the nodes from the command result. The
* node set is not refreshed once it is retrieved. The nodes are used in the order of their priority in a round-robin fashion.
* Until the handshake is completed, a fallback {@link SocketAddressSupplier} is used.
* </p>
*
* @author Mark Paluch
*/
public class HelloClusterSocketAddressSupplier extends ClusterAwareNodeSupport implements SocketAddressSupplier,
ConnectionAware {
protected final SocketAddressSupplier bootstrap; | // Path: src/main/java/biz/paluch/spinach/api/DisqueConnection.java
// public interface DisqueConnection<K, V> extends StatefulConnection<K, V> {
//
// /**
// * Returns the {@link DisqueCommands} API for the current connection. Does not create a new connection.
// *
// * @return the synchronous API for the underlying connection.
// */
// DisqueCommands<K, V> sync();
//
// /**
// * Returns the {@link DisqueAsyncCommands} API for the current connection. Does not create a new connection.
// *
// * @return the asynchronous API for the underlying connection.
// */
// DisqueAsyncCommands<K, V> async();
//
// /**
// * Returns the {@link DisqueReactiveCommands} API for the current connection. Does not create a new connection.
// *
// * @return the reactive API for the underlying connection.
// */
// DisqueReactiveCommands<K, V> reactive();
//
// }
//
// Path: src/main/java/biz/paluch/spinach/cluster/DisqueNode.java
// @SuppressWarnings("serial")
// public class DisqueNode implements Serializable {
// private String addr;
// private int port;
// private String nodeId;
//
// private boolean connected;
// private long pingSentTimestamp;
// private long pongReceivedTimestamp;
//
// private Set<NodeFlag> flags;
//
// public DisqueNode() {
//
// }
//
// public DisqueNode(String addr, int port, String nodeId, boolean connected, long pingSentTimestamp,
// long pongReceivedTimestamp, Set<NodeFlag> flags) {
// this.addr = addr;
// this.port = port;
// this.nodeId = nodeId;
// this.connected = connected;
// this.pingSentTimestamp = pingSentTimestamp;
// this.pongReceivedTimestamp = pongReceivedTimestamp;
// this.flags = flags;
// }
//
// public String getAddr() {
// return addr;
// }
//
// public void setAddr(String addr) {
// this.addr = addr;
// }
//
// public int getPort() {
// return port;
// }
//
// public void setPort(int port) {
// this.port = port;
// }
//
// public String getNodeId() {
// return nodeId;
// }
//
// public void setNodeId(String nodeId) {
// LettuceAssert.notNull(nodeId, "nodeId must not be null");
// this.nodeId = nodeId;
// }
//
// public boolean isConnected() {
// return connected;
// }
//
// public void setConnected(boolean connected) {
// this.connected = connected;
// }
//
// public long getPingSentTimestamp() {
// return pingSentTimestamp;
// }
//
// public void setPingSentTimestamp(long pingSentTimestamp) {
// this.pingSentTimestamp = pingSentTimestamp;
// }
//
// public long getPongReceivedTimestamp() {
// return pongReceivedTimestamp;
// }
//
// public void setPongReceivedTimestamp(long pongReceivedTimestamp) {
// this.pongReceivedTimestamp = pongReceivedTimestamp;
// }
//
// public Set<NodeFlag> getFlags() {
// return flags;
// }
//
// public void setFlags(Set<NodeFlag> flags) {
// this.flags = flags;
// }
//
// @Override
// public boolean equals(Object o) {
// if (this == o) {
// return true;
// }
// if (!(o instanceof DisqueNode)) {
// return false;
// }
//
// DisqueNode that = (DisqueNode) o;
//
// if (nodeId != null ? !nodeId.equals(that.nodeId) : that.nodeId != null) {
// return false;
// }
//
// return true;
// }
//
// @Override
// public int hashCode() {
// int result = 31 * (nodeId != null ? nodeId.hashCode() : 0);
// return result;
// }
//
// @Override
// public String toString() {
// final StringBuilder sb = new StringBuilder();
// sb.append(getClass().getSimpleName());
// sb.append(" [addr=").append(addr);
// sb.append(", port='").append(port).append('\'');
// sb.append(", nodeId='").append(nodeId).append('\'');
// sb.append(", connected=").append(connected);
// sb.append(", pingSentTimestamp=").append(pingSentTimestamp);
// sb.append(", pongReceivedTimestamp=").append(pongReceivedTimestamp);
// sb.append(", flags=").append(flags);
// sb.append(']');
// return sb.toString();
// }
//
// public enum NodeFlag {
// NOFLAGS, MYSELF, EVENTUAL_FAIL, FAIL, HANDSHAKE, NOADDR;
// }
// }
// Path: src/main/java/biz/paluch/spinach/impl/HelloClusterSocketAddressSupplier.java
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import biz.paluch.spinach.api.DisqueConnection;
import biz.paluch.spinach.cluster.DisqueNode;
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach.impl;
/**
* Supplier for {@link SocketAddress addresses} that is aware of the cluster nodes.
* <p>
* This class performs a {@code HELLO} command handshake upon connection and retrieves the nodes from the command result. The
* node set is not refreshed once it is retrieved. The nodes are used in the order of their priority in a round-robin fashion.
* Until the handshake is completed, a fallback {@link SocketAddressSupplier} is used.
* </p>
*
* @author Mark Paluch
*/
public class HelloClusterSocketAddressSupplier extends ClusterAwareNodeSupport implements SocketAddressSupplier,
ConnectionAware {
protected final SocketAddressSupplier bootstrap; | protected RoundRobin<DisqueNode> roundRobin; |
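A minimal usage sketch for the supplier above; the bootstrap host and port are illustrative assumptions, while the constructor and fallback behavior are the ones quoted from HelloClusterSocketAddressSupplier later in this document.

import java.net.InetSocketAddress;
import java.net.SocketAddress;
import biz.paluch.spinach.impl.HelloClusterSocketAddressSupplier;
import biz.paluch.spinach.impl.SocketAddressSupplier;

public class HelloClusterSupplierSketch {
    public static void main(String[] args) {
        // SocketAddressSupplier extends java.util.function.Supplier<SocketAddress>,
        // so a lambda can serve as the bootstrap fallback.
        SocketAddressSupplier bootstrap = () -> InetSocketAddress.createUnresolved("localhost", 7711);
        HelloClusterSocketAddressSupplier supplier = new HelloClusterSocketAddressSupplier(bootstrap);

        // No HELLO handshake has run yet, so the node list is empty and get()
        // delegates to the bootstrap supplier; after a handshake it round-robins
        // over the cluster nodes instead.
        SocketAddress address = supplier.get();
        System.out.println(address);
    }
}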
mp911de/spinach | src/test/java/biz/paluch/spinach/impl/DisqueCommandTest.java | // Path: src/main/java/biz/paluch/spinach/api/CommandKeyword.java
// public enum CommandKeyword implements ProtocolKeyword {
//
// ALL, ASYNC, BCAST, BLOCKING, BUSYLOOP, COUNT, DELAY, FLUSHALL, FORGET, FROM, GET, HARD, ID, IMPORTRATE, IN, LEAVING,
//
// MAXLEN, MEET, MINLEN, NODES, NOHANG, NONE, OUT, QUEUE, REPLICATE, REPLY, RESET, RESETSTAT, RETRY,
//
// REWRITE, SAVECONFIG, SET, SOFT, STATE, TIMEOUT, TTL, WITHCOUNTERS;
//
// public final byte[] bytes;
//
// private CommandKeyword() {
// bytes = name().getBytes(LettuceCharsets.ASCII);
// }
//
// @Override
// public byte[] getBytes() {
// return bytes;
// }
// }
| import static org.assertj.core.api.Assertions.assertThat;
import java.nio.ByteBuffer;
import java.util.concurrent.ExecutionException;
import com.lambdaworks.redis.codec.Utf8StringCodec;
import com.lambdaworks.redis.protocol.AsyncCommand;
import org.junit.After;
import org.junit.Test;
import biz.paluch.spinach.api.CommandKeyword;
import com.lambdaworks.redis.output.StatusOutput; | /*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach.impl;
/**
* @author Mark Paluch
*/
public class DisqueCommandTest {
private Utf8StringCodec codec = new Utf8StringCodec(); | // Path: src/main/java/biz/paluch/spinach/api/CommandKeyword.java
// public enum CommandKeyword implements ProtocolKeyword {
//
// ALL, ASYNC, BCAST, BLOCKING, BUSYLOOP, COUNT, DELAY, FLUSHALL, FORGET, FROM, GET, HARD, ID, IMPORTRATE, IN, LEAVING,
//
// MAXLEN, MEET, MINLEN, NODES, NOHANG, NONE, OUT, QUEUE, REPLICATE, REPLY, RESET, RESETSTAT, RETRY,
//
// REWRITE, SAVECONFIG, SET, SOFT, STATE, TIMEOUT, TTL, WITHCOUNTERS;
//
// public final byte[] bytes;
//
// private CommandKeyword() {
// bytes = name().getBytes(LettuceCharsets.ASCII);
// }
//
// @Override
// public byte[] getBytes() {
// return bytes;
// }
// }
// Path: src/test/java/biz/paluch/spinach/impl/DisqueCommandTest.java
import static org.assertj.core.api.Assertions.assertThat;
import java.nio.ByteBuffer;
import java.util.concurrent.ExecutionException;
import com.lambdaworks.redis.codec.Utf8StringCodec;
import com.lambdaworks.redis.protocol.AsyncCommand;
import org.junit.After;
import org.junit.Test;
import biz.paluch.spinach.api.CommandKeyword;
import com.lambdaworks.redis.output.StatusOutput;
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach.impl;
/**
* @author Mark Paluch
*/
public class DisqueCommandTest {
private Utf8StringCodec codec = new Utf8StringCodec(); | private DisqueCommand<String, String, String> command = new DisqueCommand<String, String, String>(CommandKeyword.GET, |
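The CommandKeyword enum quoted above pre-computes each keyword's ASCII byte representation once, in the enum constructor. A self-contained sketch of the same pattern using only the JDK (the constant names here are illustrative):

import java.nio.charset.StandardCharsets;

public class KeywordBytesSketch {

    // Mirrors the CommandKeyword pattern: every constant materializes its wire
    // representation exactly once, when the enum class is initialized.
    enum Keyword {
        GET, QUEUE, TIMEOUT;

        final byte[] bytes;

        Keyword() {
            bytes = name().getBytes(StandardCharsets.US_ASCII);
        }
    }

    public static void main(String[] args) {
        System.out.println(Keyword.GET.bytes.length); // 3 -- one byte per ASCII character
    }
}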
mp911de/spinach | src/main/java/biz/paluch/spinach/cluster/GetJobsAction.java | // Path: src/main/java/biz/paluch/spinach/api/DisqueConnection.java
// public interface DisqueConnection<K, V> extends StatefulConnection<K, V> {
//
// /**
// * Returns the {@link DisqueCommands} API for the current connection. Does not create a new connection.
// *
// * @return the synchronous API for the underlying connection.
// */
// DisqueCommands<K, V> sync();
//
// /**
// * Returns the {@link DisqueAsyncCommands} API for the current connection. Does not create a new connection.
// *
// * @return the asynchronous API for the underlying connection.
// */
// DisqueAsyncCommands<K, V> async();
//
// /**
// * Returns the {@link DisqueReactiveCommands} API for the current connection. Does not create a new connection.
// *
// * @return the reactive API for the underlying connection.
// */
// DisqueReactiveCommands<K, V> reactive();
//
// }
//
// Path: src/main/java/biz/paluch/spinach/api/Job.java
// public class Job<K, V> {
// private K queue;
// private String id;
// private V body;
// private Map<String, Long> counters;
//
// protected Job() {
// }
//
// public Job(K queue, String id, V body, Map<String, Long> counters) {
// this.queue = queue;
// this.id = id;
// this.body = body;
// this.counters = counters;
// }
//
// /**
// *
// * @return the queue
// */
// public K getQueue() {
// return queue;
// }
//
// /**
// *
// * @return the JobId
// */
// public String getId() {
// return id;
// }
//
// /**
// *
// * @return the Job body
// */
// public V getBody() {
// return body;
// }
//
// /**
// * If requested with a WITHCOUNTERS flag, getjob also populates a counters field.
// *
// * @return map of counters
// */
// public Map<String, Long> getCounters() { return counters; }
// }
//
// Path: src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java
// public interface DisqueCommands<K, V> extends DisqueJobCommands<K, V>, DisqueQueueCommands<K, V>, DisqueServerCommands<K, V>,
// DisqueClusterCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// String auth(String password);
//
// /**
// * Close the connection. The connection will become unusable as soon as this method is called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// String ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// String quit();
//
// /**
// * Set the default timeout for operations.
// *
// * @param timeout the timeout value
// * @param unit the unit of the timeout value
// */
// void setTimeout(long timeout, TimeUnit unit);
// }
| import java.lang.reflect.Field;
import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Supplier;
import rx.Subscriber;
import rx.Subscription;
import rx.functions.Action0;
import biz.paluch.spinach.api.DisqueConnection;
import biz.paluch.spinach.api.Job;
import biz.paluch.spinach.api.sync.DisqueCommands;
import com.google.common.collect.ConcurrentHashMultiset;
import com.google.common.collect.Multiset;
import com.google.common.collect.Multisets;
import com.lambdaworks.redis.RedisChannelHandler;
import com.lambdaworks.redis.RedisChannelWriter;
import com.lambdaworks.redis.RedisException;
import com.lambdaworks.redis.protocol.CommandHandler;
import io.netty.channel.Channel;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory; | /*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach.cluster;
/**
* Subscription action to emit {@link Job} objects. This action is intended to be {@link #call() called} regularly by a
* {@link rx.Scheduler} and emits {@link Job jobs} upon reception from Disque.
* <p>
* The subscription action allows tracking of the producer nodeId when receiving messages from Disque and a reconnect/locality
* improvement by switching the Disque node. Instances are stateful and need to be {@link #close(long, TimeUnit) closed}. The
* shutdown is graceful and waits up to the specified getjobs timeout. The connection is force-closed on timeout expiry.
* </p>
*
*/
class GetJobsAction<K, V> implements Action0 {
private static final InternalLogger log = InternalLoggerFactory.getInstance(GetJobsAction.class);
| // Path: src/main/java/biz/paluch/spinach/api/DisqueConnection.java
// public interface DisqueConnection<K, V> extends StatefulConnection<K, V> {
//
// /**
// * Returns the {@link DisqueCommands} API for the current connection. Does not create a new connection.
// *
// * @return the synchronous API for the underlying connection.
// */
// DisqueCommands<K, V> sync();
//
// /**
// * Returns the {@link DisqueAsyncCommands} API for the current connection. Does not create a new connection.
// *
// * @return the asynchronous API for the underlying connection.
// */
// DisqueAsyncCommands<K, V> async();
//
// /**
// * Returns the {@link DisqueReactiveCommands} API for the current connection. Does not create a new connection.
// *
// * @return the reactive API for the underlying connection.
// */
// DisqueReactiveCommands<K, V> reactive();
//
// }
//
// Path: src/main/java/biz/paluch/spinach/api/Job.java
// public class Job<K, V> {
// private K queue;
// private String id;
// private V body;
// private Map<String, Long> counters;
//
// protected Job() {
// }
//
// public Job(K queue, String id, V body, Map<String, Long> counters) {
// this.queue = queue;
// this.id = id;
// this.body = body;
// this.counters = counters;
// }
//
// /**
// *
// * @return the queue
// */
// public K getQueue() {
// return queue;
// }
//
// /**
// *
// * @return the JobId
// */
// public String getId() {
// return id;
// }
//
// /**
// *
// * @return the Job body
// */
// public V getBody() {
// return body;
// }
//
// /**
// * If requested with a WITHCOUNTERS flag, getjob also populates a counters field.
// *
// * @return map of counters
// */
// public Map<String, Long> getCounters() { return counters; }
// }
//
// Path: src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java
// public interface DisqueCommands<K, V> extends DisqueJobCommands<K, V>, DisqueQueueCommands<K, V>, DisqueServerCommands<K, V>,
// DisqueClusterCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// String auth(String password);
//
// /**
// * Close the connection. The connection will become unusable as soon as this method is called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// String ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// String quit();
//
// /**
// * Set the default timeout for operations.
// *
// * @param timeout the timeout value
// * @param unit the unit of the timeout value
// */
// void setTimeout(long timeout, TimeUnit unit);
// }
// Path: src/main/java/biz/paluch/spinach/cluster/GetJobsAction.java
import java.lang.reflect.Field;
import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Supplier;
import rx.Subscriber;
import rx.Subscription;
import rx.functions.Action0;
import biz.paluch.spinach.api.DisqueConnection;
import biz.paluch.spinach.api.Job;
import biz.paluch.spinach.api.sync.DisqueCommands;
import com.google.common.collect.ConcurrentHashMultiset;
import com.google.common.collect.Multiset;
import com.google.common.collect.Multisets;
import com.lambdaworks.redis.RedisChannelHandler;
import com.lambdaworks.redis.RedisChannelWriter;
import com.lambdaworks.redis.RedisException;
import com.lambdaworks.redis.protocol.CommandHandler;
import io.netty.channel.Channel;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach.cluster;
/**
* Subscription action to emit {@link Job} objects. This action is intended to be {@link #call() called} regularly by a
* {@link rx.Scheduler} and emits {@link Job jobs} upon reception from Disque.
* <p>
* The subscription action allows tracking of the producer nodeId when receiving messages from Disque and a reconnect/locality
* improvement by switching the Disque node. Instances are stateful and need to be {@link #close(long, TimeUnit) closed}. The
* shutdown is graceful and waits up to the specified getjobs timeout. The connection is force-closed on timeout expiry.
* </p>
*
*/
class GetJobsAction<K, V> implements Action0 {
private static final InternalLogger log = InternalLoggerFactory.getInstance(GetJobsAction.class);
| private final DisqueConnection<K, V> disqueConnection; |
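The Job class quoted above is a plain value holder. A minimal construction/read sketch; the queue name, job id, body, and counter key are illustrative values, not output of a real GETJOB:

import java.util.Collections;
import java.util.Map;
import biz.paluch.spinach.api.Job;

public class JobSketch {
    public static void main(String[] args) {
        // The counters map is only populated when GETJOB is issued WITHCOUNTERS.
        Map<String, Long> counters = Collections.singletonMap("additional-deliveries", 0L);

        Job<String, String> job = new Job<String, String>("orders", "illustrative-job-id", "payload", counters);
        System.out.println(job.getQueue() + " / " + job.getId() + " / " + job.getBody());
    }
}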
mp911de/spinach | src/main/java/biz/paluch/spinach/cluster/GetJobsAction.java | // Path: src/main/java/biz/paluch/spinach/api/DisqueConnection.java
// public interface DisqueConnection<K, V> extends StatefulConnection<K, V> {
//
// /**
// * Returns the {@link DisqueCommands} API for the current connection. Does not create a new connection.
// *
// * @return the synchronous API for the underlying connection.
// */
// DisqueCommands<K, V> sync();
//
// /**
// * Returns the {@link DisqueAsyncCommands} API for the current connection. Does not create a new connection.
// *
// * @return the asynchronous API for the underlying connection.
// */
// DisqueAsyncCommands<K, V> async();
//
// /**
// * Returns the {@link DisqueReactiveCommands} API for the current connection. Does not create a new connection.
// *
// * @return the reactive API for the underlying connection.
// */
// DisqueReactiveCommands<K, V> reactive();
//
// }
//
// Path: src/main/java/biz/paluch/spinach/api/Job.java
// public class Job<K, V> {
// private K queue;
// private String id;
// private V body;
// private Map<String, Long> counters;
//
// protected Job() {
// }
//
// public Job(K queue, String id, V body, Map<String, Long> counters) {
// this.queue = queue;
// this.id = id;
// this.body = body;
// this.counters = counters;
// }
//
// /**
// *
// * @return the queue
// */
// public K getQueue() {
// return queue;
// }
//
// /**
// *
// * @return the JobId
// */
// public String getId() {
// return id;
// }
//
// /**
// *
// * @return the Job body
// */
// public V getBody() {
// return body;
// }
//
// /**
// * If requested with a WITHCOUNTERS flag, getjob also populates a counters field.
// *
// * @return map of counters
// */
// public Map<String, Long> getCounters() { return counters; }
// }
//
// Path: src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java
// public interface DisqueCommands<K, V> extends DisqueJobCommands<K, V>, DisqueQueueCommands<K, V>, DisqueServerCommands<K, V>,
// DisqueClusterCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// String auth(String password);
//
// /**
// * Close the connection. The connection will become not usable anymore as soon as this method was called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// String ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// String quit();
//
// /**
// * Set the default timeout for operations.
// *
// * @param timeout the timeout value
// * @param unit the unit of the timeout value
// */
// void setTimeout(long timeout, TimeUnit unit);
// }
| import java.lang.reflect.Field;
import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Supplier;
import rx.Subscriber;
import rx.Subscription;
import rx.functions.Action0;
import biz.paluch.spinach.api.DisqueConnection;
import biz.paluch.spinach.api.Job;
import biz.paluch.spinach.api.sync.DisqueCommands;
import com.google.common.collect.ConcurrentHashMultiset;
import com.google.common.collect.Multiset;
import com.google.common.collect.Multisets;
import com.lambdaworks.redis.RedisChannelHandler;
import com.lambdaworks.redis.RedisChannelWriter;
import com.lambdaworks.redis.RedisException;
import com.lambdaworks.redis.protocol.CommandHandler;
import io.netty.channel.Channel;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory; | /*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach.cluster;
/**
* Subscription action to emit {@link Job} objects. This action is intended to be {@link #call() called} regularly by a
* {@link rx.Scheduler} and emits {@link Job jobs} upon reception from Disque.
* <p>
* The subscription action allows tracking of the producer nodeId when receiving messages from Disque and a reconnect/locality
* improvement by switching the Disque node. Instances are stateful and need to be {@link #close(long, TimeUnit) closed}. The
* shutdown is graceful and waits up to the specified getjobs timeout. The connection is force-closed on timeout expiry.
* </p>
*
*/
class GetJobsAction<K, V> implements Action0 {
private static final InternalLogger log = InternalLoggerFactory.getInstance(GetJobsAction.class);
private final DisqueConnection<K, V> disqueConnection;
private final String subscriptionId; | // Path: src/main/java/biz/paluch/spinach/api/DisqueConnection.java
// public interface DisqueConnection<K, V> extends StatefulConnection<K, V> {
//
// /**
// * Returns the {@link DisqueCommands} API for the current connection. Does not create a new connection.
// *
// * @return the synchronous API for the underlying connection.
// */
// DisqueCommands<K, V> sync();
//
// /**
// * Returns the {@link DisqueAsyncCommands} API for the current connection. Does not create a new connection.
// *
// * @return the asynchronous API for the underlying connection.
// */
// DisqueAsyncCommands<K, V> async();
//
// /**
// * Returns the {@link DisqueReactiveCommands} API for the current connection. Does not create a new connection.
// *
// * @return the reactive API for the underlying connection.
// */
// DisqueReactiveCommands<K, V> reactive();
//
// }
//
// Path: src/main/java/biz/paluch/spinach/api/Job.java
// public class Job<K, V> {
// private K queue;
// private String id;
// private V body;
// private Map<String, Long> counters;
//
// protected Job() {
// }
//
// public Job(K queue, String id, V body, Map<String, Long> counters) {
// this.queue = queue;
// this.id = id;
// this.body = body;
// this.counters = counters;
// }
//
// /**
// *
// * @return the queue
// */
// public K getQueue() {
// return queue;
// }
//
// /**
// *
// * @return the JobId
// */
// public String getId() {
// return id;
// }
//
// /**
// *
// * @return the Job body
// */
// public V getBody() {
// return body;
// }
//
// /**
// * If requested with a WITHCOUNTERS flag, getjob also populates a counters field.
// *
// * @return map of counters
// */
// public Map<String, Long> getCounters() { return counters; }
// }
//
// Path: src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java
// public interface DisqueCommands<K, V> extends DisqueJobCommands<K, V>, DisqueQueueCommands<K, V>, DisqueServerCommands<K, V>,
// DisqueClusterCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// String auth(String password);
//
// /**
// * Close the connection. The connection will become unusable as soon as this method is called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// String ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// String quit();
//
// /**
// * Set the default timeout for operations.
// *
// * @param timeout the timeout value
// * @param unit the unit of the timeout value
// */
// void setTimeout(long timeout, TimeUnit unit);
// }
// Path: src/main/java/biz/paluch/spinach/cluster/GetJobsAction.java
import java.lang.reflect.Field;
import java.util.List;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Supplier;
import rx.Subscriber;
import rx.Subscription;
import rx.functions.Action0;
import biz.paluch.spinach.api.DisqueConnection;
import biz.paluch.spinach.api.Job;
import biz.paluch.spinach.api.sync.DisqueCommands;
import com.google.common.collect.ConcurrentHashMultiset;
import com.google.common.collect.Multiset;
import com.google.common.collect.Multisets;
import com.lambdaworks.redis.RedisChannelHandler;
import com.lambdaworks.redis.RedisChannelWriter;
import com.lambdaworks.redis.RedisException;
import com.lambdaworks.redis.protocol.CommandHandler;
import io.netty.channel.Channel;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach.cluster;
/**
* Subscription action to emit {@link Job} objects. This action is intended to be {@link #call() called} regularly by a
* {@link rx.Scheduler} and emits {@link Job jobs} upon reception from Disque.
* <p>
* The subscription action allows tracking of the producer nodeId when receiving messages from Disque and a reconnect/locality
* improvement by switching the Disque node. Instances are stateful and need to be {@link #close(long, TimeUnit)} closed. The
* shutdown is graceful and waits up to the specified getjobs timeout. The connection is force closed on timeout expiry.
* </p>
*
*/
class GetJobsAction<K, V> implements Action0 {
private static final InternalLogger log = InternalLoggerFactory.getInstance(GetJobsAction.class);
private final DisqueConnection<K, V> disqueConnection;
private final String subscriptionId; | private final Subscriber<? super Job<K, V>> subscriber; |
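GetJobsAction itself is package-private, so the sketch below only illustrates its scheduling contract as described in the Javadoc: an rx.functions.Action0 invoked repeatedly by an rx.Scheduler (RxJava 1.x). The trivial action stands in for the real job poller:

import java.util.concurrent.TimeUnit;
import rx.Scheduler;
import rx.Subscription;
import rx.functions.Action0;
import rx.schedulers.Schedulers;

public class ScheduledActionSketch {
    public static void main(String[] args) throws InterruptedException {
        // Stand-in for GetJobsAction: in the real class, call() performs a
        // blocking GETJOB and emits the received jobs to the subscriber.
        Action0 action = () -> System.out.println("poll once");

        Scheduler.Worker worker = Schedulers.io().createWorker();
        Subscription subscription = worker.schedulePeriodically(action, 0, 100, TimeUnit.MILLISECONDS);

        Thread.sleep(350);
        subscription.unsubscribe(); // stop polling and release the worker
    }
}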
mp911de/spinach | src/main/java/biz/paluch/spinach/cluster/NodeIdAwareSocketAddressSupplier.java | // Path: src/main/java/biz/paluch/spinach/impl/HelloClusterSocketAddressSupplier.java
// public class HelloClusterSocketAddressSupplier extends ClusterAwareNodeSupport implements SocketAddressSupplier,
// ConnectionAware {
//
// protected final SocketAddressSupplier bootstrap;
// protected RoundRobin<DisqueNode> roundRobin;
//
// /**
// *
// * @param bootstrap bootstrap/fallback {@link SocketAddressSupplier} for bootstrapping before any communication is done.
// */
// public HelloClusterSocketAddressSupplier(SocketAddressSupplier bootstrap) {
// this.bootstrap = bootstrap;
// }
//
// @Override
// public SocketAddress get() {
//
// if (getNodes().isEmpty()) {
// return bootstrap.get();
// }
//
// DisqueNode disqueNode = roundRobin.next();
// return InetSocketAddress.createUnresolved(disqueNode.getAddr(), disqueNode.getPort());
// }
//
// @Override
// public <K, V> void setConnection(DisqueConnection<K, V> disqueConnection) {
// super.setConnection(disqueConnection);
// reloadNodes();
// }
//
// @Override
// public void reloadNodes() {
// super.reloadNodes();
// roundRobin = new RoundRobin<DisqueNode>(getNodes());
// }
//
// }
//
// Path: src/main/java/biz/paluch/spinach/impl/RoundRobin.java
// public class RoundRobin<V> {
//
// protected final Collection<? extends V> collection;
// protected V offset;
//
// public RoundRobin(Collection<? extends V> collection) {
// this(collection, null);
// }
//
// public RoundRobin(Collection<? extends V> collection, V offset) {
// this.collection = collection;
// this.offset = offset;
// }
//
// /**
// * Returns the next item.
// *
// * @return the next item
// */
// public V next() {
// if (offset != null) {
// boolean accept = false;
// for (V element : collection) {
// if (element == offset) {
// accept = true;
// continue;
// }
//
// if (accept) {
// return offset = element;
// }
// }
// }
//
// return offset = collection.iterator().next();
// }
//
// }
//
// Path: src/main/java/biz/paluch/spinach/impl/SocketAddressSupplier.java
// public interface SocketAddressSupplier extends Supplier<SocketAddress> {
//
// }
| import java.net.InetSocketAddress;
import java.net.SocketAddress;
import com.lambdaworks.redis.internal.LettuceAssert;
import biz.paluch.spinach.impl.HelloClusterSocketAddressSupplier;
import biz.paluch.spinach.impl.RoundRobin;
import biz.paluch.spinach.impl.SocketAddressSupplier; | /*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach.cluster;
/**
* This mechanism allows setting a preferred node id for the next {@code HELLO} handshake. If
* {@link #setPreferredNodeIdPrefix(String)} is set, the selection mechanism tries to provide a {@link SocketAddress} from the
* preferred node.
*
* @see biz.paluch.spinach.impl.HelloClusterSocketAddressSupplier
*/
public class NodeIdAwareSocketAddressSupplier extends HelloClusterSocketAddressSupplier {
private transient String currentNodeId;
private transient InetSocketAddress currentSocketAddress;
private String preferredNodeIdPrefix;
/**
*
* @param bootstrap bootstrap/fallback {@link SocketAddressSupplier} for bootstrapping before any communication is done.
*/ | // Path: src/main/java/biz/paluch/spinach/impl/HelloClusterSocketAddressSupplier.java
// public class HelloClusterSocketAddressSupplier extends ClusterAwareNodeSupport implements SocketAddressSupplier,
// ConnectionAware {
//
// protected final SocketAddressSupplier bootstrap;
// protected RoundRobin<DisqueNode> roundRobin;
//
// /**
// *
// * @param bootstrap bootstrap/fallback {@link SocketAddressSupplier} for bootstrapping before any communication is done.
// */
// public HelloClusterSocketAddressSupplier(SocketAddressSupplier bootstrap) {
// this.bootstrap = bootstrap;
// }
//
// @Override
// public SocketAddress get() {
//
// if (getNodes().isEmpty()) {
// return bootstrap.get();
// }
//
// DisqueNode disqueNode = roundRobin.next();
// return InetSocketAddress.createUnresolved(disqueNode.getAddr(), disqueNode.getPort());
// }
//
// @Override
// public <K, V> void setConnection(DisqueConnection<K, V> disqueConnection) {
// super.setConnection(disqueConnection);
// reloadNodes();
// }
//
// @Override
// public void reloadNodes() {
// super.reloadNodes();
// roundRobin = new RoundRobin<DisqueNode>(getNodes());
// }
//
// }
//
// Path: src/main/java/biz/paluch/spinach/impl/RoundRobin.java
// public class RoundRobin<V> {
//
// protected final Collection<? extends V> collection;
// protected V offset;
//
// public RoundRobin(Collection<? extends V> collection) {
// this(collection, null);
// }
//
// public RoundRobin(Collection<? extends V> collection, V offset) {
// this.collection = collection;
// this.offset = offset;
// }
//
// /**
// * Returns the next item.
// *
// * @return the next item
// */
// public V next() {
// if (offset != null) {
// boolean accept = false;
// for (V element : collection) {
// if (element == offset) {
// accept = true;
// continue;
// }
//
// if (accept) {
// return offset = element;
// }
// }
// }
//
// return offset = collection.iterator().next();
// }
//
// }
//
// Path: src/main/java/biz/paluch/spinach/impl/SocketAddressSupplier.java
// public interface SocketAddressSupplier extends Supplier<SocketAddress> {
//
// }
// Path: src/main/java/biz/paluch/spinach/cluster/NodeIdAwareSocketAddressSupplier.java
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import com.lambdaworks.redis.internal.LettuceAssert;
import biz.paluch.spinach.impl.HelloClusterSocketAddressSupplier;
import biz.paluch.spinach.impl.RoundRobin;
import biz.paluch.spinach.impl.SocketAddressSupplier;
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach.cluster;
/**
* This mechanism allows setting a preferred node id for the next {@code HELLO} handshake. If
* {@link #setPreferredNodeIdPrefix(String)} is set, the selection mechanism tries to provide a {@link SocketAddress} from the
* preferred node.
*
* @see biz.paluch.spinach.impl.HelloClusterSocketAddressSupplier
*/
public class NodeIdAwareSocketAddressSupplier extends HelloClusterSocketAddressSupplier {
private transient String currentNodeId;
private transient InetSocketAddress currentSocketAddress;
private String preferredNodeIdPrefix;
/**
*
* @param bootstrap bootstrap/fallback {@link SocketAddressSupplier} for bootstrapping before any communication is done.
*/ | public NodeIdAwareSocketAddressSupplier(SocketAddressSupplier bootstrap) { |
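A usage sketch for the preferred-node mechanism; the node-id prefix and bootstrap address are illustrative assumptions. setPreferredNodeIdPrefix (quoted in the next record) biases the round-robin so that get() starts at the matching node once the node list is known:

import java.net.InetSocketAddress;
import java.net.SocketAddress;
import biz.paluch.spinach.cluster.NodeIdAwareSocketAddressSupplier;
import biz.paluch.spinach.impl.SocketAddressSupplier;

public class PreferredNodeSketch {
    public static void main(String[] args) {
        SocketAddressSupplier bootstrap = () -> InetSocketAddress.createUnresolved("localhost", 7711);
        NodeIdAwareSocketAddressSupplier supplier = new NodeIdAwareSocketAddressSupplier(bootstrap);

        // "df4e" is an illustrative node-id prefix; real ids come from the HELLO reply.
        supplier.setPreferredNodeIdPrefix("df4e");

        // With no handshake yet the node list is empty, so this still falls back
        // to the bootstrap supplier; afterwards it favors the preferred node.
        SocketAddress address = supplier.get();
        System.out.println(address);
    }
}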
mp911de/spinach | src/main/java/biz/paluch/spinach/cluster/NodeIdAwareSocketAddressSupplier.java | // Path: src/main/java/biz/paluch/spinach/impl/HelloClusterSocketAddressSupplier.java
// public class HelloClusterSocketAddressSupplier extends ClusterAwareNodeSupport implements SocketAddressSupplier,
// ConnectionAware {
//
// protected final SocketAddressSupplier bootstrap;
// protected RoundRobin<DisqueNode> roundRobin;
//
// /**
// *
// * @param bootstrap bootstrap/fallback {@link SocketAddressSupplier} for bootstrapping before any communication is done.
// */
// public HelloClusterSocketAddressSupplier(SocketAddressSupplier bootstrap) {
// this.bootstrap = bootstrap;
// }
//
// @Override
// public SocketAddress get() {
//
// if (getNodes().isEmpty()) {
// return bootstrap.get();
// }
//
// DisqueNode disqueNode = roundRobin.next();
// return InetSocketAddress.createUnresolved(disqueNode.getAddr(), disqueNode.getPort());
// }
//
// @Override
// public <K, V> void setConnection(DisqueConnection<K, V> disqueConnection) {
// super.setConnection(disqueConnection);
// reloadNodes();
// }
//
// @Override
// public void reloadNodes() {
// super.reloadNodes();
// roundRobin = new RoundRobin<DisqueNode>(getNodes());
// }
//
// }
//
// Path: src/main/java/biz/paluch/spinach/impl/RoundRobin.java
// public class RoundRobin<V> {
//
// protected final Collection<? extends V> collection;
// protected V offset;
//
// public RoundRobin(Collection<? extends V> collection) {
// this(collection, null);
// }
//
// public RoundRobin(Collection<? extends V> collection, V offset) {
// this.collection = collection;
// this.offset = offset;
// }
//
// /**
// * Returns the next item.
// *
// * @return the next item
// */
// public V next() {
// if (offset != null) {
// boolean accept = false;
// for (V element : collection) {
// if (element == offset) {
// accept = true;
// continue;
// }
//
// if (accept) {
// return offset = element;
// }
// }
// }
//
// return offset = collection.iterator().next();
// }
//
// }
//
// Path: src/main/java/biz/paluch/spinach/impl/SocketAddressSupplier.java
// public interface SocketAddressSupplier extends Supplier<SocketAddress> {
//
// }
| import java.net.InetSocketAddress;
import java.net.SocketAddress;
import com.lambdaworks.redis.internal.LettuceAssert;
import biz.paluch.spinach.impl.HelloClusterSocketAddressSupplier;
import biz.paluch.spinach.impl.RoundRobin;
import biz.paluch.spinach.impl.SocketAddressSupplier; | /**
* Set the id prefix of the preferred node.
*
* @param preferredNodeIdPrefix the id prefix of the preferred node
*/
public void setPreferredNodeIdPrefix(String preferredNodeIdPrefix) {
LettuceAssert.notNull(preferredNodeIdPrefix, "preferredNodeIdPrefix must not be null");
boolean resetRoundRobin = false;
if (this.preferredNodeIdPrefix == null || !preferredNodeIdPrefix.equals(this.preferredNodeIdPrefix)) {
resetRoundRobin = true;
}
this.preferredNodeIdPrefix = preferredNodeIdPrefix;
if (resetRoundRobin) {
resetRoundRobin(preferredNodeIdPrefix);
}
}
/**
* Reset the {@link RoundRobin} to start with the node matching the {@code preferredNodeIdPrefix}.
*
* @param preferredNodeIdPrefix the id prefix of the preferred node
*/
private void resetRoundRobin(String preferredNodeIdPrefix) {
DisqueNode previous = null; // remember the previous node because the offset is a marker to start with the next
// element
for (DisqueNode disqueNode : getNodes()) {
if (disqueNode.getNodeId().startsWith(preferredNodeIdPrefix)) { | // Path: src/main/java/biz/paluch/spinach/impl/HelloClusterSocketAddressSupplier.java
// public class HelloClusterSocketAddressSupplier extends ClusterAwareNodeSupport implements SocketAddressSupplier,
// ConnectionAware {
//
// protected final SocketAddressSupplier bootstrap;
// protected RoundRobin<DisqueNode> roundRobin;
//
// /**
// *
// * @param bootstrap bootstrap/fallback {@link SocketAddressSupplier} for bootstrapping before any communication is done.
// */
// public HelloClusterSocketAddressSupplier(SocketAddressSupplier bootstrap) {
// this.bootstrap = bootstrap;
// }
//
// @Override
// public SocketAddress get() {
//
// if (getNodes().isEmpty()) {
// return bootstrap.get();
// }
//
// DisqueNode disqueNode = roundRobin.next();
// return InetSocketAddress.createUnresolved(disqueNode.getAddr(), disqueNode.getPort());
// }
//
// @Override
// public <K, V> void setConnection(DisqueConnection<K, V> disqueConnection) {
// super.setConnection(disqueConnection);
// reloadNodes();
// }
//
// @Override
// public void reloadNodes() {
// super.reloadNodes();
// roundRobin = new RoundRobin<DisqueNode>(getNodes());
// }
//
// }
//
// Path: src/main/java/biz/paluch/spinach/impl/RoundRobin.java
// public class RoundRobin<V> {
//
// protected final Collection<? extends V> collection;
// protected V offset;
//
// public RoundRobin(Collection<? extends V> collection) {
// this(collection, null);
// }
//
// public RoundRobin(Collection<? extends V> collection, V offset) {
// this.collection = collection;
// this.offset = offset;
// }
//
// /**
// * Returns the next item.
// *
// * @return the next item
// */
// public V next() {
// if (offset != null) {
// boolean accept = false;
// for (V element : collection) {
// if (element == offset) {
// accept = true;
// continue;
// }
//
// if (accept) {
// return offset = element;
// }
// }
// }
//
// return offset = collection.iterator().next();
// }
//
// }
//
// Path: src/main/java/biz/paluch/spinach/impl/SocketAddressSupplier.java
// public interface SocketAddressSupplier extends Supplier<SocketAddress> {
//
// }
// Path: src/main/java/biz/paluch/spinach/cluster/NodeIdAwareSocketAddressSupplier.java
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import com.lambdaworks.redis.internal.LettuceAssert;
import biz.paluch.spinach.impl.HelloClusterSocketAddressSupplier;
import biz.paluch.spinach.impl.RoundRobin;
import biz.paluch.spinach.impl.SocketAddressSupplier;
/**
* Set the id prefix of the preferred node.
*
* @param preferredNodeIdPrefix the id prefix of the preferred node
*/
public void setPreferredNodeIdPrefix(String preferredNodeIdPrefix) {
LettuceAssert.notNull(preferredNodeIdPrefix, "preferredNodeIdPrefix must not be null");
boolean resetRoundRobin = false;
if (this.preferredNodeIdPrefix == null || !preferredNodeIdPrefix.equals(this.preferredNodeIdPrefix)) {
resetRoundRobin = true;
}
this.preferredNodeIdPrefix = preferredNodeIdPrefix;
if (resetRoundRobin) {
resetRoundRobin(preferredNodeIdPrefix);
}
}
/**
* Reset the {@link RoundRobin} to start with the node matching the {@code preferredNodeIdPrefix}.
*
* @param preferredNodeIdPrefix the id prefix of the preferred node
*/
private void resetRoundRobin(String preferredNodeIdPrefix) {
DisqueNode previous = null; // remember the previous node because the offset is a marker to start with the next
// element
for (DisqueNode disqueNode : getNodes()) {
if (disqueNode.getNodeId().startsWith(preferredNodeIdPrefix)) { | roundRobin = new RoundRobin<DisqueNode>(getNodes(), previous); |
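The offset passed to the RoundRobin constructor is the element to start after, which is why resetRoundRobin hands over the node preceding the preferred one. A standalone demonstration of that semantic, using strings in place of DisqueNode instances:

import java.util.Arrays;
import java.util.List;
import biz.paluch.spinach.impl.RoundRobin;

public class RoundRobinOffsetSketch {
    public static void main(String[] args) {
        List<String> nodes = Arrays.asList("node-a", "node-b", "node-c");

        // The offset is the element *before* the desired starting point:
        // iteration resumes with the element following it.
        RoundRobin<String> roundRobin = new RoundRobin<String>(nodes, "node-a");

        System.out.println(roundRobin.next()); // node-b
        System.out.println(roundRobin.next()); // node-c
        System.out.println(roundRobin.next()); // node-a -- wraps around
    }
}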
mp911de/spinach | src/main/java/biz/paluch/spinach/api/async/DisqueAsyncCommands.java | // Path: src/main/java/biz/paluch/spinach/api/DisqueConnection.java
// public interface DisqueConnection<K, V> extends StatefulConnection<K, V> {
//
// /**
// * Returns the {@link DisqueCommands} API for the current connection. Does not create a new connection.
// *
// * @return the synchronous API for the underlying connection.
// */
// DisqueCommands<K, V> sync();
//
// /**
// * Returns the {@link DisqueAsyncCommands} API for the current connection. Does not create a new connection.
// *
// * @return the asynchronous API for the underlying connection.
// */
// DisqueAsyncCommands<K, V> async();
//
// /**
// * Returns the {@link DisqueReactiveCommands} API for the current connection. Does not create a new connection.
// *
// * @return the reactive API for the underlying connection.
// */
// DisqueReactiveCommands<K, V> reactive();
//
// }
| import biz.paluch.spinach.api.DisqueConnection;
import com.lambdaworks.redis.RedisFuture; | /*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach.api.async;
/**
* Asynchronously executed commands for Disque. This API is thread-safe.
*
* @param <K> Key type.
* @param <V> Value type.
* @author Mark Paluch
*/
public interface DisqueAsyncCommands<K, V> extends DisqueJobAsyncCommands<K, V>, DisqueQueueAsyncCommands<K, V>,
DisqueServerAsyncCommands<K, V>, DisqueClusterAsyncCommands<K, V> {
/**
* Authenticate to the server.
*
* @param password the password
* @return String simple-string-reply
*/
RedisFuture<String> auth(String password);
/**
* Close the connection. The connection will become unusable as soon as this method is called.
*/
void close();
/**
*
* @return the underlying connection.
*/ | // Path: src/main/java/biz/paluch/spinach/api/DisqueConnection.java
// public interface DisqueConnection<K, V> extends StatefulConnection<K, V> {
//
// /**
// * Returns the {@link DisqueCommands} API for the current connection. Does not create a new connection.
// *
// * @return the synchronous API for the underlying connection.
// */
// DisqueCommands<K, V> sync();
//
// /**
// * Returns the {@link DisqueAsyncCommands} API for the current connection. Does not create a new connection.
// *
// * @return the asynchronous API for the underlying connection.
// */
// DisqueAsyncCommands<K, V> async();
//
// /**
// * Returns the {@link DisqueReactiveCommands} API for the current connection. Does not create a new connection.
// *
// * @return the reactive API for the underlying connection.
// */
// DisqueReactiveCommands<K, V> reactive();
//
// }
// Path: src/main/java/biz/paluch/spinach/api/async/DisqueAsyncCommands.java
import biz.paluch.spinach.api.DisqueConnection;
import com.lambdaworks.redis.RedisFuture;
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach.api.async;
/**
* Asynchronously executed commands for Disque. This API is thread-safe.
*
* @param <K> Key type.
* @param <V> Value type.
* @author Mark Paluch
*/
public interface DisqueAsyncCommands<K, V> extends DisqueJobAsyncCommands<K, V>, DisqueQueueAsyncCommands<K, V>,
DisqueServerAsyncCommands<K, V>, DisqueClusterAsyncCommands<K, V> {
/**
* Authenticate to the server.
*
* @param password the password
* @return String simple-string-reply
*/
RedisFuture<String> auth(String password);
/**
* Close the connection. The connection will become unusable as soon as this method is called.
*/
void close();
/**
*
* @return the underlying connection.
*/ | DisqueConnection<K, V> getConnection(); |
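A sketch of the asynchronous API above, using only the methods quoted from DisqueAsyncCommands plus the DisqueClient bootstrap shown in the next record; the connection URI is an illustrative assumption and a reachable node is required:

import java.util.concurrent.ExecutionException;
import biz.paluch.spinach.DisqueClient;
import biz.paluch.spinach.api.DisqueConnection;
import biz.paluch.spinach.api.async.DisqueAsyncCommands;
import com.lambdaworks.redis.RedisFuture;

public class AsyncPingSketch {
    public static void main(String[] args) throws InterruptedException, ExecutionException {
        DisqueClient client = new DisqueClient("disque://localhost:7711");
        DisqueConnection<String, String> connection = client.connect();

        DisqueAsyncCommands<String, String> async = connection.async();
        RedisFuture<String> pong = async.ping(); // returns immediately, without blocking
        System.out.println(pong.get());          // RedisFuture is a java.util.concurrent.Future

        async.close();
    }
}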
mp911de/spinach | src/test/java/biz/paluch/spinach/Example.java | // Path: src/main/java/biz/paluch/spinach/api/DisqueConnection.java
// public interface DisqueConnection<K, V> extends StatefulConnection<K, V> {
//
// /**
// * Returns the {@link DisqueCommands} API for the current connection. Does not create a new connection.
// *
// * @return the synchronous API for the underlying connection.
// */
// DisqueCommands<K, V> sync();
//
// /**
// * Returns the {@link DisqueAsyncCommands} API for the current connection. Does not create a new connection.
// *
// * @return the asynchronous API for the underlying connection.
// */
// DisqueAsyncCommands<K, V> async();
//
// /**
// * Returns the {@link DisqueReactiveCommands} API for the current connection. Does not create a new connection.
// *
// * @return the reactive API for the underlying connection.
// */
// DisqueReactiveCommands<K, V> reactive();
//
// }
//
// Path: src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java
// public interface DisqueCommands<K, V> extends DisqueJobCommands<K, V>, DisqueQueueCommands<K, V>, DisqueServerCommands<K, V>,
// DisqueClusterCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// String auth(String password);
//
// /**
// * Close the connection. The connection will become not usable anymore as soon as this method was called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// String ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// String quit();
//
// /**
// * Set the default timeout for operations.
// *
// * @param timeout the timeout value
// * @param unit the unit of the timeout value
// */
// void setTimeout(long timeout, TimeUnit unit);
// }
| import biz.paluch.spinach.api.DisqueConnection;
import biz.paluch.spinach.api.sync.DisqueCommands; | /*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach;
/**
* @author Mark Paluch
*/
public class Example {
public static void main(String[] args) {
String nodes = System.getenv("TYND_DISQUE_NODES");
String auth = System.getenv("TYND_DISQUE_AUTH");
DisqueClient disqueClient = new DisqueClient("disque://" + auth + "@" + nodes);
| // Path: src/main/java/biz/paluch/spinach/api/DisqueConnection.java
// public interface DisqueConnection<K, V> extends StatefulConnection<K, V> {
//
// /**
// * Returns the {@link DisqueCommands} API for the current connection. Does not create a new connection.
// *
// * @return the synchronous API for the underlying connection.
// */
// DisqueCommands<K, V> sync();
//
// /**
// * Returns the {@link DisqueAsyncCommands} API for the current connection. Does not create a new connection.
// *
// * @return the asynchronous API for the underlying connection.
// */
// DisqueAsyncCommands<K, V> async();
//
// /**
// * Returns the {@link DisqueReactiveCommands} API for the current connection. Does not create a new connection.
// *
// * @return the reactive API for the underlying connection.
// */
// DisqueReactiveCommands<K, V> reactive();
//
// }
//
// Path: src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java
// public interface DisqueCommands<K, V> extends DisqueJobCommands<K, V>, DisqueQueueCommands<K, V>, DisqueServerCommands<K, V>,
// DisqueClusterCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// String auth(String password);
//
// /**
// * Close the connection. The connection will become unusable as soon as this method is called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// String ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// String quit();
//
// /**
// * Set the default timeout for operations.
// *
// * @param timeout the timeout value
// * @param unit the unit of the timeout value
// */
// void setTimeout(long timeout, TimeUnit unit);
// }
// Path: src/test/java/biz/paluch/spinach/Example.java
import biz.paluch.spinach.api.DisqueConnection;
import biz.paluch.spinach.api.sync.DisqueCommands;
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach;
/**
* @author Mark Paluch
*/
public class Example {
public static void main(String[] args) {
String nodes = System.getenv("TYND_DISQUE_NODES");
String auth = System.getenv("TYND_DISQUE_AUTH");
DisqueClient disqueClient = new DisqueClient("disque://" + auth + "@" + nodes);
| DisqueCommands<String, String> connection = disqueClient.connect().sync(); |
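The record above stops at the line that obtains the synchronous API. For orientation, here is a minimal sketch of how the Example flow plausibly continues, using only methods attested in the context comments (ping, close); the no-argument shutdown() is an assumption inherited from AbstractRedisClient (cf. the FastShutdown helper shown in later records):

DisqueCommands<String, String> connection = disqueClient.connect().sync();
System.out.println("PING -> " + connection.ping()); // simple-string-reply, typically "PONG"
connection.close();       // per the javadoc, the connection is unusable afterwards
disqueClient.shutdown();  // assumed: AbstractRedisClient shutdown, not part of the dataset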
mp911de/spinach | src/test/java/biz/paluch/spinach/SyncAsyncApiConvergenceTest.java | // Path: src/main/java/biz/paluch/spinach/api/async/DisqueAsyncCommands.java
// public interface DisqueAsyncCommands<K, V> extends DisqueJobAsyncCommands<K, V>, DisqueQueueAsyncCommands<K, V>,
// DisqueServerAsyncCommands<K, V>, DisqueClusterAsyncCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// RedisFuture<String> auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method is called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// RedisFuture<String> ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// RedisFuture<String> quit();
//
// }
//
// Path: src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java
// public interface DisqueCommands<K, V> extends DisqueJobCommands<K, V>, DisqueQueueCommands<K, V>, DisqueServerCommands<K, V>,
// DisqueClusterCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// String auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method is called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// String ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// String quit();
//
// /**
// * Set the default timeout for operations.
// *
// * @param timeout the timeout value
// * @param unit the unit of the timeout value
// */
// void setTimeout(long timeout, TimeUnit unit);
// }
| import static org.assertj.core.api.Assertions.*;
import java.lang.reflect.*;
import java.util.ArrayList;
import java.util.List;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import biz.paluch.spinach.api.async.DisqueAsyncCommands;
import biz.paluch.spinach.api.sync.DisqueCommands;
import com.lambdaworks.redis.RedisFuture; | /*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach;
/**
* @author Mark Paluch
*/
@RunWith(Parameterized.class)
public class SyncAsyncApiConvergenceTest {
private Method method;
@SuppressWarnings("rawtypes") | // Path: src/main/java/biz/paluch/spinach/api/async/DisqueAsyncCommands.java
// public interface DisqueAsyncCommands<K, V> extends DisqueJobAsyncCommands<K, V>, DisqueQueueAsyncCommands<K, V>,
// DisqueServerAsyncCommands<K, V>, DisqueClusterAsyncCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// RedisFuture<String> auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method is called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// RedisFuture<String> ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// RedisFuture<String> quit();
//
// }
//
// Path: src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java
// public interface DisqueCommands<K, V> extends DisqueJobCommands<K, V>, DisqueQueueCommands<K, V>, DisqueServerCommands<K, V>,
// DisqueClusterCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// String auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method is called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// String ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// String quit();
//
// /**
// * Set the default timeout for operations.
// *
// * @param timeout the timeout value
// * @param unit the unit of the timeout value
// */
// void setTimeout(long timeout, TimeUnit unit);
// }
// Path: src/test/java/biz/paluch/spinach/SyncAsyncApiConvergenceTest.java
import static org.assertj.core.api.Assertions.*;
import java.lang.reflect.*;
import java.util.ArrayList;
import java.util.List;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import biz.paluch.spinach.api.async.DisqueAsyncCommands;
import biz.paluch.spinach.api.sync.DisqueCommands;
import com.lambdaworks.redis.RedisFuture;
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach;
/**
* @author Mark Paluch
*/
@RunWith(Parameterized.class)
public class SyncAsyncApiConvergenceTest {
private Method method;
@SuppressWarnings("rawtypes") | private Class<DisqueAsyncCommands> asyncClass = DisqueAsyncCommands.class; |
mp911de/spinach | src/test/java/biz/paluch/spinach/SyncAsyncApiConvergenceTest.java | // Path: src/main/java/biz/paluch/spinach/api/async/DisqueAsyncCommands.java
// public interface DisqueAsyncCommands<K, V> extends DisqueJobAsyncCommands<K, V>, DisqueQueueAsyncCommands<K, V>,
// DisqueServerAsyncCommands<K, V>, DisqueClusterAsyncCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// RedisFuture<String> auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method is called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// RedisFuture<String> ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// RedisFuture<String> quit();
//
// }
//
// Path: src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java
// public interface DisqueCommands<K, V> extends DisqueJobCommands<K, V>, DisqueQueueCommands<K, V>, DisqueServerCommands<K, V>,
// DisqueClusterCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// String auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method is called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// String ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// String quit();
//
// /**
// * Set the default timeout for operations.
// *
// * @param timeout the timeout value
// * @param unit the unit of the timeout value
// */
// void setTimeout(long timeout, TimeUnit unit);
// }
| import static org.assertj.core.api.Assertions.*;
import java.lang.reflect.*;
import java.util.ArrayList;
import java.util.List;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import biz.paluch.spinach.api.async.DisqueAsyncCommands;
import biz.paluch.spinach.api.sync.DisqueCommands;
import com.lambdaworks.redis.RedisFuture; | /*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach;
/**
* @author Mark Paluch
*/
@RunWith(Parameterized.class)
public class SyncAsyncApiConvergenceTest {
private Method method;
@SuppressWarnings("rawtypes")
private Class<DisqueAsyncCommands> asyncClass = DisqueAsyncCommands.class;
@Parameterized.Parameters(name = "Method {0}/{1}")
public static List<Object[]> parameters() {
List<Object[]> result = new ArrayList<Object[]>(); | // Path: src/main/java/biz/paluch/spinach/api/async/DisqueAsyncCommands.java
// public interface DisqueAsyncCommands<K, V> extends DisqueJobAsyncCommands<K, V>, DisqueQueueAsyncCommands<K, V>,
// DisqueServerAsyncCommands<K, V>, DisqueClusterAsyncCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// RedisFuture<String> auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method is called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// RedisFuture<String> ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// RedisFuture<String> quit();
//
// }
//
// Path: src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java
// public interface DisqueCommands<K, V> extends DisqueJobCommands<K, V>, DisqueQueueCommands<K, V>, DisqueServerCommands<K, V>,
// DisqueClusterCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// String auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method is called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// String ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// String quit();
//
// /**
// * Set the default timeout for operations.
// *
// * @param timeout the timeout value
// * @param unit the unit of the timeout value
// */
// void setTimeout(long timeout, TimeUnit unit);
// }
// Path: src/test/java/biz/paluch/spinach/SyncAsyncApiConvergenceTest.java
import static org.assertj.core.api.Assertions.*;
import java.lang.reflect.*;
import java.util.ArrayList;
import java.util.List;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import biz.paluch.spinach.api.async.DisqueAsyncCommands;
import biz.paluch.spinach.api.sync.DisqueCommands;
import com.lambdaworks.redis.RedisFuture;
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach;
/**
* @author Mark Paluch
*/
@RunWith(Parameterized.class)
public class SyncAsyncApiConvergenceTest {
private Method method;
@SuppressWarnings("rawtypes")
private Class<DisqueAsyncCommands> asyncClass = DisqueAsyncCommands.class;
@Parameterized.Parameters(name = "Method {0}/{1}")
public static List<Object[]> parameters() {
List<Object[]> result = new ArrayList<Object[]>(); | Method[] methods = DisqueCommands.class.getMethods(); |
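Only the getMethods() call is attested as the next line; a plausible continuation (an assumption, not the dataset's text) yields one JUnit case per declared method, matching the "Method {0}/{1}" name pattern, together with the (String, Method) constructor shape that would consume each case:

for (Method m : methods) {
    result.add(new Object[] { m.getName(), m }); // {0} = name, {1} = full signature
}
return result;

// Hypothetical matching constructor:
public SyncAsyncApiConvergenceTest(String methodName, Method method) {
    this.method = method;
}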
mp911de/spinach | src/test/java/biz/paluch/spinach/commands/rx/RxSyncInvocationHandler.java | // Path: src/main/java/biz/paluch/spinach/api/DisqueConnection.java
// public interface DisqueConnection<K, V> extends StatefulConnection<K, V> {
//
// /**
// * Returns the {@link DisqueCommands} API for the current connection. Does not create a new connection.
// *
// * @return the synchronous API for the underlying connection.
// */
// DisqueCommands<K, V> sync();
//
// /**
// * Returns the {@link DisqueAsyncCommands} API for the current connection. Does not create a new connection.
// *
// * @return the asynchronous API for the underlying connection.
// */
// DisqueAsyncCommands<K, V> async();
//
// /**
// * Returns the {@link DisqueReactiveCommands} API for the current connection. Does not create a new connection.
// *
// * @return the reactive API for the underlying connection.
// */
// DisqueReactiveCommands<K, V> reactive();
//
// }
//
// Path: src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java
// public interface DisqueCommands<K, V> extends DisqueJobCommands<K, V>, DisqueQueueCommands<K, V>, DisqueServerCommands<K, V>,
// DisqueClusterCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// String auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method is called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// String ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// String quit();
//
// /**
// * Set the default timeout for operations.
// *
// * @param timeout the timeout value
// * @param unit the unit of the timeout value
// */
// void setTimeout(long timeout, TimeUnit unit);
// }
| import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import com.lambdaworks.redis.internal.AbstractInvocationHandler;
import com.lambdaworks.redis.internal.LettuceLists;
import biz.paluch.spinach.api.DisqueConnection;
import biz.paluch.spinach.api.sync.DisqueCommands;
import rx.Observable; | Object result = targetMethod.invoke(rxApi, args);
if (result == null || !(result instanceof Observable<?>)) {
return result;
}
Observable<?> observable = (Observable<?>) result;
Iterable<?> objects = observable.toBlocking().toIterable();
if (method.getReturnType().equals(List.class)) {
return LettuceLists.newList(objects);
}
if (method.getReturnType().equals(Set.class)) {
return new LinkedHashSet<>(LettuceLists.newList(objects));
}
Iterator<?> iterator = objects.iterator();
if (iterator.hasNext()) {
return iterator.next();
}
return null;
} catch (InvocationTargetException e) {
throw e.getTargetException();
}
}
| // Path: src/main/java/biz/paluch/spinach/api/DisqueConnection.java
// public interface DisqueConnection<K, V> extends StatefulConnection<K, V> {
//
// /**
// * Returns the {@link DisqueCommands} API for the current connection. Does not create a new connection.
// *
// * @return the synchronous API for the underlying connection.
// */
// DisqueCommands<K, V> sync();
//
// /**
// * Returns the {@link DisqueAsyncCommands} API for the current connection. Does not create a new connection.
// *
// * @return the asynchronous API for the underlying connection.
// */
// DisqueAsyncCommands<K, V> async();
//
// /**
// * Returns the {@link DisqueReactiveCommands} API for the current connection. Does not create a new connection.
// *
// * @return the reactive API for the underlying connection.
// */
// DisqueReactiveCommands<K, V> reactive();
//
// }
//
// Path: src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java
// public interface DisqueCommands<K, V> extends DisqueJobCommands<K, V>, DisqueQueueCommands<K, V>, DisqueServerCommands<K, V>,
// DisqueClusterCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// String auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method is called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// String ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// String quit();
//
// /**
// * Set the default timeout for operations.
// *
// * @param timeout the timeout value
// * @param unit the unit of the timeout value
// */
// void setTimeout(long timeout, TimeUnit unit);
// }
// Path: src/test/java/biz/paluch/spinach/commands/rx/RxSyncInvocationHandler.java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import com.lambdaworks.redis.internal.AbstractInvocationHandler;
import com.lambdaworks.redis.internal.LettuceLists;
import biz.paluch.spinach.api.DisqueConnection;
import biz.paluch.spinach.api.sync.DisqueCommands;
import rx.Observable;
Object result = targetMethod.invoke(rxApi, args);
if (result == null || !(result instanceof Observable<?>)) {
return result;
}
Observable<?> observable = (Observable<?>) result;
Iterable<?> objects = observable.toBlocking().toIterable();
if (method.getReturnType().equals(List.class)) {
return LettuceLists.newList(objects);
}
if (method.getReturnType().equals(Set.class)) {
return new LinkedHashSet<>(LettuceLists.newList(objects));
}
Iterator<?> iterator = objects.iterator();
if (iterator.hasNext()) {
return iterator.next();
}
return null;
} catch (InvocationTargetException e) {
throw e.getTargetException();
}
}
| public static <K, V> DisqueCommands<K, V> sync(DisqueConnection<K, V> connection) { |
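The factory's signature is attested but its body is cut off by the record boundary. A hedged sketch of how sync(...) plausibly finishes; the RxSyncInvocationHandler constructor arguments are assumptions, since its signature does not appear in the dataset:

public static <K, V> DisqueCommands<K, V> sync(DisqueConnection<K, V> connection) {
    // Wrap the reactive API in a JDK dynamic proxy that blocks on each Observable
    // (as the invoke(...) body above does) and expose it as the synchronous interface.
    return (DisqueCommands<K, V>) Proxy.newProxyInstance(
            DisqueCommands.class.getClassLoader(),
            new Class<?>[] { DisqueCommands.class },
            new RxSyncInvocationHandler<K, V>(connection, connection.reactive()));
}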
mp911de/spinach | src/test/java/biz/paluch/spinach/ClientMetricsTest.java | // Path: src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java
// public interface DisqueCommands<K, V> extends DisqueJobCommands<K, V>, DisqueQueueCommands<K, V>, DisqueServerCommands<K, V>,
// DisqueClusterCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// String auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method is called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// String ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// String quit();
//
// /**
// * Set the default timeout for operations.
// *
// * @param timeout the timeout value
// * @param unit the unit of the timeout value
// */
// void setTimeout(long timeout, TimeUnit unit);
// }
//
// Path: src/test/java/biz/paluch/spinach/support/FastShutdown.java
// public class FastShutdown {
//
// /**
// * Shut down a {@link AbstractRedisClient} with a timeout of 10ms.
// *
// * @param redisClient
// */
// public static void shutdown(AbstractRedisClient redisClient) {
// redisClient.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
//
// /**
// * Shut down a {@link ClientResources} client with a timeout of 10ms.
// *
// * @param clientResources
// */
// public static void shutdown(ClientResources clientResources) {
// clientResources.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
// }
| import static com.google.code.tempusfugit.temporal.Duration.seconds;
import static com.google.code.tempusfugit.temporal.Timeout.timeout;
import static org.assertj.core.api.Assertions.assertThat;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.springframework.test.util.ReflectionTestUtils;
import com.google.code.tempusfugit.temporal.Condition;
import com.google.code.tempusfugit.temporal.WaitFor;
import com.lambdaworks.redis.event.DefaultEventPublisherOptions;
import com.lambdaworks.redis.event.Event;
import com.lambdaworks.redis.event.EventBus;
import com.lambdaworks.redis.event.metrics.CommandLatencyEvent;
import com.lambdaworks.redis.event.metrics.MetricEventPublisher;
import com.lambdaworks.redis.resource.ClientResources;
import com.lambdaworks.redis.resource.DefaultClientResources;
import biz.paluch.spinach.api.sync.DisqueCommands;
import biz.paluch.spinach.support.FastShutdown;
import rx.Subscription;
import rx.functions.Func1;
import rx.observers.TestSubscriber; | /*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach;
/**
* @author Mark Paluch
*/
public class ClientMetricsTest {
private ClientResources clientResources;
private DisqueClient disqueClient; | // Path: src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java
// public interface DisqueCommands<K, V> extends DisqueJobCommands<K, V>, DisqueQueueCommands<K, V>, DisqueServerCommands<K, V>,
// DisqueClusterCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// String auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method is called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// String ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// String quit();
//
// /**
// * Set the default timeout for operations.
// *
// * @param timeout the timeout value
// * @param unit the unit of the timeout value
// */
// void setTimeout(long timeout, TimeUnit unit);
// }
//
// Path: src/test/java/biz/paluch/spinach/support/FastShutdown.java
// public class FastShutdown {
//
// /**
// * Shut down a {@link AbstractRedisClient} with a timeout of 10ms.
// *
// * @param redisClient
// */
// public static void shutdown(AbstractRedisClient redisClient) {
// redisClient.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
//
// /**
// * Shut down a {@link ClientResources} client with a timeout of 10ms.
// *
// * @param clientResources
// */
// public static void shutdown(ClientResources clientResources) {
// clientResources.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
// }
// Path: src/test/java/biz/paluch/spinach/ClientMetricsTest.java
import static com.google.code.tempusfugit.temporal.Duration.seconds;
import static com.google.code.tempusfugit.temporal.Timeout.timeout;
import static org.assertj.core.api.Assertions.assertThat;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.springframework.test.util.ReflectionTestUtils;
import com.google.code.tempusfugit.temporal.Condition;
import com.google.code.tempusfugit.temporal.WaitFor;
import com.lambdaworks.redis.event.DefaultEventPublisherOptions;
import com.lambdaworks.redis.event.Event;
import com.lambdaworks.redis.event.EventBus;
import com.lambdaworks.redis.event.metrics.CommandLatencyEvent;
import com.lambdaworks.redis.event.metrics.MetricEventPublisher;
import com.lambdaworks.redis.resource.ClientResources;
import com.lambdaworks.redis.resource.DefaultClientResources;
import biz.paluch.spinach.api.sync.DisqueCommands;
import biz.paluch.spinach.support.FastShutdown;
import rx.Subscription;
import rx.functions.Func1;
import rx.observers.TestSubscriber;
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach;
/**
* @author Mark Paluch
*/
public class ClientMetricsTest {
private ClientResources clientResources;
private DisqueClient disqueClient; | private DisqueCommands<String, String> disque; |
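The record imports everything needed for an event-bus assertion (EventBus, CommandLatencyEvent, TestSubscriber, WaitFor). A sketch, assumed rather than taken from the dataset, of the kind of check ClientMetricsTest builds toward: subscribe to latency events, issue a command, and wait for the publisher to fire:

@Test
public void commandLatencyEventIsPublished() throws Exception {
    final TestSubscriber<Event> subscriber = new TestSubscriber<Event>();
    Subscription subscription = clientResources.eventBus().get()
            .filter(new Func1<Event, Boolean>() {
                @Override
                public Boolean call(Event event) {
                    return event instanceof CommandLatencyEvent; // keep only latency metrics
                }
            }).subscribe(subscriber);

    disque.ping(); // generate at least one measured command

    WaitFor.waitOrTimeout(new Condition() {
        @Override
        public boolean isSatisfied() {
            return !subscriber.getOnNextEvents().isEmpty();
        }
    }, timeout(seconds(5)));

    subscription.unsubscribe();
    assertThat(subscriber.getOnNextEvents()).isNotEmpty();
}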
mp911de/spinach | src/test/java/biz/paluch/spinach/ClientMetricsTest.java | // Path: src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java
// public interface DisqueCommands<K, V> extends DisqueJobCommands<K, V>, DisqueQueueCommands<K, V>, DisqueServerCommands<K, V>,
// DisqueClusterCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// String auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method is called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// String ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// String quit();
//
// /**
// * Set the default timeout for operations.
// *
// * @param timeout the timeout value
// * @param unit the unit of the timeout value
// */
// void setTimeout(long timeout, TimeUnit unit);
// }
//
// Path: src/test/java/biz/paluch/spinach/support/FastShutdown.java
// public class FastShutdown {
//
// /**
// * Shut down a {@link AbstractRedisClient} with a timeout of 10ms.
// *
// * @param redisClient
// */
// public static void shutdown(AbstractRedisClient redisClient) {
// redisClient.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
//
// /**
// * Shut down a {@link ClientResources} client with a timeout of 10ms.
// *
// * @param clientResources
// */
// public static void shutdown(ClientResources clientResources) {
// clientResources.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
// }
| import static com.google.code.tempusfugit.temporal.Duration.seconds;
import static com.google.code.tempusfugit.temporal.Timeout.timeout;
import static org.assertj.core.api.Assertions.assertThat;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.springframework.test.util.ReflectionTestUtils;
import com.google.code.tempusfugit.temporal.Condition;
import com.google.code.tempusfugit.temporal.WaitFor;
import com.lambdaworks.redis.event.DefaultEventPublisherOptions;
import com.lambdaworks.redis.event.Event;
import com.lambdaworks.redis.event.EventBus;
import com.lambdaworks.redis.event.metrics.CommandLatencyEvent;
import com.lambdaworks.redis.event.metrics.MetricEventPublisher;
import com.lambdaworks.redis.resource.ClientResources;
import com.lambdaworks.redis.resource.DefaultClientResources;
import biz.paluch.spinach.api.sync.DisqueCommands;
import biz.paluch.spinach.support.FastShutdown;
import rx.Subscription;
import rx.functions.Func1;
import rx.observers.TestSubscriber; | /*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach;
/**
* @author Mark Paluch
*/
public class ClientMetricsTest {
private ClientResources clientResources;
private DisqueClient disqueClient;
private DisqueCommands<String, String> disque;
@Before
public void before() throws Exception {
clientResources = new DefaultClientResources.Builder()
.commandLatencyPublisherOptions(DefaultEventPublisherOptions.create()).build();
disqueClient = DisqueClient.create(clientResources, DisqueURI.create(TestSettings.host(), TestSettings.port()));
disque = disqueClient.connect().sync();
}
@After
public void after() throws Exception {
disque.close();
| // Path: src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java
// public interface DisqueCommands<K, V> extends DisqueJobCommands<K, V>, DisqueQueueCommands<K, V>, DisqueServerCommands<K, V>,
// DisqueClusterCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// String auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method is called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// String ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// String quit();
//
// /**
// * Set the default timeout for operations.
// *
// * @param timeout the timeout value
// * @param unit the unit of the timeout value
// */
// void setTimeout(long timeout, TimeUnit unit);
// }
//
// Path: src/test/java/biz/paluch/spinach/support/FastShutdown.java
// public class FastShutdown {
//
// /**
// * Shut down a {@link AbstractRedisClient} with a timeout of 10ms.
// *
// * @param redisClient
// */
// public static void shutdown(AbstractRedisClient redisClient) {
// redisClient.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
//
// /**
// * Shut down a {@link ClientResources} client with a timeout of 10ms.
// *
// * @param clientResources
// */
// public static void shutdown(ClientResources clientResources) {
// clientResources.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
// }
// Path: src/test/java/biz/paluch/spinach/ClientMetricsTest.java
import static com.google.code.tempusfugit.temporal.Duration.seconds;
import static com.google.code.tempusfugit.temporal.Timeout.timeout;
import static org.assertj.core.api.Assertions.assertThat;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.springframework.test.util.ReflectionTestUtils;
import com.google.code.tempusfugit.temporal.Condition;
import com.google.code.tempusfugit.temporal.WaitFor;
import com.lambdaworks.redis.event.DefaultEventPublisherOptions;
import com.lambdaworks.redis.event.Event;
import com.lambdaworks.redis.event.EventBus;
import com.lambdaworks.redis.event.metrics.CommandLatencyEvent;
import com.lambdaworks.redis.event.metrics.MetricEventPublisher;
import com.lambdaworks.redis.resource.ClientResources;
import com.lambdaworks.redis.resource.DefaultClientResources;
import biz.paluch.spinach.api.sync.DisqueCommands;
import biz.paluch.spinach.support.FastShutdown;
import rx.Subscription;
import rx.functions.Func1;
import rx.observers.TestSubscriber;
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach;
/**
* @author Mark Paluch
*/
public class ClientMetricsTest {
private ClientResources clientResources;
private DisqueClient disqueClient;
private DisqueCommands<String, String> disque;
@Before
public void before() throws Exception {
clientResources = new DefaultClientResources.Builder()
.commandLatencyPublisherOptions(DefaultEventPublisherOptions.create()).build();
disqueClient = DisqueClient.create(clientResources, DisqueURI.create(TestSettings.host(), TestSettings.port()));
disque = disqueClient.connect().sync();
}
@After
public void after() throws Exception {
disque.close();
| FastShutdown.shutdown(disqueClient); |
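The teardown closes the connection and shuts the client down; it plausibly ends by releasing the shared resources as well (an assumption, mirroring the second helper in the FastShutdown class shown above). Shutting down the client before its ClientResources keeps the ordering safe:

FastShutdown.shutdown(clientResources); // assumed final line of after()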
mp911de/spinach | src/test/java/biz/paluch/spinach/SslTest.java | // Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static String host() {
// return System.getProperty("host", "localhost");
// }
//
// Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static int sslPort() {
// return Integer.valueOf(System.getProperty("sslport", "7443"));
// }
//
// Path: src/test/java/biz/paluch/spinach/support/DefaultDisqueClient.java
// public class DefaultDisqueClient {
//
// public final static DefaultDisqueClient instance = new DefaultDisqueClient();
//
// private DisqueClient disqueClient;
// private ClientResources clientResources;
//
// public DefaultDisqueClient() {
// clientResources = TestClientResources.create();
// disqueClient = DisqueClient.create(clientResources, DisqueURI.Builder.disque(TestSettings.host(), TestSettings.port())
// .build());
// Runtime.getRuntime().addShutdownHook(new Thread() {
// @Override
// public void run() {
// FastShutdown.shutdown(disqueClient);
// }
// });
// }
//
// /**
// * Do not close the client.
// *
// * @return the default disque client for the tests.
// */
// public static DisqueClient get() {
// instance.disqueClient.setDefaultTimeout(60, TimeUnit.SECONDS);
// return instance.disqueClient;
// }
//
// /**
// * Do not close the client resources.
// * @return the default client resources for the tests.
// */
// public static ClientResources getClientResources() {
// return instance.clientResources;
// }
// }
//
// Path: src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java
// public interface DisqueCommands<K, V> extends DisqueJobCommands<K, V>, DisqueQueueCommands<K, V>, DisqueServerCommands<K, V>,
// DisqueClusterCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// String auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method is called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// String ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// String quit();
//
// /**
// * Set the default timeout for operations.
// *
// * @param timeout the timeout value
// * @param unit the unit of the timeout value
// */
// void setTimeout(long timeout, TimeUnit unit);
// }
//
// Path: src/test/java/biz/paluch/spinach/support/FastShutdown.java
// public class FastShutdown {
//
// /**
// * Shut down a {@link AbstractRedisClient} with a timeout of 10ms.
// *
// * @param redisClient
// */
// public static void shutdown(AbstractRedisClient redisClient) {
// redisClient.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
//
// /**
// * Shut down a {@link ClientResources} client with a timeout of 10ms.
// *
// * @param clientResources
// */
// public static void shutdown(ClientResources clientResources) {
// clientResources.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
// }
| import com.lambdaworks.redis.ClientOptions;
import com.lambdaworks.redis.JavaRuntime;
import com.lambdaworks.redis.RedisConnectionException;
import com.lambdaworks.redis.resource.ClientResources;
import static biz.paluch.spinach.TestSettings.host;
import static biz.paluch.spinach.TestSettings.sslPort;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assume.assumeTrue;
import java.io.File;
import biz.paluch.spinach.support.DefaultDisqueClient;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.Test;
import biz.paluch.spinach.api.sync.DisqueCommands;
import biz.paluch.spinach.support.FastShutdown; | /*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach;
/**
* @author Mark Paluch
*/
public class SslTest {
public static final String KEYSTORE = "work/keystore.jks"; | // Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static String host() {
// return System.getProperty("host", "localhost");
// }
//
// Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static int sslPort() {
// return Integer.valueOf(System.getProperty("sslport", "7443"));
// }
//
// Path: src/test/java/biz/paluch/spinach/support/DefaultDisqueClient.java
// public class DefaultDisqueClient {
//
// public final static DefaultDisqueClient instance = new DefaultDisqueClient();
//
// private DisqueClient disqueClient;
// private ClientResources clientResources;
//
// public DefaultDisqueClient() {
// clientResources = TestClientResources.create();
// disqueClient = DisqueClient.create(clientResources, DisqueURI.Builder.disque(TestSettings.host(), TestSettings.port())
// .build());
// Runtime.getRuntime().addShutdownHook(new Thread() {
// @Override
// public void run() {
// FastShutdown.shutdown(disqueClient);
// }
// });
// }
//
// /**
// * Do not close the client.
// *
// * @return the default disque client for the tests.
// */
// public static DisqueClient get() {
// instance.disqueClient.setDefaultTimeout(60, TimeUnit.SECONDS);
// return instance.disqueClient;
// }
//
// /**
// * Do not close the client resources.
// * @return the default client resources for the tests.
// */
// public static ClientResources getClientResources() {
// return instance.clientResources;
// }
// }
//
// Path: src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java
// public interface DisqueCommands<K, V> extends DisqueJobCommands<K, V>, DisqueQueueCommands<K, V>, DisqueServerCommands<K, V>,
// DisqueClusterCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// String auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method is called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// String ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// String quit();
//
// /**
// * Set the default timeout for operations.
// *
// * @param timeout the timeout value
// * @param unit the unit of the timeout value
// */
// void setTimeout(long timeout, TimeUnit unit);
// }
//
// Path: src/test/java/biz/paluch/spinach/support/FastShutdown.java
// public class FastShutdown {
//
// /**
// * Shut down a {@link AbstractRedisClient} with a timeout of 10ms.
// *
// * @param redisClient
// */
// public static void shutdown(AbstractRedisClient redisClient) {
// redisClient.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
//
// /**
// * Shut down a {@link ClientResources} client with a timeout of 10ms.
// *
// * @param clientResources
// */
// public static void shutdown(ClientResources clientResources) {
// clientResources.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
// }
// Path: src/test/java/biz/paluch/spinach/SslTest.java
import com.lambdaworks.redis.ClientOptions;
import com.lambdaworks.redis.JavaRuntime;
import com.lambdaworks.redis.RedisConnectionException;
import com.lambdaworks.redis.resource.ClientResources;
import static biz.paluch.spinach.TestSettings.host;
import static biz.paluch.spinach.TestSettings.sslPort;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assume.assumeTrue;
import java.io.File;
import biz.paluch.spinach.support.DefaultDisqueClient;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.Test;
import biz.paluch.spinach.api.sync.DisqueCommands;
import biz.paluch.spinach.support.FastShutdown;
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach;
/**
* @author Mark Paluch
*/
public class SslTest {
public static final String KEYSTORE = "work/keystore.jks"; | public static ClientResources clientResources = DefaultDisqueClient.getClientResources(); |
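A hedged sketch of the round-trip check an SSL test like this typically performs. Both the withSsl(...) builder call and the connect(DisqueURI) overload are assumptions by analogy with lettuce's RedisURI API; the actual spinach signatures may differ:

@Test
public void pingOverSsl() throws Exception {
    DisqueURI disqueUri = DisqueURI.Builder.disque(host(), sslPort()).withSsl(true).build(); // withSsl: assumed
    DisqueCommands<String, String> connection = disqueClient.connect(disqueUri).sync();      // URI overload: assumed
    assertThat(connection.ping()).isEqualTo("PONG");
    connection.close();
}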
mp911de/spinach | src/test/java/biz/paluch/spinach/SslTest.java | // Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static String host() {
// return System.getProperty("host", "localhost");
// }
//
// Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static int sslPort() {
// return Integer.valueOf(System.getProperty("sslport", "7443"));
// }
//
// Path: src/test/java/biz/paluch/spinach/support/DefaultDisqueClient.java
// public class DefaultDisqueClient {
//
// public final static DefaultDisqueClient instance = new DefaultDisqueClient();
//
// private DisqueClient disqueClient;
// private ClientResources clientResources;
//
// public DefaultDisqueClient() {
// clientResources = TestClientResources.create();
// disqueClient = DisqueClient.create(clientResources, DisqueURI.Builder.disque(TestSettings.host(), TestSettings.port())
// .build());
// Runtime.getRuntime().addShutdownHook(new Thread() {
// @Override
// public void run() {
// FastShutdown.shutdown(disqueClient);
// }
// });
// }
//
// /**
// * Do not close the client.
// *
// * @return the default disque client for the tests.
// */
// public static DisqueClient get() {
// instance.disqueClient.setDefaultTimeout(60, TimeUnit.SECONDS);
// return instance.disqueClient;
// }
//
// /**
// * Do not close the client resources.
// * @return the default client resources for the tests.
// */
// public static ClientResources getClientResources() {
// return instance.clientResources;
// }
// }
//
// Path: src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java
// public interface DisqueCommands<K, V> extends DisqueJobCommands<K, V>, DisqueQueueCommands<K, V>, DisqueServerCommands<K, V>,
// DisqueClusterCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// String auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method is called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// String ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// String quit();
//
// /**
// * Set the default timeout for operations.
// *
// * @param timeout the timeout value
// * @param unit the unit of the timeout value
// */
// void setTimeout(long timeout, TimeUnit unit);
// }
//
// Path: src/test/java/biz/paluch/spinach/support/FastShutdown.java
// public class FastShutdown {
//
// /**
// * Shut down a {@link AbstractRedisClient} with a timeout of 10ms.
// *
// * @param redisClient
// */
// public static void shutdown(AbstractRedisClient redisClient) {
// redisClient.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
//
// /**
// * Shut down a {@link ClientResources} client with a timeout of 10ms.
// *
// * @param clientResources
// */
// public static void shutdown(ClientResources clientResources) {
// clientResources.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
// }
| import com.lambdaworks.redis.ClientOptions;
import com.lambdaworks.redis.JavaRuntime;
import com.lambdaworks.redis.RedisConnectionException;
import com.lambdaworks.redis.resource.ClientResources;
import static biz.paluch.spinach.TestSettings.host;
import static biz.paluch.spinach.TestSettings.sslPort;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assume.assumeTrue;
import java.io.File;
import biz.paluch.spinach.support.DefaultDisqueClient;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.Test;
import biz.paluch.spinach.api.sync.DisqueCommands;
import biz.paluch.spinach.support.FastShutdown; | /*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach;
/**
* @author Mark Paluch
*/
public class SslTest {
public static final String KEYSTORE = "work/keystore.jks";
public static ClientResources clientResources = DefaultDisqueClient.getClientResources();
public static DisqueClient disqueClient = DisqueClient.create(clientResources);
@Before
public void before() throws Exception { | // Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static String host() {
// return System.getProperty("host", "localhost");
// }
//
// Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static int sslPort() {
// return Integer.valueOf(System.getProperty("sslport", "7443"));
// }
//
// Path: src/test/java/biz/paluch/spinach/support/DefaultDisqueClient.java
// public class DefaultDisqueClient {
//
// public final static DefaultDisqueClient instance = new DefaultDisqueClient();
//
// private DisqueClient disqueClient;
// private ClientResources clientResources;
//
// public DefaultDisqueClient() {
// clientResources = TestClientResources.create();
// disqueClient = DisqueClient.create(clientResources, DisqueURI.Builder.disque(TestSettings.host(), TestSettings.port())
// .build());
// Runtime.getRuntime().addShutdownHook(new Thread() {
// @Override
// public void run() {
// FastShutdown.shutdown(disqueClient);
// }
// });
// }
//
// /**
// * Do not close the client.
// *
// * @return the default disque client for the tests.
// */
// public static DisqueClient get() {
// instance.disqueClient.setDefaultTimeout(60, TimeUnit.SECONDS);
// return instance.disqueClient;
// }
//
// /**
// * Do not close the client resources.
// * @return the default client resources for the tests.
// */
// public static ClientResources getClientResources() {
// return instance.clientResources;
// }
// }
//
// Path: src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java
// public interface DisqueCommands<K, V> extends DisqueJobCommands<K, V>, DisqueQueueCommands<K, V>, DisqueServerCommands<K, V>,
// DisqueClusterCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// String auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method is called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// String ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// String quit();
//
// /**
// * Set the default timeout for operations.
// *
// * @param timeout the timeout value
// * @param unit the unit of the timeout value
// */
// void setTimeout(long timeout, TimeUnit unit);
// }
//
// Path: src/test/java/biz/paluch/spinach/support/FastShutdown.java
// public class FastShutdown {
//
// /**
// * Shut down a {@link AbstractRedisClient} with a timeout of 10ms.
// *
// * @param redisClient
// */
// public static void shutdown(AbstractRedisClient redisClient) {
// redisClient.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
//
// /**
// * Shut down a {@link ClientResources} client with a timeout of 10ms.
// *
// * @param clientResources
// */
// public static void shutdown(ClientResources clientResources) {
// clientResources.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
// }
// Path: src/test/java/biz/paluch/spinach/SslTest.java
import com.lambdaworks.redis.ClientOptions;
import com.lambdaworks.redis.JavaRuntime;
import com.lambdaworks.redis.RedisConnectionException;
import com.lambdaworks.redis.resource.ClientResources;
import static biz.paluch.spinach.TestSettings.host;
import static biz.paluch.spinach.TestSettings.sslPort;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assume.assumeTrue;
import java.io.File;
import biz.paluch.spinach.support.DefaultDisqueClient;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.Test;
import biz.paluch.spinach.api.sync.DisqueCommands;
import biz.paluch.spinach.support.FastShutdown;
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach;
/**
* @author Mark Paluch
*/
public class SslTest {
public static final String KEYSTORE = "work/keystore.jks";
public static ClientResources clientResources = DefaultDisqueClient.getClientResources();
public static DisqueClient disqueClient = DisqueClient.create(clientResources);
@Before
public void before() throws Exception { | assumeTrue("Assume that stunnel runs on port 7443", Sockets.isOpen(host(), sslPort())); |
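Beyond the stunnel probe, before() plausibly also wires up trust for the self-signed certificate (an assumption; only the assumeTrue line is attested as the next line). The KEYSTORE constant and the java.io.File import are already part of the record:

File keystore = new File(KEYSTORE);
assumeTrue("Assume that " + KEYSTORE + " exists", keystore.exists());
System.setProperty("javax.net.ssl.trustStore", keystore.getAbsolutePath()); // trust the stunnel cert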
mp911de/spinach | src/test/java/biz/paluch/spinach/SslTest.java | // Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static String host() {
// return System.getProperty("host", "localhost");
// }
//
// Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static int sslPort() {
// return Integer.valueOf(System.getProperty("sslport", "7443"));
// }
//
// Path: src/test/java/biz/paluch/spinach/support/DefaultDisqueClient.java
// public class DefaultDisqueClient {
//
// public final static DefaultDisqueClient instance = new DefaultDisqueClient();
//
// private DisqueClient disqueClient;
// private ClientResources clientResources;
//
// public DefaultDisqueClient() {
// clientResources = TestClientResources.create();
// disqueClient = DisqueClient.create(clientResources, DisqueURI.Builder.disque(TestSettings.host(), TestSettings.port())
// .build());
// Runtime.getRuntime().addShutdownHook(new Thread() {
// @Override
// public void run() {
// FastShutdown.shutdown(disqueClient);
// }
// });
// }
//
// /**
// * Do not close the client.
// *
// * @return the default disque client for the tests.
// */
// public static DisqueClient get() {
// instance.disqueClient.setDefaultTimeout(60, TimeUnit.SECONDS);
// return instance.disqueClient;
// }
//
// /**
// * Do not close the client resources.
// * @return the default client resources for the tests.
// */
// public static ClientResources getClientResources() {
// return instance.clientResources;
// }
// }
//
// Path: src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java
// public interface DisqueCommands<K, V> extends DisqueJobCommands<K, V>, DisqueQueueCommands<K, V>, DisqueServerCommands<K, V>,
// DisqueClusterCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// String auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method has been called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// String ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// String quit();
//
// /**
// * Set the default timeout for operations.
// *
// * @param timeout the timeout value
// * @param unit the unit of the timeout value
// */
// void setTimeout(long timeout, TimeUnit unit);
// }
//
// Path: src/test/java/biz/paluch/spinach/support/FastShutdown.java
// public class FastShutdown {
//
// /**
// * Shut down an {@link AbstractRedisClient} with a timeout of 10ms.
// *
// * @param redisClient the client to shut down
// */
// public static void shutdown(AbstractRedisClient redisClient) {
// redisClient.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
//
// /**
// * Shut down a {@link ClientResources} instance with a timeout of 10ms.
// *
// * @param clientResources the client resources to shut down
// */
// public static void shutdown(ClientResources clientResources) {
// clientResources.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
// }
| import com.lambdaworks.redis.ClientOptions;
import com.lambdaworks.redis.JavaRuntime;
import com.lambdaworks.redis.RedisConnectionException;
import com.lambdaworks.redis.resource.ClientResources;
import static biz.paluch.spinach.TestSettings.host;
import static biz.paluch.spinach.TestSettings.sslPort;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assume.assumeTrue;
import java.io.File;
import biz.paluch.spinach.support.DefaultDisqueClient;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.Test;
import biz.paluch.spinach.api.sync.DisqueCommands;
import biz.paluch.spinach.support.FastShutdown; | /*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach;
/**
* @author Mark Paluch
*/
public class SslTest {
public static final String KEYSTORE = "work/keystore.jks";
public static ClientResources clientResources = DefaultDisqueClient.getClientResources();
public static DisqueClient disqueClient = DisqueClient.create(clientResources);
@Before
public void before() throws Exception { | // Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static String host() {
// return System.getProperty("host", "localhost");
// }
//
// Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static int sslPort() {
// return Integer.valueOf(System.getProperty("sslport", "7443"));
// }
//
// Path: src/test/java/biz/paluch/spinach/support/DefaultDisqueClient.java
// public class DefaultDisqueClient {
//
// public final static DefaultDisqueClient instance = new DefaultDisqueClient();
//
// private DisqueClient disqueClient;
// private ClientResources clientResources;
//
// public DefaultDisqueClient() {
// clientResources = TestClientResources.create();
// disqueClient = DisqueClient.create(clientResources, DisqueURI.Builder.disque(TestSettings.host(), TestSettings.port())
// .build());
// Runtime.getRuntime().addShutdownHook(new Thread() {
// @Override
// public void run() {
// FastShutdown.shutdown(disqueClient);
// }
// });
// }
//
// /**
// * Do not close the client.
// *
// * @return the default disque client for the tests.
// */
// public static DisqueClient get() {
// instance.disqueClient.setDefaultTimeout(60, TimeUnit.SECONDS);
// return instance.disqueClient;
// }
//
// /**
// * Do not close the client resources.
// * @return the default client resources for the tests.
// */
// public static ClientResources getClientResources() {
// return instance.clientResources;
// }
// }
//
// Path: src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java
// public interface DisqueCommands<K, V> extends DisqueJobCommands<K, V>, DisqueQueueCommands<K, V>, DisqueServerCommands<K, V>,
// DisqueClusterCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// String auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method has been called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// String ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// String quit();
//
// /**
// * Set the default timeout for operations.
// *
// * @param timeout the timeout value
// * @param unit the unit of the timeout value
// */
// void setTimeout(long timeout, TimeUnit unit);
// }
//
// Path: src/test/java/biz/paluch/spinach/support/FastShutdown.java
// public class FastShutdown {
//
// /**
// * Shut down an {@link AbstractRedisClient} with a timeout of 10ms.
// *
// * @param redisClient the client to shut down
// */
// public static void shutdown(AbstractRedisClient redisClient) {
// redisClient.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
//
// /**
// * Shut down a {@link ClientResources} instance with a timeout of 10ms.
// *
// * @param clientResources the client resources to shut down
// */
// public static void shutdown(ClientResources clientResources) {
// clientResources.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
// }
// Path: src/test/java/biz/paluch/spinach/SslTest.java
import com.lambdaworks.redis.ClientOptions;
import com.lambdaworks.redis.JavaRuntime;
import com.lambdaworks.redis.RedisConnectionException;
import com.lambdaworks.redis.resource.ClientResources;
import static biz.paluch.spinach.TestSettings.host;
import static biz.paluch.spinach.TestSettings.sslPort;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assume.assumeTrue;
import java.io.File;
import biz.paluch.spinach.support.DefaultDisqueClient;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.Test;
import biz.paluch.spinach.api.sync.DisqueCommands;
import biz.paluch.spinach.support.FastShutdown;
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach;
/**
* @author Mark Paluch
*/
public class SslTest {
public static final String KEYSTORE = "work/keystore.jks";
public static ClientResources clientResources = DefaultDisqueClient.getClientResources();
public static DisqueClient disqueClient = DisqueClient.create(clientResources);
@Before
public void before() throws Exception { | assumeTrue("Assume that stunnel runs on port 7443", Sockets.isOpen(host(), sslPort())); |
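// A minimal sketch of the trust-store wiring the @Before method performs, assuming
// the standard JSSE system properties: any SSLContext created afterwards trusts
// certificates from work/keystore.jks.
class TrustStoreSetupSketch {
    static void pointJvmAtTestKeystore() {
        System.setProperty("javax.net.ssl.trustStore", "work/keystore.jks");
        // A password property would also be needed for protected keystores; the test
        // above does not set one, so the value here is purely illustrative:
        // System.setProperty("javax.net.ssl.trustStorePassword", "changeit");
    }
}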
mp911de/spinach | src/test/java/biz/paluch/spinach/SslTest.java | // Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static String host() {
// return System.getProperty("host", "localhost");
// }
//
// Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static int sslPort() {
// return Integer.valueOf(System.getProperty("sslport", "7443"));
// }
//
// Path: src/test/java/biz/paluch/spinach/support/DefaultDisqueClient.java
// public class DefaultDisqueClient {
//
// public final static DefaultDisqueClient instance = new DefaultDisqueClient();
//
// private DisqueClient disqueClient;
// private ClientResources clientResources;
//
// public DefaultDisqueClient() {
// clientResources = TestClientResources.create();
// disqueClient = DisqueClient.create(clientResources, DisqueURI.Builder.disque(TestSettings.host(), TestSettings.port())
// .build());
// Runtime.getRuntime().addShutdownHook(new Thread() {
// @Override
// public void run() {
// FastShutdown.shutdown(disqueClient);
// }
// });
// }
//
// /**
// * Do not close the client.
// *
// * @return the default disque client for the tests.
// */
// public static DisqueClient get() {
// instance.disqueClient.setDefaultTimeout(60, TimeUnit.SECONDS);
// return instance.disqueClient;
// }
//
// /**
// * Do not close the client resources.
// * @return the default client resources for the tests.
// */
// public static ClientResources getClientResources() {
// return instance.clientResources;
// }
// }
//
// Path: src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java
// public interface DisqueCommands<K, V> extends DisqueJobCommands<K, V>, DisqueQueueCommands<K, V>, DisqueServerCommands<K, V>,
// DisqueClusterCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// String auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method has been called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// String ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// String quit();
//
// /**
// * Set the default timeout for operations.
// *
// * @param timeout the timeout value
// * @param unit the unit of the timeout value
// */
// void setTimeout(long timeout, TimeUnit unit);
// }
//
// Path: src/test/java/biz/paluch/spinach/support/FastShutdown.java
// public class FastShutdown {
//
// /**
// * Shut down an {@link AbstractRedisClient} with a timeout of 10ms.
// *
// * @param redisClient the client to shut down
// */
// public static void shutdown(AbstractRedisClient redisClient) {
// redisClient.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
//
// /**
// * Shut down a {@link ClientResources} instance with a timeout of 10ms.
// *
// * @param clientResources the client resources to shut down
// */
// public static void shutdown(ClientResources clientResources) {
// clientResources.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
// }
| import com.lambdaworks.redis.ClientOptions;
import com.lambdaworks.redis.JavaRuntime;
import com.lambdaworks.redis.RedisConnectionException;
import com.lambdaworks.redis.resource.ClientResources;
import static biz.paluch.spinach.TestSettings.host;
import static biz.paluch.spinach.TestSettings.sslPort;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assume.assumeTrue;
import java.io.File;
import biz.paluch.spinach.support.DefaultDisqueClient;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.Test;
import biz.paluch.spinach.api.sync.DisqueCommands;
import biz.paluch.spinach.support.FastShutdown; | /*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach;
/**
* @author Mark Paluch
*/
public class SslTest {
public static final String KEYSTORE = "work/keystore.jks";
public static ClientResources clientResources = DefaultDisqueClient.getClientResources();
public static DisqueClient disqueClient = DisqueClient.create(clientResources);
@Before
public void before() throws Exception {
assumeTrue("Assume that stunnel runs on port 7443", Sockets.isOpen(host(), sslPort()));
assertThat(new File(KEYSTORE)).exists();
System.setProperty("javax.net.ssl.trustStore", KEYSTORE);
}
@AfterClass
public static void afterClass() { | // Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static String host() {
// return System.getProperty("host", "localhost");
// }
//
// Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static int sslPort() {
// return Integer.valueOf(System.getProperty("sslport", "7443"));
// }
//
// Path: src/test/java/biz/paluch/spinach/support/DefaultDisqueClient.java
// public class DefaultDisqueClient {
//
// public final static DefaultDisqueClient instance = new DefaultDisqueClient();
//
// private DisqueClient disqueClient;
// private ClientResources clientResources;
//
// public DefaultDisqueClient() {
// clientResources = TestClientResources.create();
// disqueClient = DisqueClient.create(clientResources, DisqueURI.Builder.disque(TestSettings.host(), TestSettings.port())
// .build());
// Runtime.getRuntime().addShutdownHook(new Thread() {
// @Override
// public void run() {
// FastShutdown.shutdown(disqueClient);
// }
// });
// }
//
// /**
// * Do not close the client.
// *
// * @return the default disque client for the tests.
// */
// public static DisqueClient get() {
// instance.disqueClient.setDefaultTimeout(60, TimeUnit.SECONDS);
// return instance.disqueClient;
// }
//
// /**
// * Do not close the client resources.
// * @return the default client resources for the tests.
// */
// public static ClientResources getClientResources() {
// return instance.clientResources;
// }
// }
//
// Path: src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java
// public interface DisqueCommands<K, V> extends DisqueJobCommands<K, V>, DisqueQueueCommands<K, V>, DisqueServerCommands<K, V>,
// DisqueClusterCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// String auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method has been called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// String ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// String quit();
//
// /**
// * Set the default timeout for operations.
// *
// * @param timeout the timeout value
// * @param unit the unit of the timeout value
// */
// void setTimeout(long timeout, TimeUnit unit);
// }
//
// Path: src/test/java/biz/paluch/spinach/support/FastShutdown.java
// public class FastShutdown {
//
// /**
// * Shut down an {@link AbstractRedisClient} with a timeout of 10ms.
// *
// * @param redisClient the client to shut down
// */
// public static void shutdown(AbstractRedisClient redisClient) {
// redisClient.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
//
// /**
// * Shut down a {@link ClientResources} instance with a timeout of 10ms.
// *
// * @param clientResources the client resources to shut down
// */
// public static void shutdown(ClientResources clientResources) {
// clientResources.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
// }
// Path: src/test/java/biz/paluch/spinach/SslTest.java
import com.lambdaworks.redis.ClientOptions;
import com.lambdaworks.redis.JavaRuntime;
import com.lambdaworks.redis.RedisConnectionException;
import com.lambdaworks.redis.resource.ClientResources;
import static biz.paluch.spinach.TestSettings.host;
import static biz.paluch.spinach.TestSettings.sslPort;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assume.assumeTrue;
import java.io.File;
import biz.paluch.spinach.support.DefaultDisqueClient;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.Test;
import biz.paluch.spinach.api.sync.DisqueCommands;
import biz.paluch.spinach.support.FastShutdown;
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach;
/**
* @author Mark Paluch
*/
public class SslTest {
public static final String KEYSTORE = "work/keystore.jks";
public static ClientResources clientResources = DefaultDisqueClient.getClientResources();
public static DisqueClient disqueClient = DisqueClient.create(clientResources);
@Before
public void before() throws Exception {
assumeTrue("Assume that stunnel runs on port 7443", Sockets.isOpen(host(), sslPort()));
assertThat(new File(KEYSTORE)).exists();
System.setProperty("javax.net.ssl.trustStore", KEYSTORE);
}
@AfterClass
public static void afterClass() { | FastShutdown.shutdown(disqueClient); |
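// Why shutdown(10, 10, TimeUnit.MILLISECONDS)? In lettuce the first argument is the
// quiet period (grace time for in-flight work) and the second the hard timeout, so
// FastShutdown gives a throwaway test client roughly 10-20 ms to terminate. A small
// self-contained sketch of the same teardown, under that reading of the signature:
import java.util.concurrent.TimeUnit;
import com.lambdaworks.redis.AbstractRedisClient;

class InlineTeardownSketch {
    static void tearDown(AbstractRedisClient client) {
        client.shutdown(10, 10, TimeUnit.MILLISECONDS); // quietPeriod, timeout, unit
    }
}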
mp911de/spinach | src/test/java/biz/paluch/spinach/SslTest.java | // Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static String host() {
// return System.getProperty("host", "localhost");
// }
//
// Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static int sslPort() {
// return Integer.valueOf(System.getProperty("sslport", "7443"));
// }
//
// Path: src/test/java/biz/paluch/spinach/support/DefaultDisqueClient.java
// public class DefaultDisqueClient {
//
// public final static DefaultDisqueClient instance = new DefaultDisqueClient();
//
// private DisqueClient disqueClient;
// private ClientResources clientResources;
//
// public DefaultDisqueClient() {
// clientResources = TestClientResources.create();
// disqueClient = DisqueClient.create(clientResources, DisqueURI.Builder.disque(TestSettings.host(), TestSettings.port())
// .build());
// Runtime.getRuntime().addShutdownHook(new Thread() {
// @Override
// public void run() {
// FastShutdown.shutdown(disqueClient);
// }
// });
// }
//
// /**
// * Do not close the client.
// *
// * @return the default disque client for the tests.
// */
// public static DisqueClient get() {
// instance.disqueClient.setDefaultTimeout(60, TimeUnit.SECONDS);
// return instance.disqueClient;
// }
//
// /**
// * Do not close the client resources.
// * @return the default client resources for the tests.
// */
// public static ClientResources getClientResources() {
// return instance.clientResources;
// }
// }
//
// Path: src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java
// public interface DisqueCommands<K, V> extends DisqueJobCommands<K, V>, DisqueQueueCommands<K, V>, DisqueServerCommands<K, V>,
// DisqueClusterCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// String auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method has been called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// String ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// String quit();
//
// /**
// * Set the default timeout for operations.
// *
// * @param timeout the timeout value
// * @param unit the unit of the timeout value
// */
// void setTimeout(long timeout, TimeUnit unit);
// }
//
// Path: src/test/java/biz/paluch/spinach/support/FastShutdown.java
// public class FastShutdown {
//
// /**
// * Shut down an {@link AbstractRedisClient} with a timeout of 10ms.
// *
// * @param redisClient the client to shut down
// */
// public static void shutdown(AbstractRedisClient redisClient) {
// redisClient.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
//
// /**
// * Shut down a {@link ClientResources} instance with a timeout of 10ms.
// *
// * @param clientResources the client resources to shut down
// */
// public static void shutdown(ClientResources clientResources) {
// clientResources.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
// }
| import com.lambdaworks.redis.ClientOptions;
import com.lambdaworks.redis.JavaRuntime;
import com.lambdaworks.redis.RedisConnectionException;
import com.lambdaworks.redis.resource.ClientResources;
import static biz.paluch.spinach.TestSettings.host;
import static biz.paluch.spinach.TestSettings.sslPort;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assume.assumeTrue;
import java.io.File;
import biz.paluch.spinach.support.DefaultDisqueClient;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.Test;
import biz.paluch.spinach.api.sync.DisqueCommands;
import biz.paluch.spinach.support.FastShutdown; | /*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach;
/**
* @author Mark Paluch
*/
public class SslTest {
public static final String KEYSTORE = "work/keystore.jks";
public static ClientResources clientResources = DefaultDisqueClient.getClientResources();
public static DisqueClient disqueClient = DisqueClient.create(clientResources);
@Before
public void before() throws Exception {
assumeTrue("Assume that stunnel runs on port 7443", Sockets.isOpen(host(), sslPort()));
assertThat(new File(KEYSTORE)).exists();
System.setProperty("javax.net.ssl.trustStore", KEYSTORE);
}
@AfterClass
public static void afterClass() {
FastShutdown.shutdown(disqueClient);
}
@Test
public void regularSsl() throws Exception {
DisqueURI disqueUri = DisqueURI.Builder.disque(host(), sslPort()).withSsl(true).withVerifyPeer(false).build();
| // Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static String host() {
// return System.getProperty("host", "localhost");
// }
//
// Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static int sslPort() {
// return Integer.valueOf(System.getProperty("sslport", "7443"));
// }
//
// Path: src/test/java/biz/paluch/spinach/support/DefaultDisqueClient.java
// public class DefaultDisqueClient {
//
// public final static DefaultDisqueClient instance = new DefaultDisqueClient();
//
// private DisqueClient disqueClient;
// private ClientResources clientResources;
//
// public DefaultDisqueClient() {
// clientResources = TestClientResources.create();
// disqueClient = DisqueClient.create(clientResources, DisqueURI.Builder.disque(TestSettings.host(), TestSettings.port())
// .build());
// Runtime.getRuntime().addShutdownHook(new Thread() {
// @Override
// public void run() {
// FastShutdown.shutdown(disqueClient);
// }
// });
// }
//
// /**
// * Do not close the client.
// *
// * @return the default disque client for the tests.
// */
// public static DisqueClient get() {
// instance.disqueClient.setDefaultTimeout(60, TimeUnit.SECONDS);
// return instance.disqueClient;
// }
//
// /**
// * Do not close the client resources.
// * @return the default client resources for the tests.
// */
// public static ClientResources getClientResources() {
// return instance.clientResources;
// }
// }
//
// Path: src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java
// public interface DisqueCommands<K, V> extends DisqueJobCommands<K, V>, DisqueQueueCommands<K, V>, DisqueServerCommands<K, V>,
// DisqueClusterCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// String auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method has been called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// String ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// String quit();
//
// /**
// * Set the default timeout for operations.
// *
// * @param timeout the timeout value
// * @param unit the unit of the timeout value
// */
// void setTimeout(long timeout, TimeUnit unit);
// }
//
// Path: src/test/java/biz/paluch/spinach/support/FastShutdown.java
// public class FastShutdown {
//
// /**
// * Shut down an {@link AbstractRedisClient} with a timeout of 10ms.
// *
// * @param redisClient the client to shut down
// */
// public static void shutdown(AbstractRedisClient redisClient) {
// redisClient.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
//
// /**
// * Shut down a {@link ClientResources} instance with a timeout of 10ms.
// *
// * @param clientResources the client resources to shut down
// */
// public static void shutdown(ClientResources clientResources) {
// clientResources.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
// }
// Path: src/test/java/biz/paluch/spinach/SslTest.java
import com.lambdaworks.redis.ClientOptions;
import com.lambdaworks.redis.JavaRuntime;
import com.lambdaworks.redis.RedisConnectionException;
import com.lambdaworks.redis.resource.ClientResources;
import static biz.paluch.spinach.TestSettings.host;
import static biz.paluch.spinach.TestSettings.sslPort;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assume.assumeTrue;
import java.io.File;
import biz.paluch.spinach.support.DefaultDisqueClient;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.Test;
import biz.paluch.spinach.api.sync.DisqueCommands;
import biz.paluch.spinach.support.FastShutdown;
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach;
/**
* @author Mark Paluch
*/
public class SslTest {
public static final String KEYSTORE = "work/keystore.jks";
public static ClientResources clientResources = DefaultDisqueClient.getClientResources();
public static DisqueClient disqueClient = DisqueClient.create(clientResources);
@Before
public void before() throws Exception {
assumeTrue("Assume that stunnel runs on port 7443", Sockets.isOpen(host(), sslPort()));
assertThat(new File(KEYSTORE)).exists();
System.setProperty("javax.net.ssl.trustStore", KEYSTORE);
}
@AfterClass
public static void afterClass() {
FastShutdown.shutdown(disqueClient);
}
@Test
public void regularSsl() throws Exception {
DisqueURI disqueUri = DisqueURI.Builder.disque(host(), sslPort()).withSsl(true).withVerifyPeer(false).build();
| DisqueCommands<String, String> connection = disqueClient.connect(disqueUri).sync(); |
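// A plausible continuation of regularSsl(); the remainder of the test body is not
// part of this record, so this is an assumed sketch: exercise the TLS connection
// with PING (Disque replies PONG) and close it afterwards.
DisqueCommands<String, String> connection = disqueClient.connect(disqueUri).sync();
assertThat(connection.ping()).isEqualTo("PONG");
connection.close();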
mp911de/spinach | src/test/java/biz/paluch/spinach/RoundRobinSocketAddressSupplierTest.java | // Path: src/main/java/biz/paluch/spinach/impl/RoundRobinSocketAddressSupplier.java
// public class RoundRobinSocketAddressSupplier implements SocketAddressSupplier {
//
// protected final Collection<? extends ConnectionPoint> connectionPoint;
// protected RoundRobin<? extends ConnectionPoint> roundRobin;
//
// /**
// *
// * @param connectionPoints the collection of {@link ConnectionPoint connection points}, must not be {@literal null}.
// */
// public RoundRobinSocketAddressSupplier(Collection<? extends ConnectionPoint> connectionPoints) {
// this(connectionPoints, null);
// }
//
// /**
// *
// * @param connectionPoints the collection of {@link ConnectionPoint connection points}, must not be {@literal null}.
// * @param offset the {@link ConnectionPoint connection point} at which to start the round-robin cycle; may be
// * {@literal null}.
// */
// public RoundRobinSocketAddressSupplier(Collection<? extends ConnectionPoint> connectionPoints, ConnectionPoint offset) {
// LettuceAssert.notNull(connectionPoints, "ConnectionPoints must not be null");
// this.connectionPoint = connectionPoints;
// this.roundRobin = new RoundRobin<ConnectionPoint>(connectionPoints, offset);
// }
//
// @Override
// public SocketAddress get() {
// ConnectionPoint connectionPoint = roundRobin.next();
// return getSocketAddress(connectionPoint);
// }
//
// protected static SocketAddress getSocketAddress(ConnectionPoint connectionPoint) {
//
// if (connectionPoint instanceof DisqueURI.DisqueSocket) {
// return ((DisqueURI.DisqueSocket) connectionPoint).getSocketAddress();
// }
// return InetSocketAddress.createUnresolved(connectionPoint.getHost(), connectionPoint.getPort());
// }
// }
| import static org.assertj.core.api.Assertions.assertThat;
import java.net.InetSocketAddress;
import java.util.Arrays;
import java.util.Collection;
import org.junit.Test;
import biz.paluch.spinach.impl.RoundRobinSocketAddressSupplier; | /*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach;
/**
* @author Mark Paluch
*/
public class RoundRobinSocketAddressSupplierTest {
private static DisqueURI.DisqueHostAndPort hap1 = new DisqueURI.DisqueHostAndPort("127.0.0.1", 1);
private static DisqueURI.DisqueHostAndPort hap2 = new DisqueURI.DisqueHostAndPort("127.0.0.1", 2);
private static DisqueURI.DisqueHostAndPort hap3 = new DisqueURI.DisqueHostAndPort("127.0.0.1", 3);
private Collection<DisqueURI.DisqueHostAndPort> points = Arrays.asList(hap1, hap2, hap3);
@Test
public void noOffset() throws Exception {
| // Path: src/main/java/biz/paluch/spinach/impl/RoundRobinSocketAddressSupplier.java
// public class RoundRobinSocketAddressSupplier implements SocketAddressSupplier {
//
// protected final Collection<? extends ConnectionPoint> connectionPoint;
// protected RoundRobin<? extends ConnectionPoint> roundRobin;
//
// /**
// *
// * @param connectionPoints the collection of {@link ConnectionPoint connection points}, must not be {@literal null}.
// */
// public RoundRobinSocketAddressSupplier(Collection<? extends ConnectionPoint> connectionPoints) {
// this(connectionPoints, null);
// }
//
// /**
// *
// * @param connectionPoints the collection of {@link ConnectionPoint connection points}, must not be {@literal null}.
// * @param offset the {@link ConnectionPoint connection point} at which to start the round-robin cycle; may be
// * {@literal null}.
// */
// public RoundRobinSocketAddressSupplier(Collection<? extends ConnectionPoint> connectionPoints, ConnectionPoint offset) {
// LettuceAssert.notNull(connectionPoints, "ConnectionPoints must not be null");
// this.connectionPoint = connectionPoints;
// this.roundRobin = new RoundRobin<ConnectionPoint>(connectionPoints, offset);
// }
//
// @Override
// public SocketAddress get() {
// ConnectionPoint connectionPoint = roundRobin.next();
// return getSocketAddress(connectionPoint);
// }
//
// protected static SocketAddress getSocketAddress(ConnectionPoint connectionPoint) {
//
// if (connectionPoint instanceof DisqueURI.DisqueSocket) {
// return ((DisqueURI.DisqueSocket) connectionPoint).getSocketAddress();
// }
// return InetSocketAddress.createUnresolved(connectionPoint.getHost(), connectionPoint.getPort());
// }
// }
// Path: src/test/java/biz/paluch/spinach/RoundRobinSocketAddressSupplierTest.java
import static org.assertj.core.api.Assertions.assertThat;
import java.net.InetSocketAddress;
import java.util.Arrays;
import java.util.Collection;
import org.junit.Test;
import biz.paluch.spinach.impl.RoundRobinSocketAddressSupplier;
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach;
/**
* @author Mark Paluch
*/
public class RoundRobinSocketAddressSupplierTest {
private static DisqueURI.DisqueHostAndPort hap1 = new DisqueURI.DisqueHostAndPort("127.0.0.1", 1);
private static DisqueURI.DisqueHostAndPort hap2 = new DisqueURI.DisqueHostAndPort("127.0.0.1", 2);
private static DisqueURI.DisqueHostAndPort hap3 = new DisqueURI.DisqueHostAndPort("127.0.0.1", 3);
private Collection<DisqueURI.DisqueHostAndPort> points = Arrays.asList(hap1, hap2, hap3);
@Test
public void noOffset() throws Exception {
| RoundRobinSocketAddressSupplier sut = new RoundRobinSocketAddressSupplier(points, null); |
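// What noOffset() presumably asserts next (the body is truncated in this record).
// With a null offset the cycle is assumed to start at the first connection point
// and wrap around; get() yields unresolved InetSocketAddress instances.
RoundRobinSocketAddressSupplier sut = new RoundRobinSocketAddressSupplier(points, null);
assertThat(sut.get()).isEqualTo(InetSocketAddress.createUnresolved("127.0.0.1", 1));
assertThat(sut.get()).isEqualTo(InetSocketAddress.createUnresolved("127.0.0.1", 2));
assertThat(sut.get()).isEqualTo(InetSocketAddress.createUnresolved("127.0.0.1", 3));
assertThat(sut.get()).isEqualTo(InetSocketAddress.createUnresolved("127.0.0.1", 1)); // cycle repeats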
mp911de/spinach | src/main/java/biz/paluch/spinach/api/rx/DisqueServerReactiveCommands.java | // Path: src/main/java/biz/paluch/spinach/api/CommandType.java
// public enum CommandType implements ProtocolKeyword {
// // Jobs
// ADDJOB, ACKJOB, DELJOB, FASTACK, GETJOB, JSCAN, SHOW,
//
// // Queues
// ENQUEUE, DEQUEUE, NACK, PAUSE, QLEN, QPEEK, QSCAN, QSTAT, WORKING,
//
// // AOF
// BGREWRITEAOF,
//
// // Server commands
// AUTH, CONFIG, CLUSTER, CLIENT, COMMAND, DEBUG, INFO, /* LATENCY, */HELLO, PING, QUIT, SHUTDOWN, SLOWLOG, TIME;
//
// public final byte[] bytes;
//
// CommandType() {
// bytes = name().getBytes(LettuceCharsets.ASCII);
// }
//
// @Override
// public byte[] getBytes() {
// return bytes;
// }
//
// }
| import biz.paluch.spinach.api.CommandType;
import com.lambdaworks.redis.KillArgs;
import rx.Observable; | /*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach.api.rx;
/**
* Reactive commands related to Disque Server Control.
*
* @param <K> Key type.
* @param <V> Value type.
* @author Mark Paluch
*/
public interface DisqueServerReactiveCommands<K, V> {
/**
* Asynchronously rewrite the append-only file.
*
* @return String simple-string-reply always {@code OK}.
*/
Observable<String> bgrewriteaof();
/**
* Get the current connection name.
*
* @return K bulk-string-reply The connection name, or a null bulk reply if no name is set.
*/
Observable<K> clientGetname();
/**
* Kill connections of clients that match the filter given by {@code killArgs}.
*
* @param killArgs args for the kill operation
* @return Long integer-reply number of killed connections
*/
Observable<Long> clientKill(KillArgs killArgs);
/**
* Kill the connection of a client identified by ip:port.
*
* @param addr ip:port
* @return String simple-string-reply {@code OK} if the connection exists and has been closed
*/
Observable<String> clientKill(String addr);
/**
* Get the list of client connections.
*
* @return String bulk-string-reply a unique string formatted as follows: one client connection per line (separated by LF);
* each line is a succession of {@code property=value} fields separated by a space character.
*/
Observable<String> clientList();
/**
* Stop processing commands from clients for some time.
*
* @param timeout the timeout value in milliseconds
* @return String simple-string-reply The command returns OK or an error if the timeout is invalid.
*/
Observable<String> clientPause(long timeout);
/**
* Set the current connection name.
*
* @param name the client name
* @return simple-string-reply {@code OK} if the connection name was successfully set.
*/
Observable<String> clientSetname(String name);
/**
* Returns an array reply of details about all commands supported by the server.
*
* @return {@code List<Object>} array-reply
*/
Observable<Object> command();
/**
* Get the total number of commands supported by the server.
*
* @return Long integer-reply the total number of commands in this server.
*/
Observable<Long> commandCount();
/**
* Returns an array reply of details about the requested commands.
*
* @param commands the commands to query for
* @return {@code List<Object>} array-reply
*/ | // Path: src/main/java/biz/paluch/spinach/api/CommandType.java
// public enum CommandType implements ProtocolKeyword {
// // Jobs
// ADDJOB, ACKJOB, DELJOB, FASTACK, GETJOB, JSCAN, SHOW,
//
// // Queues
// ENQUEUE, DEQUEUE, NACK, PAUSE, QLEN, QPEEK, QSCAN, QSTAT, WORKING,
//
// // AOF
// BGREWRITEAOF,
//
// // Server commands
// AUTH, CONFIG, CLUSTER, CLIENT, COMMAND, DEBUG, INFO, /* LATENCY, */HELLO, PING, QUIT, SHUTDOWN, SLOWLOG, TIME;
//
// public final byte[] bytes;
//
// CommandType() {
// bytes = name().getBytes(LettuceCharsets.ASCII);
// }
//
// @Override
// public byte[] getBytes() {
// return bytes;
// }
//
// }
// Path: src/main/java/biz/paluch/spinach/api/rx/DisqueServerReactiveCommands.java
import biz.paluch.spinach.api.CommandType;
import com.lambdaworks.redis.KillArgs;
import rx.Observable;
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach.api.rx;
/**
* Reactive commands related to Disque Server Control.
*
* @param <K> Key type.
* @param <V> Value type.
* @author Mark Paluch
*/
public interface DisqueServerReactiveCommands<K, V> {
/**
* Asynchronously rewrite the append-only file.
*
* @return String simple-string-reply always {@code OK}.
*/
Observable<String> bgrewriteaof();
/**
* Get the current connection name.
*
* @return K bulk-string-reply The connection name, or a null bulk reply if no name is set.
*/
Observable<K> clientGetname();
/**
* Kill connections of clients that match the filter given by {@code killArgs}.
*
* @param killArgs args for the kill operation
* @return Long integer-reply number of killed connections
*/
Observable<Long> clientKill(KillArgs killArgs);
/**
* Kill the connection of a client identified by ip:port.
*
* @param addr ip:port
* @return String simple-string-reply {@code OK} if the connection exists and has been closed
*/
Observable<String> clientKill(String addr);
/**
* Get the list of client connections.
*
* @return String bulk-string-reply a unique string formatted as follows: one client connection per line (separated by LF);
* each line is a succession of {@code property=value} fields separated by a space character.
*/
Observable<String> clientList();
/**
* Stop processing commands from clients for some time.
*
* @param timeout the timeout value in milliseconds
* @return String simple-string-reply The command returns OK or an error if the timeout is invalid.
*/
Observable<String> clientPause(long timeout);
/**
* Set the current connection name.
*
* @param name the client name
* @return simple-string-reply {@code OK} if the connection name was successfully set.
*/
Observable<String> clientSetname(String name);
/**
* Returns an array reply of details about all commands supported by the server.
*
* @return {@code List<Object>} array-reply
*/
Observable<Object> command();
/**
* Get the total number of commands supported by the server.
*
* @return Long integer-reply the total number of commands in this server.
*/
Observable<Long> commandCount();
/**
* Returns an array reply of details about the requested commands.
*
* @param commands the commands to query for
* @return {@code List<Object>} array-reply
*/ | Observable<Object> commandInfo(CommandType... commands); |
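// A minimal usage sketch for the reactive interface above; "reactive" stands for a
// hypothetical DisqueServerReactiveCommands<String, String> instance obtained from a
// connection. rx.Observable is lazy, so nothing is sent until subscription;
// toBlocking() is used only to keep the example linear.
Observable<Object> info = reactive.commandInfo(CommandType.GETJOB, CommandType.ADDJOB);
java.util.List<Object> details = info.toList().toBlocking().single();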
mp911de/spinach | src/test/java/biz/paluch/spinach/commands/ServerCommandTest.java | // Path: src/main/java/biz/paluch/spinach/api/CommandType.java
// public enum CommandType implements ProtocolKeyword {
// // Jobs
// ADDJOB, ACKJOB, DELJOB, FASTACK, GETJOB, JSCAN, SHOW,
//
// // Queues
// ENQUEUE, DEQUEUE, NACK, PAUSE, QLEN, QPEEK, QSCAN, QSTAT, WORKING,
//
// // AOF
// BGREWRITEAOF,
//
// // Server commands
// AUTH, CONFIG, CLUSTER, CLIENT, COMMAND, DEBUG, INFO, /* LATENCY, */HELLO, PING, QUIT, SHUTDOWN, SLOWLOG, TIME;
//
// public final byte[] bytes;
//
// CommandType() {
// bytes = name().getBytes(LettuceCharsets.ASCII);
// }
//
// @Override
// public byte[] getBytes() {
// return bytes;
// }
//
// }
| import com.lambdaworks.redis.models.command.CommandDetailParser;
import static org.assertj.core.api.Assertions.assertThat;
import static org.hamcrest.CoreMatchers.*;
import static org.junit.Assert.assertThat;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.junit.Ignore;
import org.junit.Test;
import biz.paluch.spinach.api.CommandType;
import com.lambdaworks.redis.RedisCommandExecutionException;
import com.lambdaworks.redis.models.command.CommandDetail; |
@Test(expected = RedisCommandExecutionException.class)
public void clientKillUnknown() throws Exception {
disque.clientKill("afdsfads");
}
@Test
public void clientList() throws Exception {
assertThat(disque.clientList().contains("addr=")).isTrue();
}
@Test
public void commandCount() throws Exception {
assertThat(disque.commandCount()).isGreaterThan(20);
}
@Test
public void command() throws Exception {
List<Object> result = disque.command();
assertThat(result.size()).isGreaterThan(10);
List<CommandDetail> commands = CommandDetailParser.parse(result);
assertThat(commands).hasSameSizeAs(result);
}
@Test
public void commandInfo() throws Exception {
| // Path: src/main/java/biz/paluch/spinach/api/CommandType.java
// public enum CommandType implements ProtocolKeyword {
// // Jobs
// ADDJOB, ACKJOB, DELJOB, FASTACK, GETJOB, JSCAN, SHOW,
//
// // Queues
// ENQUEUE, DEQUEUE, NACK, PAUSE, QLEN, QPEEK, QSCAN, QSTAT, WORKING,
//
// // AOF
// BGREWRITEAOF,
//
// // Server commands
// AUTH, CONFIG, CLUSTER, CLIENT, COMMAND, DEBUG, INFO, /* LATENCY, */HELLO, PING, QUIT, SHUTDOWN, SLOWLOG, TIME;
//
// public final byte[] bytes;
//
// CommandType() {
// bytes = name().getBytes(LettuceCharsets.ASCII);
// }
//
// @Override
// public byte[] getBytes() {
// return bytes;
// }
//
// }
// Path: src/test/java/biz/paluch/spinach/commands/ServerCommandTest.java
import com.lambdaworks.redis.models.command.CommandDetailParser;
import static org.assertj.core.api.Assertions.assertThat;
import static org.hamcrest.CoreMatchers.*;
import static org.junit.Assert.assertThat;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.junit.Ignore;
import org.junit.Test;
import biz.paluch.spinach.api.CommandType;
import com.lambdaworks.redis.RedisCommandExecutionException;
import com.lambdaworks.redis.models.command.CommandDetail;
@Test(expected = RedisCommandExecutionException.class)
public void clientKillUnknown() throws Exception {
disque.clientKill("afdsfads");
}
@Test
public void clientList() throws Exception {
assertThat(disque.clientList().contains("addr=")).isTrue();
}
@Test
public void commandCount() throws Exception {
assertThat(disque.commandCount()).isGreaterThan(20);
}
@Test
public void command() throws Exception {
List<Object> result = disque.command();
assertThat(result.size()).isGreaterThan(10);
List<CommandDetail> commands = CommandDetailParser.parse(result);
assertThat(commands).hasSameSizeAs(result);
}
@Test
public void commandInfo() throws Exception {
| List<Object> result = disque.commandInfo(CommandType.ACKJOB); |
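// A plausible continuation of commandInfo() (the assertions are truncated in this
// record): parse the raw reply with lettuce's CommandDetailParser and check that the
// single requested command is described.
List<Object> result = disque.commandInfo(CommandType.ACKJOB);
assertThat(result).hasSize(1);
List<CommandDetail> details = CommandDetailParser.parse(result);
assertThat(details.get(0).getName()).isEqualToIgnoringCase(CommandType.ACKJOB.name());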
mp911de/spinach | src/test/java/biz/paluch/spinach/ClusterConnectionTest.java | // Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static String host() {
// return System.getProperty("host", "localhost");
// }
//
// Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static int port() {
// return Integer.valueOf(System.getProperty("port", "7711"));
// }
//
// Path: src/main/java/biz/paluch/spinach/api/DisqueConnection.java
// public interface DisqueConnection<K, V> extends StatefulConnection<K, V> {
//
// /**
// * Returns the {@link DisqueCommands} API for the current connection. Does not create a new connection.
// *
// * @return the synchronous API for the underlying connection.
// */
// DisqueCommands<K, V> sync();
//
// /**
// * Returns the {@link DisqueAsyncCommands} API for the current connection. Does not create a new connection.
// *
// * @return the asynchronous API for the underlying connection.
// */
// DisqueAsyncCommands<K, V> async();
//
// /**
// * Returns the {@link DisqueReactiveCommands} API for the current connection. Does not create a new connection.
// *
// * @return the reactive API for the underlying connection.
// */
// DisqueReactiveCommands<K, V> reactive();
//
// }
//
// Path: src/test/java/biz/paluch/spinach/support/DefaultDisqueClient.java
// public class DefaultDisqueClient {
//
// public final static DefaultDisqueClient instance = new DefaultDisqueClient();
//
// private DisqueClient disqueClient;
// private ClientResources clientResources;
//
// public DefaultDisqueClient() {
// clientResources = TestClientResources.create();
// disqueClient = DisqueClient.create(clientResources, DisqueURI.Builder.disque(TestSettings.host(), TestSettings.port())
// .build());
// Runtime.getRuntime().addShutdownHook(new Thread() {
// @Override
// public void run() {
// FastShutdown.shutdown(disqueClient);
// }
// });
// }
//
// /**
// * Do not close the client.
// *
// * @return the default disque client for the tests.
// */
// public static DisqueClient get() {
// instance.disqueClient.setDefaultTimeout(60, TimeUnit.SECONDS);
// return instance.disqueClient;
// }
//
// /**
// * Do not close the client resources.
// * @return the default client resources for the tests.
// */
// public static ClientResources getClientResources() {
// return instance.clientResources;
// }
// }
//
// Path: src/test/java/biz/paluch/spinach/support/FastShutdown.java
// public class FastShutdown {
//
// /**
// * Shut down an {@link AbstractRedisClient} with a timeout of 10ms.
// *
// * @param redisClient the client to shut down
// */
// public static void shutdown(AbstractRedisClient redisClient) {
// redisClient.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
//
// /**
// * Shut down a {@link ClientResources} instance with a timeout of 10ms.
// *
// * @param clientResources the client resources to shut down
// */
// public static void shutdown(ClientResources clientResources) {
// clientResources.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
// }
| import static biz.paluch.spinach.TestSettings.host;
import static biz.paluch.spinach.TestSettings.port;
import static org.assertj.core.api.Assertions.assertThat;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import biz.paluch.spinach.api.DisqueConnection;
import biz.paluch.spinach.support.DefaultDisqueClient;
import biz.paluch.spinach.support.FastShutdown; | /*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach;
/**
* @author Mark Paluch
*/
public class ClusterConnectionTest {
private static DisqueClient disqueClient;
@BeforeClass
public static void beforeClass() {
| // Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static String host() {
// return System.getProperty("host", "localhost");
// }
//
// Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static int port() {
// return Integer.valueOf(System.getProperty("port", "7711"));
// }
//
// Path: src/main/java/biz/paluch/spinach/api/DisqueConnection.java
// public interface DisqueConnection<K, V> extends StatefulConnection<K, V> {
//
// /**
// * Returns the {@link DisqueCommands} API for the current connection. Does not create a new connection.
// *
// * @return the synchronous API for the underlying connection.
// */
// DisqueCommands<K, V> sync();
//
// /**
// * Returns the {@link DisqueAsyncCommands} API for the current connection. Does not create a new connection.
// *
// * @return the asynchronous API for the underlying connection.
// */
// DisqueAsyncCommands<K, V> async();
//
// /**
// * Returns the {@link DisqueReactiveCommands} API for the current connection. Does not create a new connection.
// *
// * @return the reactive API for the underlying connection.
// */
// DisqueReactiveCommands<K, V> reactive();
//
// }
//
// Path: src/test/java/biz/paluch/spinach/support/DefaultDisqueClient.java
// public class DefaultDisqueClient {
//
// public final static DefaultDisqueClient instance = new DefaultDisqueClient();
//
// private DisqueClient disqueClient;
// private ClientResources clientResources;
//
// public DefaultDisqueClient() {
// clientResources = TestClientResources.create();
// disqueClient = DisqueClient.create(clientResources, DisqueURI.Builder.disque(TestSettings.host(), TestSettings.port())
// .build());
// Runtime.getRuntime().addShutdownHook(new Thread() {
// @Override
// public void run() {
// FastShutdown.shutdown(disqueClient);
// }
// });
// }
//
// /**
// * Do not close the client.
// *
// * @return the default disque client for the tests.
// */
// public static DisqueClient get() {
// instance.disqueClient.setDefaultTimeout(60, TimeUnit.SECONDS);
// return instance.disqueClient;
// }
//
// /**
// * Do not close the client resources.
// * @return the default client resources for the tests.
// */
// public static ClientResources getClientResources() {
// return instance.clientResources;
// }
// }
//
// Path: src/test/java/biz/paluch/spinach/support/FastShutdown.java
// public class FastShutdown {
//
// /**
// * Shut down an {@link AbstractRedisClient} with a timeout of 10ms.
// *
// * @param redisClient the client to shut down
// */
// public static void shutdown(AbstractRedisClient redisClient) {
// redisClient.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
//
// /**
// * Shut down a {@link ClientResources} instance with a timeout of 10ms.
// *
// * @param clientResources the client resources to shut down
// */
// public static void shutdown(ClientResources clientResources) {
// clientResources.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
// }
// Path: src/test/java/biz/paluch/spinach/ClusterConnectionTest.java
import static biz.paluch.spinach.TestSettings.host;
import static biz.paluch.spinach.TestSettings.port;
import static org.assertj.core.api.Assertions.assertThat;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import biz.paluch.spinach.api.DisqueConnection;
import biz.paluch.spinach.support.DefaultDisqueClient;
import biz.paluch.spinach.support.FastShutdown;
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach;
/**
* @author Mark Paluch
*/
public class ClusterConnectionTest {
private static DisqueClient disqueClient;
@BeforeClass
public static void beforeClass() {
| DisqueURI disqueURI = new DisqueURI.Builder().withDisque(host(), port()).withDisque(host(), port(1)).build(); |
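// A sketch of how beforeClass() plausibly continues: build the client from the
// two-node URI so the connection can be established against either node. port(1) is
// assumed to be a TestSettings overload returning the base port plus an offset.
DisqueURI disqueURI = new DisqueURI.Builder().withDisque(host(), port()).withDisque(host(), port(1)).build();
disqueClient = DisqueClient.create(DefaultDisqueClient.getClientResources(), disqueURI);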
mp911de/spinach | src/test/java/biz/paluch/spinach/ClusterConnectionTest.java | // Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static String host() {
// return System.getProperty("host", "localhost");
// }
//
// Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static int port() {
// return Integer.valueOf(System.getProperty("port", "7711"));
// }
//
// Path: src/main/java/biz/paluch/spinach/api/DisqueConnection.java
// public interface DisqueConnection<K, V> extends StatefulConnection<K, V> {
//
// /**
// * Returns the {@link DisqueCommands} API for the current connection. Does not create a new connection.
// *
// * @return the synchronous API for the underlying connection.
// */
// DisqueCommands<K, V> sync();
//
// /**
// * Returns the {@link DisqueAsyncCommands} API for the current connection. Does not create a new connection.
// *
// * @return the asynchronous API for the underlying connection.
// */
// DisqueAsyncCommands<K, V> async();
//
// /**
// * Returns the {@link DisqueReactiveCommands} API for the current connection. Does not create a new connection.
// *
// * @return the reactive API for the underlying connection.
// */
// DisqueReactiveCommands<K, V> reactive();
//
// }
//
// Path: src/test/java/biz/paluch/spinach/support/DefaultDisqueClient.java
// public class DefaultDisqueClient {
//
// public final static DefaultDisqueClient instance = new DefaultDisqueClient();
//
// private DisqueClient disqueClient;
// private ClientResources clientResources;
//
// public DefaultDisqueClient() {
// clientResources = TestClientResources.create();
// disqueClient = DisqueClient.create(clientResources, DisqueURI.Builder.disque(TestSettings.host(), TestSettings.port())
// .build());
// Runtime.getRuntime().addShutdownHook(new Thread() {
// @Override
// public void run() {
// FastShutdown.shutdown(disqueClient);
// }
// });
// }
//
// /**
// * Do not close the client.
// *
// * @return the default disque client for the tests.
// */
// public static DisqueClient get() {
// instance.disqueClient.setDefaultTimeout(60, TimeUnit.SECONDS);
// return instance.disqueClient;
// }
//
// /**
// * Do not close the client resources.
// * @return the default client resources for the tests.
// */
// public static ClientResources getClientResources() {
// return instance.clientResources;
// }
// }
//
// Path: src/test/java/biz/paluch/spinach/support/FastShutdown.java
// public class FastShutdown {
//
// /**
// * Shut down an {@link AbstractRedisClient} with a timeout of 10ms.
// *
// * @param redisClient
// */
// public static void shutdown(AbstractRedisClient redisClient) {
// redisClient.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
//
// /**
// * Shut down {@link ClientResources} with a timeout of 10ms.
// *
// * @param clientResources
// */
// public static void shutdown(ClientResources clientResources) {
// clientResources.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
// }
| import static biz.paluch.spinach.TestSettings.host;
import static biz.paluch.spinach.TestSettings.port;
import static org.assertj.core.api.Assertions.assertThat;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import biz.paluch.spinach.api.DisqueConnection;
import biz.paluch.spinach.support.DefaultDisqueClient;
import biz.paluch.spinach.support.FastShutdown; | /*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach;
/**
* @author Mark Paluch
*/
public class ClusterConnectionTest {
private static DisqueClient disqueClient;
@BeforeClass
public static void beforeClass() {
DisqueURI disqueURI = new DisqueURI.Builder().withDisque(host(), port()).withDisque(host(), port(1)).build(); | // Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static String host() {
// return System.getProperty("host", "localhost");
// }
//
// Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static int port() {
// return Integer.valueOf(System.getProperty("port", "7711"));
// }
//
// Path: src/main/java/biz/paluch/spinach/api/DisqueConnection.java
// public interface DisqueConnection<K, V> extends StatefulConnection<K, V> {
//
// /**
// * Returns the {@link DisqueCommands} API for the current connection. Does not create a new connection.
// *
// * @return the synchronous API for the underlying connection.
// */
// DisqueCommands<K, V> sync();
//
// /**
// * Returns the {@link DisqueAsyncCommands} API for the current connection. Does not create a new connection.
// *
// * @return the asynchronous API for the underlying connection.
// */
// DisqueAsyncCommands<K, V> async();
//
// /**
// * Returns the {@link DisqueReactiveCommands} API for the current connection. Does not create a new connection.
// *
// * @return the reactive API for the underlying connection.
// */
// DisqueReactiveCommands<K, V> reactive();
//
// }
//
// Path: src/test/java/biz/paluch/spinach/support/DefaultDisqueClient.java
// public class DefaultDisqueClient {
//
// public final static DefaultDisqueClient instance = new DefaultDisqueClient();
//
// private DisqueClient disqueClient;
// private ClientResources clientResources;
//
// public DefaultDisqueClient() {
// clientResources = TestClientResources.create();
// disqueClient = DisqueClient.create(clientResources, DisqueURI.Builder.disque(TestSettings.host(), TestSettings.port())
// .build());
// Runtime.getRuntime().addShutdownHook(new Thread() {
// @Override
// public void run() {
// FastShutdown.shutdown(disqueClient);
// }
// });
// }
//
// /**
// * Do not close the client.
// *
// * @return the default disque client for the tests.
// */
// public static DisqueClient get() {
// instance.disqueClient.setDefaultTimeout(60, TimeUnit.SECONDS);
// return instance.disqueClient;
// }
//
// /**
// * Do not close the client resources.
// * @return the default client resources for the tests.
// */
// public static ClientResources getClientResources() {
// return instance.clientResources;
// }
// }
//
// Path: src/test/java/biz/paluch/spinach/support/FastShutdown.java
// public class FastShutdown {
//
// /**
// * Shut down an {@link AbstractRedisClient} with a timeout of 10ms.
// *
// * @param redisClient
// */
// public static void shutdown(AbstractRedisClient redisClient) {
// redisClient.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
//
// /**
// * Shut down {@link ClientResources} with a timeout of 10ms.
// *
// * @param clientResources
// */
// public static void shutdown(ClientResources clientResources) {
// clientResources.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
// }
// Path: src/test/java/biz/paluch/spinach/ClusterConnectionTest.java
import static biz.paluch.spinach.TestSettings.host;
import static biz.paluch.spinach.TestSettings.port;
import static org.assertj.core.api.Assertions.assertThat;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import biz.paluch.spinach.api.DisqueConnection;
import biz.paluch.spinach.support.DefaultDisqueClient;
import biz.paluch.spinach.support.FastShutdown;
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach;
/**
* @author Mark Paluch
*/
public class ClusterConnectionTest {
private static DisqueClient disqueClient;
@BeforeClass
public static void beforeClass() {
DisqueURI disqueURI = new DisqueURI.Builder().withDisque(host(), port()).withDisque(host(), port(1)).build(); | disqueClient = DisqueClient.create(DefaultDisqueClient.getClientResources(), disqueURI); |
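This completion builds the client on the suite's shared ClientResources instead of letting DisqueClient allocate its own event loops. A hedged sketch of the same pattern outside the test harness; DefaultClientResources.create() is the stock lettuce factory and stands in for the TestClientResources helper the context references but does not show:

import java.util.concurrent.TimeUnit;

import biz.paluch.spinach.DisqueClient;
import biz.paluch.spinach.DisqueURI;
import com.lambdaworks.redis.resource.ClientResources;
import com.lambdaworks.redis.resource.DefaultClientResources;

public class SharedResourcesSketch {

    public static void main(String[] args) {
        // Shared thread pools and event loops; whoever creates them owns their shutdown.
        ClientResources resources = DefaultClientResources.create();

        DisqueURI uri = new DisqueURI.Builder().withDisque("localhost", 7711).build();
        DisqueClient client = DisqueClient.create(resources, uri);

        // ... open connections, enqueue and fetch jobs ...

        // Shut the client down first, then the resources it was built on,
        // mirroring the ownership rule spelled out in the DefaultDisqueClient javadoc.
        client.shutdown(100, 100, TimeUnit.MILLISECONDS);
        resources.shutdown(100, 100, TimeUnit.MILLISECONDS);
    }
}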
mp911de/spinach | src/test/java/biz/paluch/spinach/ClusterConnectionTest.java | // Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static String host() {
// return System.getProperty("host", "localhost");
// }
//
// Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static int port() {
// return Integer.valueOf(System.getProperty("port", "7711"));
// }
//
// Path: src/main/java/biz/paluch/spinach/api/DisqueConnection.java
// public interface DisqueConnection<K, V> extends StatefulConnection<K, V> {
//
// /**
// * Returns the {@link DisqueCommands} API for the current connection. Does not create a new connection.
// *
// * @return the synchronous API for the underlying connection.
// */
// DisqueCommands<K, V> sync();
//
// /**
// * Returns the {@link DisqueAsyncCommands} API for the current connection. Does not create a new connection.
// *
// * @return the asynchronous API for the underlying connection.
// */
// DisqueAsyncCommands<K, V> async();
//
// /**
// * Returns the {@link DisqueReactiveCommands} API for the current connection. Does not create a new connection.
// *
// * @return the reactive API for the underlying connection.
// */
// DisqueReactiveCommands<K, V> reactive();
//
// }
//
// Path: src/test/java/biz/paluch/spinach/support/DefaultDisqueClient.java
// public class DefaultDisqueClient {
//
// public final static DefaultDisqueClient instance = new DefaultDisqueClient();
//
// private DisqueClient disqueClient;
// private ClientResources clientResources;
//
// public DefaultDisqueClient() {
// clientResources = TestClientResources.create();
// disqueClient = DisqueClient.create(clientResources, DisqueURI.Builder.disque(TestSettings.host(), TestSettings.port())
// .build());
// Runtime.getRuntime().addShutdownHook(new Thread() {
// @Override
// public void run() {
// FastShutdown.shutdown(disqueClient);
// }
// });
// }
//
// /**
// * Do not close the client.
// *
// * @return the default disque client for the tests.
// */
// public static DisqueClient get() {
// instance.disqueClient.setDefaultTimeout(60, TimeUnit.SECONDS);
// return instance.disqueClient;
// }
//
// /**
// * Do not close the client resources.
// * @return the default client resources for the tests.
// */
// public static ClientResources getClientResources() {
// return instance.clientResources;
// }
// }
//
// Path: src/test/java/biz/paluch/spinach/support/FastShutdown.java
// public class FastShutdown {
//
// /**
// * Shut down an {@link AbstractRedisClient} with a timeout of 10ms.
// *
// * @param redisClient
// */
// public static void shutdown(AbstractRedisClient redisClient) {
// redisClient.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
//
// /**
// * Shut down {@link ClientResources} with a timeout of 10ms.
// *
// * @param clientResources
// */
// public static void shutdown(ClientResources clientResources) {
// clientResources.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
// }
| import static biz.paluch.spinach.TestSettings.host;
import static biz.paluch.spinach.TestSettings.port;
import static org.assertj.core.api.Assertions.assertThat;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import biz.paluch.spinach.api.DisqueConnection;
import biz.paluch.spinach.support.DefaultDisqueClient;
import biz.paluch.spinach.support.FastShutdown; | /*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach;
/**
* @author Mark Paluch
*/
public class ClusterConnectionTest {
private static DisqueClient disqueClient;
@BeforeClass
public static void beforeClass() {
DisqueURI disqueURI = new DisqueURI.Builder().withDisque(host(), port()).withDisque(host(), port(1)).build();
disqueClient = DisqueClient.create(DefaultDisqueClient.getClientResources(), disqueURI);
}
@AfterClass
public static void afterClass() { | // Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static String host() {
// return System.getProperty("host", "localhost");
// }
//
// Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static int port() {
// return Integer.valueOf(System.getProperty("port", "7711"));
// }
//
// Path: src/main/java/biz/paluch/spinach/api/DisqueConnection.java
// public interface DisqueConnection<K, V> extends StatefulConnection<K, V> {
//
// /**
// * Returns the {@link DisqueCommands} API for the current connection. Does not create a new connection.
// *
// * @return the synchronous API for the underlying connection.
// */
// DisqueCommands<K, V> sync();
//
// /**
// * Returns the {@link DisqueAsyncCommands} API for the current connection. Does not create a new connection.
// *
// * @return the asynchronous API for the underlying connection.
// */
// DisqueAsyncCommands<K, V> async();
//
// /**
// * Returns the {@link DisqueReactiveCommands} API for the current connection. Does not create a new connection.
// *
// * @return the reactive API for the underlying connection.
// */
// DisqueReactiveCommands<K, V> reactive();
//
// }
//
// Path: src/test/java/biz/paluch/spinach/support/DefaultDisqueClient.java
// public class DefaultDisqueClient {
//
// public final static DefaultDisqueClient instance = new DefaultDisqueClient();
//
// private DisqueClient disqueClient;
// private ClientResources clientResources;
//
// public DefaultDisqueClient() {
// clientResources = TestClientResources.create();
// disqueClient = DisqueClient.create(clientResources, DisqueURI.Builder.disque(TestSettings.host(), TestSettings.port())
// .build());
// Runtime.getRuntime().addShutdownHook(new Thread() {
// @Override
// public void run() {
// FastShutdown.shutdown(disqueClient);
// }
// });
// }
//
// /**
// * Do not close the client.
// *
// * @return the default disque client for the tests.
// */
// public static DisqueClient get() {
// instance.disqueClient.setDefaultTimeout(60, TimeUnit.SECONDS);
// return instance.disqueClient;
// }
//
// /**
// * Do not close the client resources.
// * @return the default client resources for the tests.
// */
// public static ClientResources getClientResources() {
// return instance.clientResources;
// }
// }
//
// Path: src/test/java/biz/paluch/spinach/support/FastShutdown.java
// public class FastShutdown {
//
// /**
// * Shut down an {@link AbstractRedisClient} with a timeout of 10ms.
// *
// * @param redisClient
// */
// public static void shutdown(AbstractRedisClient redisClient) {
// redisClient.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
//
// /**
// * Shut down {@link ClientResources} with a timeout of 10ms.
// *
// * @param clientResources
// */
// public static void shutdown(ClientResources clientResources) {
// clientResources.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
// }
// Path: src/test/java/biz/paluch/spinach/ClusterConnectionTest.java
import static biz.paluch.spinach.TestSettings.host;
import static biz.paluch.spinach.TestSettings.port;
import static org.assertj.core.api.Assertions.assertThat;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import biz.paluch.spinach.api.DisqueConnection;
import biz.paluch.spinach.support.DefaultDisqueClient;
import biz.paluch.spinach.support.FastShutdown;
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach;
/**
* @author Mark Paluch
*/
public class ClusterConnectionTest {
private static DisqueClient disqueClient;
@BeforeClass
public static void beforeClass() {
DisqueURI disqueURI = new DisqueURI.Builder().withDisque(host(), port()).withDisque(host(), port(1)).build();
disqueClient = DisqueClient.create(DefaultDisqueClient.getClientResources(), disqueURI);
}
@AfterClass
public static void afterClass() { | FastShutdown.shutdown(disqueClient); |
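Pieced together, the last few completions form the whole test fixture: build a two-node URI, create the client on the shared resources, and tear it down with the 10ms fast shutdown so the suite never waits out a graceful quiet period. A condensed restatement that only rearranges lines already present in the rows above:

import static biz.paluch.spinach.TestSettings.host;
import static biz.paluch.spinach.TestSettings.port;

import org.junit.AfterClass;
import org.junit.BeforeClass;

import biz.paluch.spinach.DisqueClient;
import biz.paluch.spinach.DisqueURI;
import biz.paluch.spinach.support.DefaultDisqueClient;
import biz.paluch.spinach.support.FastShutdown;

public class ClusterFixtureSketch {

    private static DisqueClient disqueClient;

    @BeforeClass
    public static void beforeClass() {
        // One URI carrying both cluster nodes.
        DisqueURI disqueURI = new DisqueURI.Builder()
                .withDisque(host(), port()).withDisque(host(), port(1)).build();
        disqueClient = DisqueClient.create(DefaultDisqueClient.getClientResources(), disqueURI);
    }

    @AfterClass
    public static void afterClass() {
        // Delegates to shutdown(10, 10, MILLISECONDS) per the FastShutdown source above.
        FastShutdown.shutdown(disqueClient);
    }
}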
mp911de/spinach | src/test/java/biz/paluch/spinach/ClusterConnectionTest.java | // Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static String host() {
// return System.getProperty("host", "localhost");
// }
//
// Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static int port() {
// return Integer.valueOf(System.getProperty("port", "7711"));
// }
//
// Path: src/main/java/biz/paluch/spinach/api/DisqueConnection.java
// public interface DisqueConnection<K, V> extends StatefulConnection<K, V> {
//
// /**
// * Returns the {@link DisqueCommands} API for the current connection. Does not create a new connection.
// *
// * @return the synchronous API for the underlying connection.
// */
// DisqueCommands<K, V> sync();
//
// /**
// * Returns the {@link DisqueAsyncCommands} API for the current connection. Does not create a new connection.
// *
// * @return the asynchronous API for the underlying connection.
// */
// DisqueAsyncCommands<K, V> async();
//
// /**
// * Returns the {@link DisqueReactiveCommands} API for the current connection. Does not create a new connection.
// *
// * @return the reactive API for the underlying connection.
// */
// DisqueReactiveCommands<K, V> reactive();
//
// }
//
// Path: src/test/java/biz/paluch/spinach/support/DefaultDisqueClient.java
// public class DefaultDisqueClient {
//
// public final static DefaultDisqueClient instance = new DefaultDisqueClient();
//
// private DisqueClient disqueClient;
// private ClientResources clientResources;
//
// public DefaultDisqueClient() {
// clientResources = TestClientResources.create();
// disqueClient = DisqueClient.create(clientResources, DisqueURI.Builder.disque(TestSettings.host(), TestSettings.port())
// .build());
// Runtime.getRuntime().addShutdownHook(new Thread() {
// @Override
// public void run() {
// FastShutdown.shutdown(disqueClient);
// }
// });
// }
//
// /**
// * Do not close the client.
// *
// * @return the default disque client for the tests.
// */
// public static DisqueClient get() {
// instance.disqueClient.setDefaultTimeout(60, TimeUnit.SECONDS);
// return instance.disqueClient;
// }
//
// /**
// * Do not close the client resources.
// * @return the default client resources for the tests.
// */
// public static ClientResources getClientResources() {
// return instance.clientResources;
// }
// }
//
// Path: src/test/java/biz/paluch/spinach/support/FastShutdown.java
// public class FastShutdown {
//
// /**
// * Shut down an {@link AbstractRedisClient} with a timeout of 10ms.
// *
// * @param redisClient
// */
// public static void shutdown(AbstractRedisClient redisClient) {
// redisClient.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
//
// /**
// * Shut down {@link ClientResources} with a timeout of 10ms.
// *
// * @param clientResources
// */
// public static void shutdown(ClientResources clientResources) {
// clientResources.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
// }
| import static biz.paluch.spinach.TestSettings.host;
import static biz.paluch.spinach.TestSettings.port;
import static org.assertj.core.api.Assertions.assertThat;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import biz.paluch.spinach.api.DisqueConnection;
import biz.paluch.spinach.support.DefaultDisqueClient;
import biz.paluch.spinach.support.FastShutdown; | /*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach;
/**
* @author Mark Paluch
*/
public class ClusterConnectionTest {
private static DisqueClient disqueClient;
@BeforeClass
public static void beforeClass() {
DisqueURI disqueURI = new DisqueURI.Builder().withDisque(host(), port()).withDisque(host(), port(1)).build();
disqueClient = DisqueClient.create(DefaultDisqueClient.getClientResources(), disqueURI);
}
@AfterClass
public static void afterClass() {
FastShutdown.shutdown(disqueClient);
}
@Test
public void connect() throws Exception { | // Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static String host() {
// return System.getProperty("host", "localhost");
// }
//
// Path: src/test/java/biz/paluch/spinach/TestSettings.java
// public static int port() {
// return Integer.valueOf(System.getProperty("port", "7711"));
// }
//
// Path: src/main/java/biz/paluch/spinach/api/DisqueConnection.java
// public interface DisqueConnection<K, V> extends StatefulConnection<K, V> {
//
// /**
// * Returns the {@link DisqueCommands} API for the current connection. Does not create a new connection.
// *
// * @return the synchronous API for the underlying connection.
// */
// DisqueCommands<K, V> sync();
//
// /**
// * Returns the {@link DisqueAsyncCommands} API for the current connection. Does not create a new connection.
// *
// * @return the asynchronous API for the underlying connection.
// */
// DisqueAsyncCommands<K, V> async();
//
// /**
// * Returns the {@link DisqueReactiveCommands} API for the current connection. Does not create a new connection.
// *
// * @return the reactive API for the underlying connection.
// */
// DisqueReactiveCommands<K, V> reactive();
//
// }
//
// Path: src/test/java/biz/paluch/spinach/support/DefaultDisqueClient.java
// public class DefaultDisqueClient {
//
// public final static DefaultDisqueClient instance = new DefaultDisqueClient();
//
// private DisqueClient disqueClient;
// private ClientResources clientResources;
//
// public DefaultDisqueClient() {
// clientResources = TestClientResources.create();
// disqueClient = DisqueClient.create(clientResources, DisqueURI.Builder.disque(TestSettings.host(), TestSettings.port())
// .build());
// Runtime.getRuntime().addShutdownHook(new Thread() {
// @Override
// public void run() {
// FastShutdown.shutdown(disqueClient);
// }
// });
// }
//
// /**
// * Do not close the client.
// *
// * @return the default disque client for the tests.
// */
// public static DisqueClient get() {
// instance.disqueClient.setDefaultTimeout(60, TimeUnit.SECONDS);
// return instance.disqueClient;
// }
//
// /**
// * Do not close the client resources.
// * @return the default client resources for the tests.
// */
// public static ClientResources getClientResources() {
// return instance.clientResources;
// }
// }
//
// Path: src/test/java/biz/paluch/spinach/support/FastShutdown.java
// public class FastShutdown {
//
// /**
// * Shut down an {@link AbstractRedisClient} with a timeout of 10ms.
// *
// * @param redisClient
// */
// public static void shutdown(AbstractRedisClient redisClient) {
// redisClient.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
//
// /**
// * Shut down {@link ClientResources} with a timeout of 10ms.
// *
// * @param clientResources
// */
// public static void shutdown(ClientResources clientResources) {
// clientResources.shutdown(10, 10, TimeUnit.MILLISECONDS);
// }
// }
// Path: src/test/java/biz/paluch/spinach/ClusterConnectionTest.java
import static biz.paluch.spinach.TestSettings.host;
import static biz.paluch.spinach.TestSettings.port;
import static org.assertj.core.api.Assertions.assertThat;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import biz.paluch.spinach.api.DisqueConnection;
import biz.paluch.spinach.support.DefaultDisqueClient;
import biz.paluch.spinach.support.FastShutdown;
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach;
/**
* @author Mark Paluch
*/
public class ClusterConnectionTest {
private static DisqueClient disqueClient;
@BeforeClass
public static void beforeClass() {
DisqueURI disqueURI = new DisqueURI.Builder().withDisque(host(), port()).withDisque(host(), port(1)).build();
disqueClient = DisqueClient.create(DefaultDisqueClient.getClientResources(), disqueURI);
}
@AfterClass
public static void afterClass() {
FastShutdown.shutdown(disqueClient);
}
@Test
public void connect() throws Exception { | DisqueConnection<String, String> connection = disqueClient.connect(); |
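The connect() test opens a connection from the shared client. Only the first line of the body is given by the row; the assertions and cleanup below are assumptions about a plausible continuation, restricted to methods the DisqueCommands context declares (the literal "PONG" reply is also an assumption):

import static org.assertj.core.api.Assertions.assertThat;

import biz.paluch.spinach.DisqueClient;
import biz.paluch.spinach.api.DisqueConnection;
import biz.paluch.spinach.api.sync.DisqueCommands;

public class ConnectBodySketch {

    void connect(DisqueClient disqueClient) {
        DisqueConnection<String, String> connection = disqueClient.connect();

        // Hypothetical follow-up: verify the link is live, then release it.
        DisqueCommands<String, String> sync = connection.sync();
        assertThat(sync.isOpen()).isTrue();
        assertThat(sync.ping()).isEqualTo("PONG");

        sync.close();
    }
}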
mp911de/spinach | src/main/java/biz/paluch/spinach/api/DisqueConnection.java | // Path: src/main/java/biz/paluch/spinach/api/async/DisqueAsyncCommands.java
// public interface DisqueAsyncCommands<K, V> extends DisqueJobAsyncCommands<K, V>, DisqueQueueAsyncCommands<K, V>,
// DisqueServerAsyncCommands<K, V>, DisqueClusterAsyncCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// RedisFuture<String> auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method has been called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// RedisFuture<String> ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// RedisFuture<String> quit();
//
// }
//
// Path: src/main/java/biz/paluch/spinach/api/rx/DisqueReactiveCommands.java
// public interface DisqueReactiveCommands<K, V> extends DisqueJobReactiveCommands<K, V>, DisqueQueueReactiveCommands<K, V>,
// DisqueServerReactiveCommands<K, V>, DisqueClusterReactiveCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// Observable<String> auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method has been called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// Observable<String> ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// Observable<String> quit();
//
// }
//
// Path: src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java
// public interface DisqueCommands<K, V> extends DisqueJobCommands<K, V>, DisqueQueueCommands<K, V>, DisqueServerCommands<K, V>,
// DisqueClusterCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// String auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method has been called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// String ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// String quit();
//
// /**
// * Set the default timeout for operations.
// *
// * @param timeout the timeout value
// * @param unit the unit of the timeout value
// */
// void setTimeout(long timeout, TimeUnit unit);
// }
| import biz.paluch.spinach.api.async.DisqueAsyncCommands;
import biz.paluch.spinach.api.rx.DisqueReactiveCommands;
import biz.paluch.spinach.api.sync.DisqueCommands;
import com.lambdaworks.redis.api.StatefulConnection; | /*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach.api;
/**
* A thread-safe connection to a Disque server. Multiple threads may share one {@link DisqueConnection}.
*
* A {@link com.lambdaworks.redis.protocol.ConnectionWatchdog} monitors each connection and reconnects automatically until
* {@link #close} is called. All pending commands will be (re)sent after successful reconnection.
*
* @param <K> Key type.
* @param <V> Value type.
* @author Mark Paluch
*/
public interface DisqueConnection<K, V> extends StatefulConnection<K, V> {
/**
* Returns the {@link DisqueCommands} API for the current connection. Does not create a new connection.
*
* @return the synchronous API for the underlying connection.
*/
DisqueCommands<K, V> sync();
/**
* Returns the {@link DisqueAsyncCommands} API for the current connection. Does not create a new connection.
*
* @return the asynchronous API for the underlying connection.
*/ | // Path: src/main/java/biz/paluch/spinach/api/async/DisqueAsyncCommands.java
// public interface DisqueAsyncCommands<K, V> extends DisqueJobAsyncCommands<K, V>, DisqueQueueAsyncCommands<K, V>,
// DisqueServerAsyncCommands<K, V>, DisqueClusterAsyncCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// RedisFuture<String> auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method has been called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// RedisFuture<String> ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// RedisFuture<String> quit();
//
// }
//
// Path: src/main/java/biz/paluch/spinach/api/rx/DisqueReactiveCommands.java
// public interface DisqueReactiveCommands<K, V> extends DisqueJobReactiveCommands<K, V>, DisqueQueueReactiveCommands<K, V>,
// DisqueServerReactiveCommands<K, V>, DisqueClusterReactiveCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// Observable<String> auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method has been called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// Observable<String> ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// Observable<String> quit();
//
// }
//
// Path: src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java
// public interface DisqueCommands<K, V> extends DisqueJobCommands<K, V>, DisqueQueueCommands<K, V>, DisqueServerCommands<K, V>,
// DisqueClusterCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// String auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method has been called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// String ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// String quit();
//
// /**
// * Set the default timeout for operations.
// *
// * @param timeout the timeout value
// * @param unit the unit of the timeout value
// */
// void setTimeout(long timeout, TimeUnit unit);
// }
// Path: src/main/java/biz/paluch/spinach/api/DisqueConnection.java
import biz.paluch.spinach.api.async.DisqueAsyncCommands;
import biz.paluch.spinach.api.rx.DisqueReactiveCommands;
import biz.paluch.spinach.api.sync.DisqueCommands;
import com.lambdaworks.redis.api.StatefulConnection;
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach.api;
/**
* A thread-safe connection to a Disque server. Multiple threads may share one {@link DisqueConnection}.
*
* A {@link com.lambdaworks.redis.protocol.ConnectionWatchdog} monitors each connection and reconnects automatically until
* {@link #close} is called. All pending commands will be (re)sent after successful reconnection.
*
* @param <K> Key type.
* @param <V> Value type.
* @author Mark Paluch
*/
public interface DisqueConnection<K, V> extends StatefulConnection<K, V> {
/**
* Returns the {@link DisqueCommands} API for the current connection. Does not create a new connection.
*
* @return the synchronous API for the underlying connection.
*/
DisqueCommands<K, V> sync();
/**
* Returns the {@link DisqueAsyncCommands} API for the current connection. Does not create a new connection.
*
* @return the asynchronous API for the underlying connection.
*/ | DisqueAsyncCommands<K, V> async(); |
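This row's completion is async(), the second of three views the interface exposes over a single underlying channel. A short sketch contrasting the synchronous and asynchronous views; ping() is chosen because every command interface in the context declares it, and the five-second wait is an arbitrary choice:

import java.util.concurrent.TimeUnit;

import biz.paluch.spinach.api.DisqueConnection;
import com.lambdaworks.redis.RedisFuture;

public class SyncVsAsyncSketch {

    static void demo(DisqueConnection<String, String> connection) throws Exception {
        // Synchronous view: blocks until the server replies.
        String reply = connection.sync().ping();

        // Asynchronous view: same channel, returns a future immediately.
        RedisFuture<String> future = connection.async().ping();
        String deferredReply = future.get(5, TimeUnit.SECONDS);

        System.out.println(reply + " / " + deferredReply);
    }
}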
mp911de/spinach | src/main/java/biz/paluch/spinach/api/DisqueConnection.java | // Path: src/main/java/biz/paluch/spinach/api/async/DisqueAsyncCommands.java
// public interface DisqueAsyncCommands<K, V> extends DisqueJobAsyncCommands<K, V>, DisqueQueueAsyncCommands<K, V>,
// DisqueServerAsyncCommands<K, V>, DisqueClusterAsyncCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// RedisFuture<String> auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method has been called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// RedisFuture<String> ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// RedisFuture<String> quit();
//
// }
//
// Path: src/main/java/biz/paluch/spinach/api/rx/DisqueReactiveCommands.java
// public interface DisqueReactiveCommands<K, V> extends DisqueJobReactiveCommands<K, V>, DisqueQueueReactiveCommands<K, V>,
// DisqueServerReactiveCommands<K, V>, DisqueClusterReactiveCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// Observable<String> auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method has been called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// Observable<String> ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// Observable<String> quit();
//
// }
//
// Path: src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java
// public interface DisqueCommands<K, V> extends DisqueJobCommands<K, V>, DisqueQueueCommands<K, V>, DisqueServerCommands<K, V>,
// DisqueClusterCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// String auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method has been called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// String ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// String quit();
//
// /**
// * Set the default timeout for operations.
// *
// * @param timeout the timeout value
// * @param unit the unit of the timeout value
// */
// void setTimeout(long timeout, TimeUnit unit);
// }
| import biz.paluch.spinach.api.async.DisqueAsyncCommands;
import biz.paluch.spinach.api.rx.DisqueReactiveCommands;
import biz.paluch.spinach.api.sync.DisqueCommands;
import com.lambdaworks.redis.api.StatefulConnection; | /*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach.api;
/**
* A thread-safe connection to a Disque server. Multiple threads may share one {@link DisqueConnection}.
*
* A {@link com.lambdaworks.redis.protocol.ConnectionWatchdog} monitors each connection and reconnects automatically until
* {@link #close} is called. All pending commands will be (re)sent after successful reconnection.
*
* @param <K> Key type.
* @param <V> Value type.
* @author Mark Paluch
*/
public interface DisqueConnection<K, V> extends StatefulConnection<K, V> {
/**
* Returns the {@link DisqueCommands} API for the current connection. Does not create a new connection.
*
* @return the synchronous API for the underlying connection.
*/
DisqueCommands<K, V> sync();
/**
* Returns the {@link DisqueAsyncCommands} API for the current connection. Does not create a new connection.
*
* @return the asynchronous API for the underlying connection.
*/
DisqueAsyncCommands<K, V> async();
/**
* Returns the {@link DisqueReactiveCommands} API for the current connection. Does not create a new connection.
*
* @return the reactive API for the underlying connection.
*/ | // Path: src/main/java/biz/paluch/spinach/api/async/DisqueAsyncCommands.java
// public interface DisqueAsyncCommands<K, V> extends DisqueJobAsyncCommands<K, V>, DisqueQueueAsyncCommands<K, V>,
// DisqueServerAsyncCommands<K, V>, DisqueClusterAsyncCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// RedisFuture<String> auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method has been called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// RedisFuture<String> ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// RedisFuture<String> quit();
//
// }
//
// Path: src/main/java/biz/paluch/spinach/api/rx/DisqueReactiveCommands.java
// public interface DisqueReactiveCommands<K, V> extends DisqueJobReactiveCommands<K, V>, DisqueQueueReactiveCommands<K, V>,
// DisqueServerReactiveCommands<K, V>, DisqueClusterReactiveCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// Observable<String> auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method has been called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// Observable<String> ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// Observable<String> quit();
//
// }
//
// Path: src/main/java/biz/paluch/spinach/api/sync/DisqueCommands.java
// public interface DisqueCommands<K, V> extends DisqueJobCommands<K, V>, DisqueQueueCommands<K, V>, DisqueServerCommands<K, V>,
// DisqueClusterCommands<K, V> {
//
// /**
// * Authenticate to the server.
// *
// * @param password the password
// * @return String simple-string-reply
// */
// String auth(String password);
//
// /**
// * Close the connection. The connection becomes unusable as soon as this method has been called.
// */
// void close();
//
// /**
// *
// * @return the underlying connection.
// */
// DisqueConnection<K, V> getConnection();
//
// /**
// *
// * @return true if the connection is open (connected and not closed).
// */
// boolean isOpen();
//
// /**
// * Ping the server.
// *
// * @return simple-string-reply
// */
// String ping();
//
// /**
// * Close the connection.
// *
// * @return String simple-string-reply always OK.
// */
// String quit();
//
// /**
// * Set the default timeout for operations.
// *
// * @param timeout the timeout value
// * @param unit the unit of the timeout value
// */
// void setTimeout(long timeout, TimeUnit unit);
// }
// Path: src/main/java/biz/paluch/spinach/api/DisqueConnection.java
import biz.paluch.spinach.api.async.DisqueAsyncCommands;
import biz.paluch.spinach.api.rx.DisqueReactiveCommands;
import biz.paluch.spinach.api.sync.DisqueCommands;
import com.lambdaworks.redis.api.StatefulConnection;
/*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach.api;
/**
* A thread-safe connection to a Disque server. Multiple threads may share one {@link DisqueConnection}.
*
* A {@link com.lambdaworks.redis.protocol.ConnectionWatchdog} monitors each connection and reconnects automatically until
* {@link #close} is called. All pending commands will be (re)sent after successful reconnection.
*
* @param <K> Key type.
* @param <V> Value type.
* @author Mark Paluch
*/
public interface DisqueConnection<K, V> extends StatefulConnection<K, V> {
/**
* Returns the {@link DisqueCommands} API for the current connection. Does not create a new connection.
*
* @return the synchronous API for the underlying connection.
*/
DisqueCommands<K, V> sync();
/**
* Returns the {@link DisqueAsyncCommands} API for the current connection. Does not create a new connection.
*
* @return the asynchronous API for the underlying connection.
*/
DisqueAsyncCommands<K, V> async();
/**
* Returns the {@link DisqueReactiveCommands} API for the current connection. Does not create a new connection.
*
* @return the reactive API for the underlying connection.
*/ | DisqueReactiveCommands<K, V> reactive(); |
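The final accessor, reactive(), hands back Observables; that these are RxJava 1's rx.Observable is an assumption about the import the context elides, consistent with the Observable<String> signatures shown above. A sketch of its use, assuming the cold-observable semantics of lettuce's Rx API, where nothing is sent until subscription:

import biz.paluch.spinach.api.DisqueConnection;
import rx.Observable;

public class ReactivePingSketch {

    static void demo(DisqueConnection<String, String> connection) {
        Observable<String> ping = connection.reactive().ping();

        // The PING is dispatched when a subscriber attaches.
        ping.subscribe(reply -> System.out.println("reply: " + reply));
    }
}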
mp911de/spinach | src/main/java/biz/paluch/spinach/DisqueClient.java | // Path: src/main/java/biz/paluch/spinach/api/DisqueConnection.java
// public interface DisqueConnection<K, V> extends StatefulConnection<K, V> {
//
// /**
// * Returns the {@link DisqueCommands} API for the current connection. Does not create a new connection.
// *
// * @return the synchronous API for the underlying connection.
// */
// DisqueCommands<K, V> sync();
//
// /**
// * Returns the {@link DisqueAsyncCommands} API for the current connection. Does not create a new connection.
// *
// * @return the asynchronous API for the underlying connection.
// */
// DisqueAsyncCommands<K, V> async();
//
// /**
// * Returns the {@link DisqueReactiveCommands} API for the current connection. Does not create a new connection.
// *
// * @return the reactive API for the underlying connection.
// */
// DisqueReactiveCommands<K, V> reactive();
//
// }
| import com.lambdaworks.redis.resource.ClientResources;
import biz.paluch.spinach.api.DisqueConnection;
import biz.paluch.spinach.impl.*;
import java.net.ConnectException;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
import com.lambdaworks.redis.*;
import com.lambdaworks.redis.codec.RedisCodec;
import com.lambdaworks.redis.codec.Utf8StringCodec;
import com.lambdaworks.redis.internal.LettuceAssert;
import com.lambdaworks.redis.protocol.CommandHandler;
import com.lambdaworks.redis.protocol.RedisCommand; | * @return a new instance of {@link DisqueClient}
*/
public static DisqueClient create(ClientResources clientResources, String uri) {
assertNotNull(clientResources);
LettuceAssert.notNull(uri, "uri must not be null");
return create(clientResources, DisqueURI.create(uri));
}
/**
* Create a new client that connects to the supplied {@link DisqueURI} with shared {@link ClientResources}. You need to
* shut down the {@link ClientResources} upon shutting down your application. The supplied {@link DisqueURI} is used when
* {@link #connect()} is called.
*
* @param clientResources the client resources, must not be {@literal null}
* @param disqueURI the Disque URI, must not be {@literal null}
* @return a new instance of {@link DisqueClient}
*/
public static DisqueClient create(ClientResources clientResources, DisqueURI disqueURI) {
assertNotNull(clientResources);
assertNotNull(disqueURI);
return new DisqueClient(clientResources, disqueURI);
}
/**
* Open a new connection to a Disque server that treats keys and values as UTF-8 strings. This method requires the
* {@link DisqueURI} to have been specified when constructing the client. Command timeouts are applied from the default
* {@link #setDefaultTimeout(long, TimeUnit)} settings.
*
* @return A new connection.
*/ | public DisqueConnection<String, String> connect() {
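A usage note on the factories above: shared ClientResources are owned by the caller and must be shut down last. A sketch, assuming lettuce's DefaultClientResources implementation:

import com.lambdaworks.redis.resource.ClientResources;
import com.lambdaworks.redis.resource.DefaultClientResources;

import biz.paluch.spinach.DisqueClient;
import biz.paluch.spinach.DisqueURI;

public class SharedResourcesSketch {
    public static void main(String[] args) {
        ClientResources resources = DefaultClientResources.create();

        // Both clients reuse the same event loops and thread pools.
        DisqueClient a = DisqueClient.create(resources, "disque://localhost:7711");
        DisqueClient b = DisqueClient.create(resources, DisqueURI.create("disque://localhost:7712"));

        a.shutdown();
        b.shutdown();
        resources.shutdown(); // shut shared resources down after all clients
    }
}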
mp911de/spinach | src/main/java/biz/paluch/spinach/api/rx/DisqueReactiveCommands.java | // Path: src/main/java/biz/paluch/spinach/api/DisqueConnection.java
// public interface DisqueConnection<K, V> extends StatefulConnection<K, V> {
//
// /**
// * Returns the {@link DisqueCommands} API for the current connection. Does not create a new connection.
// *
// * @return the synchronous API for the underlying connection.
// */
// DisqueCommands<K, V> sync();
//
// /**
// * Returns the {@link DisqueAsyncCommands} API for the current connection. Does not create a new connection.
// *
// * @return the asynchronous API for the underlying connection.
// */
// DisqueAsyncCommands<K, V> async();
//
// /**
// * Returns the {@link DisqueReactiveCommands} API for the current connection. Does not create a new connection.
// *
// * @return the reactive API for the underlying connection.
// */
// DisqueReactiveCommands<K, V> reactive();
//
// }
| import rx.Observable;
import biz.paluch.spinach.api.DisqueConnection; | /*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach.api.rx;
/**
* Reactive commands for Disque. This API is thread-safe.
*
* @param <K> Key type.
* @param <V> Value type.
* @author Mark Paluch
*/
public interface DisqueReactiveCommands<K, V> extends DisqueJobReactiveCommands<K, V>, DisqueQueueReactiveCommands<K, V>,
DisqueServerReactiveCommands<K, V>, DisqueClusterReactiveCommands<K, V> {
/**
* Authenticate to the server.
*
* @param password the password
* @return String simple-string-reply
*/
Observable<String> auth(String password);
/**
* Close the connection. The connection becomes unusable as soon as this method has been called.
*/
void close();
/**
*
* @return the underlying connection.
*/ | DisqueConnection<K, V> getConnection();
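A minimal sketch of the reactive surface above, assuming an already connected DisqueConnection<String, String> and RxJava 1.x (matching the rx.Observable import); it blocks only to keep the demo linear:

import biz.paluch.spinach.api.DisqueConnection;
import biz.paluch.spinach.api.rx.DisqueReactiveCommands;

public class ReactiveAuthSketch {
    static void authenticate(DisqueConnection<String, String> connection, String password) {
        DisqueReactiveCommands<String, String> rx = connection.reactive();
        // auth(...) emits the simple-string reply once the server accepts the password.
        String reply = rx.auth(password).toBlocking().single();
        System.out.println("AUTH reply: " + reply);
    }
}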
mp911de/spinach | src/main/java/biz/paluch/spinach/api/sync/DisqueServerCommands.java | // Path: src/main/java/biz/paluch/spinach/api/CommandType.java
// public enum CommandType implements ProtocolKeyword {
// // Jobs
// ADDJOB, ACKJOB, DELJOB, FASTACK, GETJOB, JSCAN, SHOW,
//
// // Queues
// ENQUEUE, DEQUEUE, NACK, PAUSE, QLEN, QPEEK, QSCAN, QSTAT, WORKING,
//
// // AOF
// BGREWRITEAOF,
//
// // Server commands
// AUTH, CONFIG, CLUSTER, CLIENT, COMMAND, DEBUG, INFO, /* LATENCY, */HELLO, PING, QUIT, SHUTDOWN, SLOWLOG, TIME;
//
// public final byte[] bytes;
//
// CommandType() {
// bytes = name().getBytes(LettuceCharsets.ASCII);
// }
//
// @Override
// public byte[] getBytes() {
// return bytes;
// }
//
// }
| import java.util.List;
import biz.paluch.spinach.api.CommandType;
import com.lambdaworks.redis.KillArgs; | /*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach.api.sync;
/**
* Synchronously executed commands related to Disque server control.
*
* @param <K> Key type.
* @param <V> Value type.
* @author Mark Paluch
*/
public interface DisqueServerCommands<K, V> {
/**
* Asynchronously rewrite the append-only file.
*
* @return String simple-string-reply always {@code OK}.
*/
String bgrewriteaof();
/**
* Get the current connection name.
*
* @return K bulk-string-reply The connection name, or a null bulk reply if no name is set.
*/
K clientGetname();
/**
* Kill connections of clients which are filtered by {@code killArgs}.
*
* @param killArgs args for the kill operation
* @return Long integer-reply number of killed connections
*/
Long clientKill(KillArgs killArgs);
/**
* Kill the connection of a client identified by ip:port.
*
* @param addr ip:port
* @return String simple-string-reply {@code OK} if the connection exists and has been closed
*/
String clientKill(String addr);
/**
* Get the list of client connections.
*
* @return String bulk-string-reply a unique string, formatted as follows: one client connection per line (separated by LF);
* each line is composed of a succession of property=value fields separated by a space character.
*/
String clientList();
/**
* Stop processing commands from clients for some time.
*
* @param timeout the timeout value in milliseconds
* @return String simple-string-reply The command returns OK or an error if the timeout is invalid.
*/
String clientPause(long timeout);
/**
* Set the current connection name.
*
* @param name the client name
* @return simple-string-reply {@code OK} if the connection name was successfully set.
*/
String clientSetname(String name);
/**
* Returns an array reply of details about all commands supported by the server.
*
* @return List<Object> array-reply
*/
List<Object> command();
/**
* Get the total number of commands.
*
* @return Long integer-reply of the number of commands known to this Disque server.
*/
Long commandCount();
/**
* Returns an array reply of details about the requested commands.
*
* @param commands the commands to query for
* @return List<Object> array-reply
*/ | List<Object> commandInfo(CommandType... commands);
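A sketch of a few of the server-control calls above, assuming the synchronous DisqueCommands facade aggregates DisqueServerCommands (mirroring the reactive facade shown earlier):

import biz.paluch.spinach.api.DisqueConnection;
import biz.paluch.spinach.api.sync.DisqueCommands;

public class ServerControlSketch {
    static void inspect(DisqueConnection<String, String> connection) {
        DisqueCommands<String, String> sync = connection.sync();

        sync.clientSetname("diagnostics");     // the name shows up in CLIENT LIST output
        System.out.println(sync.clientList()); // one LF-separated line per client
        System.out.println("commands: " + sync.commandCount());
    }
}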
mp911de/spinach | src/main/java/biz/paluch/spinach/cluster/QueueListener.java | // Path: src/main/java/biz/paluch/spinach/api/DisqueConnection.java
// public interface DisqueConnection<K, V> extends StatefulConnection<K, V> {
//
// /**
// * Returns the {@link DisqueCommands} API for the current connection. Does not create a new connection.
// *
// * @return the synchronous API for the underlying connection.
// */
// DisqueCommands<K, V> sync();
//
// /**
// * Returns the {@link DisqueAsyncCommands} API for the current connection. Does not create a new connection.
// *
// * @return the asynchronous API for the underlying connection.
// */
// DisqueAsyncCommands<K, V> async();
//
// /**
// * Returns the {@link DisqueReactiveCommands} API for the current connection. Does not create a new connection.
// *
// * @return the reactive API for the underlying connection.
// */
// DisqueReactiveCommands<K, V> reactive();
//
// }
//
// Path: src/main/java/biz/paluch/spinach/api/Job.java
// public class Job<K, V> {
// private K queue;
// private String id;
// private V body;
// private Map<String, Long> counters;
//
// protected Job() {
// }
//
// public Job(K queue, String id, V body, Map<String, Long> counters) {
// this.queue = queue;
// this.id = id;
// this.body = body;
// this.counters = counters;
// }
//
// /**
// *
// * @return the queue
// */
// public K getQueue() {
// return queue;
// }
//
// /**
// *
// * @return the JobId
// */
// public String getId() {
// return id;
// }
//
// /**
// *
// * @return the Job body
// */
// public V getBody() {
// return body;
// }
//
// /**
// * If requested with a WITHCOUNTERS flag, getjob also populates a counters field.
// *
// * @return map of counters
// */
// public Map<String, Long> getCounters() { return counters; }
// }
| import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Supplier;
import biz.paluch.spinach.api.DisqueConnection;
import biz.paluch.spinach.api.Job;
import io.netty.util.internal.ConcurrentSet;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;
import rx.Observable;
import rx.Scheduler;
import rx.Subscriber;
import rx.Subscription;
import rx.functions.Action0; |
if (reconnectTrigger != null) {
reconnectTrigger.unsubscribe();
reconnectTrigger = null;
}
}
/**
* Enable job locality tracking.
*/
void withJobLocalityTracking() {
this.jobLocalityTracking = true;
}
void withNodeSwitching(long nodeReconnectCheckInterval, TimeUnit nodeReconnectCheckTimeUnit) {
this.improveLocalityInterval = nodeReconnectCheckInterval;
this.improveLocalityTimeUnit = nodeReconnectCheckTimeUnit;
}
/**
* Initiate the node switch check.
*/
void switchNodes() {
for (GetJobsAction action : actions) {
action.switchNodes();
}
}
static class LocalityAwareConnection<K, V> {
private final NodeIdAwareSocketAddressSupplier socketAddressSupplier; | private final DisqueConnection<K, V> connection;
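The Job<K, V> bean in the context above is what consumers receive. A sketch of one fetch-and-acknowledge cycle, assuming getjob/ackjob methods on the synchronous API (names taken from the GETJOB/ACKJOB commands):

import biz.paluch.spinach.api.DisqueConnection;
import biz.paluch.spinach.api.Job;

public class JobConsumerSketch {
    static void consumeOne(DisqueConnection<String, String> connection) {
        // getjob blocks until a job is available on the queue.
        Job<String, String> job = connection.sync().getjob("myqueue");
        System.out.printf("queue=%s id=%s body=%s%n", job.getQueue(), job.getId(), job.getBody());
        connection.sync().ackjob(job.getId()); // acknowledge so the job is not redelivered
    }
}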
mp911de/spinach | src/main/java/biz/paluch/spinach/impl/ClusterAwareNodeSupport.java | // Path: src/main/java/biz/paluch/spinach/api/DisqueConnection.java
// public interface DisqueConnection<K, V> extends StatefulConnection<K, V> {
//
// /**
// * Returns the {@link DisqueCommands} API for the current connection. Does not create a new connection.
// *
// * @return the synchronous API for the underlying connection.
// */
// DisqueCommands<K, V> sync();
//
// /**
// * Returns the {@link DisqueAsyncCommands} API for the current connection. Does not create a new connection.
// *
// * @return the asynchronous API for the underlying connection.
// */
// DisqueAsyncCommands<K, V> async();
//
// /**
// * Returns the {@link DisqueReactiveCommands} API for the current connection. Does not create a new connection.
// *
// * @return the reactive API for the underlying connection.
// */
// DisqueReactiveCommands<K, V> reactive();
//
// }
//
// Path: src/main/java/biz/paluch/spinach/cluster/DisqueNode.java
// @SuppressWarnings("serial")
// public class DisqueNode implements Serializable {
// private String addr;
// private int port;
// private String nodeId;
//
// private boolean connected;
// private long pingSentTimestamp;
// private long pongReceivedTimestamp;
//
// private Set<NodeFlag> flags;
//
// public DisqueNode() {
//
// }
//
// public DisqueNode(String addr, int port, String nodeId, boolean connected, long pingSentTimestamp,
// long pongReceivedTimestamp, Set<NodeFlag> flags) {
// this.addr = addr;
// this.port = port;
// this.nodeId = nodeId;
// this.connected = connected;
// this.pingSentTimestamp = pingSentTimestamp;
// this.pongReceivedTimestamp = pongReceivedTimestamp;
// this.flags = flags;
// }
//
// public String getAddr() {
// return addr;
// }
//
// public void setAddr(String addr) {
// this.addr = addr;
// }
//
// public int getPort() {
// return port;
// }
//
// public void setPort(int port) {
// this.port = port;
// }
//
// public String getNodeId() {
// return nodeId;
// }
//
// public void setNodeId(String nodeId) {
// LettuceAssert.notNull(nodeId, "nodeId must not be null");
// this.nodeId = nodeId;
// }
//
// public boolean isConnected() {
// return connected;
// }
//
// public void setConnected(boolean connected) {
// this.connected = connected;
// }
//
// public long getPingSentTimestamp() {
// return pingSentTimestamp;
// }
//
// public void setPingSentTimestamp(long pingSentTimestamp) {
// this.pingSentTimestamp = pingSentTimestamp;
// }
//
// public long getPongReceivedTimestamp() {
// return pongReceivedTimestamp;
// }
//
// public void setPongReceivedTimestamp(long pongReceivedTimestamp) {
// this.pongReceivedTimestamp = pongReceivedTimestamp;
// }
//
// public Set<NodeFlag> getFlags() {
// return flags;
// }
//
// public void setFlags(Set<NodeFlag> flags) {
// this.flags = flags;
// }
//
// @Override
// public boolean equals(Object o) {
// if (this == o) {
// return true;
// }
// if (!(o instanceof DisqueNode)) {
// return false;
// }
//
// DisqueNode that = (DisqueNode) o;
//
// if (nodeId != null ? !nodeId.equals(that.nodeId) : that.nodeId != null) {
// return false;
// }
//
// return true;
// }
//
// @Override
// public int hashCode() {
// int result = 31 * (nodeId != null ? nodeId.hashCode() : 0);
// return result;
// }
//
// @Override
// public String toString() {
// final StringBuilder sb = new StringBuilder();
// sb.append(getClass().getSimpleName());
// sb.append(" [addr=").append(addr);
// sb.append(", port='").append(port).append('\'');
// sb.append(", nodeId='").append(nodeId).append('\'');
// sb.append(", connected=").append(connected);
// sb.append(", pingSentTimestamp=").append(pingSentTimestamp);
// sb.append(", pongReceivedTimestamp=").append(pongReceivedTimestamp);
// sb.append(", flags=").append(flags);
// sb.append(']');
// return sb.toString();
// }
//
// public enum NodeFlag {
// NOFLAGS, MYSELF, EVENTUAL_FAIL, FAIL, HANDSHAKE, NOADDR;
// }
// }
| import java.io.Serializable;
import java.util.*;
import com.lambdaworks.redis.internal.LettuceAssert;
import biz.paluch.spinach.api.DisqueConnection;
import biz.paluch.spinach.cluster.DisqueNode; | /*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach.impl;
/**
* Convenient base class for classes that rely on the cluster topology of Disque. Typically subclassed by
* {@link SocketAddressSupplier SocketAddressSuppliers}.
*
* @author Mark Paluch
*/
public abstract class ClusterAwareNodeSupport {
public final static int MAX_ALLOWED_PRIORITY = 99;
| private DisqueConnection<Object, Object> disqueConnection;
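One detail of the DisqueNode bean above worth demonstrating: equality is keyed on nodeId alone (see its equals/hashCode), so a node keeps its identity even when its address changes. A self-contained sketch using only the constructor shown:

import java.util.Collections;

import biz.paluch.spinach.cluster.DisqueNode;

public class NodeIdentitySketch {
    public static void main(String[] args) {
        DisqueNode a = new DisqueNode("10.0.0.1", 7711, "node-1", true, 0, 0,
                Collections.singleton(DisqueNode.NodeFlag.MYSELF));
        DisqueNode b = new DisqueNode("10.0.0.2", 7712, "node-1", false, 0, 0,
                Collections.singleton(DisqueNode.NodeFlag.NOFLAGS));

        System.out.println(a.equals(b)); // true: only nodeId participates in equality
    }
}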
mp911de/spinach | src/main/java/biz/paluch/spinach/impl/ClusterAwareNodeSupport.java | // Path: src/main/java/biz/paluch/spinach/api/DisqueConnection.java
// public interface DisqueConnection<K, V> extends StatefulConnection<K, V> {
//
// /**
// * Returns the {@link DisqueCommands} API for the current connection. Does not create a new connection.
// *
// * @return the synchronous API for the underlying connection.
// */
// DisqueCommands<K, V> sync();
//
// /**
// * Returns the {@link DisqueAsyncCommands} API for the current connection. Does not create a new connection.
// *
// * @return the asynchronous API for the underlying connection.
// */
// DisqueAsyncCommands<K, V> async();
//
// /**
// * Returns the {@link DisqueReactiveCommands} API for the current connection. Does not create a new connection.
// *
// * @return the reactive API for the underlying connection.
// */
// DisqueReactiveCommands<K, V> reactive();
//
// }
//
// Path: src/main/java/biz/paluch/spinach/cluster/DisqueNode.java
// @SuppressWarnings("serial")
// public class DisqueNode implements Serializable {
// private String addr;
// private int port;
// private String nodeId;
//
// private boolean connected;
// private long pingSentTimestamp;
// private long pongReceivedTimestamp;
//
// private Set<NodeFlag> flags;
//
// public DisqueNode() {
//
// }
//
// public DisqueNode(String addr, int port, String nodeId, boolean connected, long pingSentTimestamp,
// long pongReceivedTimestamp, Set<NodeFlag> flags) {
// this.addr = addr;
// this.port = port;
// this.nodeId = nodeId;
// this.connected = connected;
// this.pingSentTimestamp = pingSentTimestamp;
// this.pongReceivedTimestamp = pongReceivedTimestamp;
// this.flags = flags;
// }
//
// public String getAddr() {
// return addr;
// }
//
// public void setAddr(String addr) {
// this.addr = addr;
// }
//
// public int getPort() {
// return port;
// }
//
// public void setPort(int port) {
// this.port = port;
// }
//
// public String getNodeId() {
// return nodeId;
// }
//
// public void setNodeId(String nodeId) {
// LettuceAssert.notNull(nodeId, "nodeId must not be null");
// this.nodeId = nodeId;
// }
//
// public boolean isConnected() {
// return connected;
// }
//
// public void setConnected(boolean connected) {
// this.connected = connected;
// }
//
// public long getPingSentTimestamp() {
// return pingSentTimestamp;
// }
//
// public void setPingSentTimestamp(long pingSentTimestamp) {
// this.pingSentTimestamp = pingSentTimestamp;
// }
//
// public long getPongReceivedTimestamp() {
// return pongReceivedTimestamp;
// }
//
// public void setPongReceivedTimestamp(long pongReceivedTimestamp) {
// this.pongReceivedTimestamp = pongReceivedTimestamp;
// }
//
// public Set<NodeFlag> getFlags() {
// return flags;
// }
//
// public void setFlags(Set<NodeFlag> flags) {
// this.flags = flags;
// }
//
// @Override
// public boolean equals(Object o) {
// if (this == o) {
// return true;
// }
// if (!(o instanceof DisqueNode)) {
// return false;
// }
//
// DisqueNode that = (DisqueNode) o;
//
// if (nodeId != null ? !nodeId.equals(that.nodeId) : that.nodeId != null) {
// return false;
// }
//
// return true;
// }
//
// @Override
// public int hashCode() {
// int result = 31 * (nodeId != null ? nodeId.hashCode() : 0);
// return result;
// }
//
// @Override
// public String toString() {
// final StringBuilder sb = new StringBuilder();
// sb.append(getClass().getSimpleName());
// sb.append(" [addr=").append(addr);
// sb.append(", port='").append(port).append('\'');
// sb.append(", nodeId='").append(nodeId).append('\'');
// sb.append(", connected=").append(connected);
// sb.append(", pingSentTimestamp=").append(pingSentTimestamp);
// sb.append(", pongReceivedTimestamp=").append(pongReceivedTimestamp);
// sb.append(", flags=").append(flags);
// sb.append(']');
// return sb.toString();
// }
//
// public enum NodeFlag {
// NOFLAGS, MYSELF, EVENTUAL_FAIL, FAIL, HANDSHAKE, NOADDR;
// }
// }
| import java.io.Serializable;
import java.util.*;
import com.lambdaworks.redis.internal.LettuceAssert;
import biz.paluch.spinach.api.DisqueConnection;
import biz.paluch.spinach.cluster.DisqueNode; | /*
* Copyright 2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package biz.paluch.spinach.impl;
/**
* Convenient base class for classes that rely on the cluster topology of Disque. Typically subclassed by
* {@link SocketAddressSupplier SocketAddressSuppliers}.
*
* @author Mark Paluch
*/
public abstract class ClusterAwareNodeSupport {
public final static int MAX_ALLOWED_PRIORITY = 99;
private DisqueConnection<Object, Object> disqueConnection; | private final List<DisqueNode> nodes = new ArrayList<>();
PvdBerg1998/PNet | src/main/java/nl/pvdberg/pnet/server/Server.java | // Path: src/main/java/nl/pvdberg/pnet/event/PNetListener.java
// public interface PNetListener
// {
// /**
// * Called when a connection is made
// * @param c Connected Client
// */
// void onConnect(final Client c);
//
// /**
// * Called when a connection is lost
// * @param c Lost Client
// */
// void onDisconnect(final Client c);
//
// /**
// * Called when a new Packet has been received. May throw a caught and silenced IOException
// * @param p New Packet
// * @param c Sender
// * @throws IOException when anything goes wrong during data extraction. This exception is caught because invalid Packets should not crash the Client or Server
// */
// void onReceive(final Packet p, final Client c) throws IOException;
// }
| import nl.pvdberg.pnet.event.PNetListener; | /*
* MIT License
*
* Copyright (c) 2017 Pim van den Berg
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package nl.pvdberg.pnet.server;
public interface Server
{
/**
* Sets the event listener
* @param serverListener Nullable event listener
*/ | void setListener(final PNetListener serverListener);
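A sketch of wiring a PNetListener into a Server. PlainServer is assumed from the test package that appears further below (nl.pvdberg.pnet.server.util); port and echo behavior are illustrative:

import java.io.IOException;

import nl.pvdberg.pnet.client.Client;
import nl.pvdberg.pnet.event.PNetListener;
import nl.pvdberg.pnet.packet.Packet;
import nl.pvdberg.pnet.server.Server;
import nl.pvdberg.pnet.server.util.PlainServer;

public class EchoServerSketch {
    public static void main(String[] args) throws Exception {
        Server server = new PlainServer();
        server.setListener(new PNetListener() {
            @Override public void onConnect(final Client c) { System.out.println("+ " + c.getInetAddress()); }
            @Override public void onDisconnect(final Client c) { System.out.println("- " + c.getInetAddress()); }
            @Override public void onReceive(final Packet p, final Client c) throws IOException {
                c.send(p); // echo every received packet back to its sender
            }
        });
        server.start(42365);
    }
}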
PvdBerg1998/PNet | src/main/java/nl/pvdberg/pnet/event/ReceiveListener.java | // Path: src/main/java/nl/pvdberg/pnet/client/Client.java
// public interface Client
// {
// /**
// * Sets the event listener
// * @param clientListener Nullable event listener
// */
// void setClientListener(final PNetListener clientListener);
//
// /**
// * Connects to given host:port
// * @throws IllegalStateException when Client is not closed
// * @return Successful
// */
// boolean connect(final String host, final int port);
//
// /**
// * Directly sets socket in Client
// * @param socket Socket to be used
// * @throws IOException when unable to use given Socket
// * @throws IllegalStateException when Client is not closed
// */
// void setSocket(final Socket socket) throws IOException;
//
// /**
// * Sends given Packet
// * @param packet Packet to send
// * @return Successful
// */
// boolean send(final Packet packet);
//
// /**
// * Closes listener thread and socket of this Client
// */
// void close();
//
// /**
// * Returns whether the Client has an active connection
// * @return Connected
// */
// boolean isConnected();
//
// /**
// * Returns InetAddress of this Client
// * @return InetAddress of this Client
// */
// InetAddress getInetAddress();
//
// /**
// * Returns current Socket
// * @return Socket
// */
// Socket getSocket();
// }
| import nl.pvdberg.pnet.client.Client; | /*
* MIT License
*
* Copyright (c) 2017 Pim van den Berg
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package nl.pvdberg.pnet.event;
public abstract class ReceiveListener implements PNetListener
{
@Override | public void onConnect(final Client c)
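ReceiveListener reads as an adapter: the completion above begins an empty onConnect, suggesting the connect/disconnect callbacks are stubbed so subclasses override only onReceive. A sketch under that assumption:

import java.io.IOException;

import nl.pvdberg.pnet.client.Client;
import nl.pvdberg.pnet.event.ReceiveListener;
import nl.pvdberg.pnet.packet.Packet;

public class LoggingReceiver extends ReceiveListener {
    @Override
    public void onReceive(final Packet p, final Client c) throws IOException {
        // Connect/disconnect are inherited no-ops; only packet handling matters here.
        System.out.println("from " + c.getInetAddress() + ": " + p);
    }
}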
PvdBerg1998/PNet | src/test/java/nl/pvdberg/pnet/server/util/PlainServerTest.java | // Path: src/main/java/nl/pvdberg/pnet/server/Server.java
// public interface Server
// {
// /**
// * Sets the event listener
// * @param serverListener Nullable event listener
// */
// void setListener(final PNetListener serverListener);
//
// /**
// * Starts the server at given port
// * @param port Port to listen to
// * @return Successful
// */
// boolean start(int port);
//
// /**
// * Closes all sockets and stops the acceptor thread
// */
// void stop();
// }
| import static org.junit.Assert.assertTrue;
import nl.pvdberg.pnet.server.Server;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.assertFalse; | /*
* MIT License
*
* Copyright (c) 2017 Pim van den Berg
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package nl.pvdberg.pnet.server.util;
public class PlainServerTest
{
protected static final int port1 = 42365;
protected static final int port2 = 42366;
| protected Server server1;
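The fixture above declares two servers and two ports, which suggests bind-conflict coverage. A sketch of that style of test, assuming PlainServer and JUnit 4 as in the imports shown; the expectation that a second bind on a taken port makes start(int) return false follows from its "Successful" javadoc and is an assumption:

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import nl.pvdberg.pnet.server.Server;
import nl.pvdberg.pnet.server.util.PlainServer;
import org.junit.Test;

public class ServerLifecycleSketch {
    @Test
    public void secondBindOnSamePortFails() throws Exception {
        final Server a = new PlainServer();
        final Server b = new PlainServer();

        assertTrue(a.start(42365));  // first bind succeeds on a free port
        assertFalse(b.start(42365)); // assumed: the port is taken, so start reports failure

        a.stop();
        b.stop();
    }
}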
PvdBerg1998/PNet | src/main/java/nl/pvdberg/pnet/event/PacketHandler.java | // Path: src/main/java/nl/pvdberg/pnet/client/Client.java
// public interface Client
// {
// /**
// * Sets the event listener
// * @param clientListener Nullable event listener
// */
// void setClientListener(final PNetListener clientListener);
//
// /**
// * Connects to given host:port
// * @throws IllegalStateException when Client is not closed
// * @return Successful
// */
// boolean connect(final String host, final int port);
//
// /**
// * Directly sets socket in Client
// * @param socket Socket to be used
// * @throws IOException when unable to use given Socket
// * @throws IllegalStateException when Client is not closed
// */
// void setSocket(final Socket socket) throws IOException;
//
// /**
// * Sends given Packet
// * @param packet Packet to send
// * @return Successful
// */
// boolean send(final Packet packet);
//
// /**
// * Closes listener thread and socket of this Client
// */
// void close();
//
// /**
// * Returns whether the Client has an active connection
// * @return Connected
// */
// boolean isConnected();
//
// /**
// * Returns InetAddress of this Client
// * @return InetAddress of this Client
// */
// InetAddress getInetAddress();
//
// /**
// * Returns current Socket
// * @return Socket
// */
// Socket getSocket();
// }
//
// Path: src/main/java/nl/pvdberg/pnet/packet/Packet.java
// public class Packet
// {
// private final PacketType packetType;
// private final short packetID;
// private final int dataLength;
// private final byte[] data;
//
// public enum PacketType
// {
// Request,
// Reply;
//
// public static final PacketType[] fastValues = values();
// }
//
// /**
// * Creates a new immutable Packet
// * @param packetType Packet Type
// * @param packetID Packet ID
// * @param data Packet Data
// */
// public Packet(final PacketType packetType, final short packetID, final byte[] data)
// {
// this.packetType = packetType;
// this.packetID = packetID;
// dataLength = data.length;
// this.data = data;
// }
//
// /**
// * Returns Packet Type
// * @return Packet Type
// */
// public PacketType getPacketType()
// {
// return packetType;
// }
//
// /**
// * Returns whether Packet is of type Request
// * @return PacketType is Request
// */
// public boolean isRequest()
// {
// return packetType == PacketType.Request;
// }
//
// /**
// * Returns whether Packet is of type Reply
// * @return PacketType is Reply
// */
// public boolean isReply()
// {
// return packetType == PacketType.Reply;
// }
//
// /**
// * Returns Packet ID
// * @return Packet ID
// */
// public short getPacketID()
// {
// return packetID;
// }
//
// /**
// * Returns Data length
// * @return Data length
// */
// public int getDataLength()
// {
// return dataLength;
// }
//
// /**
// * Returns Packet data
// * @return Data
// */
// public byte[] getData()
// {
// return data;
// }
//
// /**
// * Writes Packet into DataOutputStream
// * @param out DataOutputStream to write into
// * @throws IOException when unable to write to stream
// */
// public void write(final DataOutputStream out) throws IOException
// {
// // Packet Type
// out.writeByte(packetType.ordinal());
//
// // Packet ID
// out.writeShort(packetID);
//
// // Data Length
// out.writeInt(dataLength);
//
// // Data
// out.write(data);
// }
//
// /**
// * Reads a Packet from raw input data
// * @param in DataInputStream to read from
// * @return Packet created from input
// * @throws IOException when unable to read from stream
// */
// public static Packet fromStream(final DataInputStream in) throws IOException
// {
// // Packet Type
// final Packet.PacketType packetType = Packet.PacketType.fastValues[in.readByte()];
//
// // Packet ID
// final short packetID = in.readShort();
//
// // Data Length
// final int dataLength = in.readInt();
//
// // Data
// final byte[] data = new byte[dataLength];
// in.readFully(data);
//
// return new Packet(
// packetType,
// packetID,
// data
// );
// }
//
// @Override
// public String toString()
// {
// return "Type: [" + packetType + "] ID: [" + packetID + "] Data: [" + dataLength + " bytes]";
// }
// }
| import nl.pvdberg.pnet.client.Client;
import nl.pvdberg.pnet.packet.Packet;
import java.io.IOException; | /*
* MIT License
*
* Copyright (c) 2017 Pim van den Berg
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package nl.pvdberg.pnet.event;
public interface PacketHandler
{
/**
* Handles an incoming Packet distributed by a PacketDistributer
* @param p Incoming Packet
* @param c Sender
*/ | // Path: src/main/java/nl/pvdberg/pnet/client/Client.java
// public interface Client
// {
// /**
// * Sets the event listener
// * @param clientListener Nullable event listener
// */
// void setClientListener(final PNetListener clientListener);
//
// /**
// * Connects to given host:port
// * @throws IllegalStateException when Client is not closed
// * @return Successful
// */
// boolean connect(final String host, final int port);
//
// /**
// * Directly sets socket in Client
// * @param socket Socket to be used
// * @throws IOException when unable to use given Socket
// * @throws IllegalStateException when Client is not closed
// */
// void setSocket(final Socket socket) throws IOException;
//
// /**
// * Sends given Packet
// * @param packet Packet to send
// * @return Successful
// */
// boolean send(final Packet packet);
//
// /**
// * Closes listener thread and socket of this Client
// */
// void close();
//
// /**
// * Returns whether the Client has an active connection
// * @return Connected
// */
// boolean isConnected();
//
// /**
// * Returns InetAddress of this Client
// * @return InetAddress of this Client
// */
// InetAddress getInetAddress();
//
// /**
// * Returns current Socket
// * @return Socket
// */
// Socket getSocket();
// }
//
// Path: src/main/java/nl/pvdberg/pnet/packet/Packet.java
// public class Packet
// {
// private final PacketType packetType;
// private final short packetID;
// private final int dataLength;
// private final byte[] data;
//
// public enum PacketType
// {
// Request,
// Reply;
//
// public static final PacketType[] fastValues = values();
// }
//
// /**
// * Creates a new immutable Packet
// * @param packetType Packet Type
// * @param packetID Packet ID
// * @param data Packet Data
// */
// public Packet(final PacketType packetType, final short packetID, final byte[] data)
// {
// this.packetType = packetType;
// this.packetID = packetID;
// dataLength = data.length;
// this.data = data;
// }
//
// /**
// * Returns Packet Type
// * @return Packet Type
// */
// public PacketType getPacketType()
// {
// return packetType;
// }
//
// /**
// * Returns whether Packet is of type Request
// * @return PacketType is Request
// */
// public boolean isRequest()
// {
// return packetType == PacketType.Request;
// }
//
// /**
// * Returns whether Packet is of type Reply
// * @return PacketType is Reply
// */
// public boolean isReply()
// {
// return packetType == PacketType.Reply;
// }
//
// /**
// * Returns Packet ID
// * @return Packet ID
// */
// public short getPacketID()
// {
// return packetID;
// }
//
// /**
// * Returns Data length
// * @return Data length
// */
// public int getDataLength()
// {
// return dataLength;
// }
//
// /**
// * Returns Packet data
// * @return Data
// */
// public byte[] getData()
// {
// return data;
// }
//
// /**
// * Writes Packet into DataOutputStream
// * @param out DataOutputStream to write into
// * @throws IOException when unable to write to stream
// */
// public void write(final DataOutputStream out) throws IOException
// {
// // Packet Type
// out.writeByte(packetType.ordinal());
//
// // Packet ID
// out.writeShort(packetID);
//
// // Data Length
// out.writeInt(dataLength);
//
// // Data
// out.write(data);
// }
//
// /**
// * Reads a Packet from raw input data
// * @param in DataInputStream to read from
// * @return Packet created from input
// * @throws IOException when unable to read from stream
// */
// public static Packet fromStream(final DataInputStream in) throws IOException
// {
// // Packet Type
// final Packet.PacketType packetType = Packet.PacketType.fastValues[in.readByte()];
//
// // Packet ID
// final short packetID = in.readShort();
//
// // Data Length
// final int dataLength = in.readInt();
//
// // Data
// final byte[] data = new byte[dataLength];
// in.readFully(data);
//
// return new Packet(
// packetType,
// packetID,
// data
// );
// }
//
// @Override
// public String toString()
// {
// return "Type: [" + packetType + "] ID: [" + packetID + "] Data: [" + dataLength + " bytes]";
// }
// }
// Path: src/main/java/nl/pvdberg/pnet/event/PacketHandler.java
import nl.pvdberg.pnet.client.Client;
import nl.pvdberg.pnet.packet.Packet;
import java.io.IOException;
/*
* MIT License
*
* Copyright (c) 2017 Pim van den Berg
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package nl.pvdberg.pnet.event;
public interface PacketHandler
{
/**
* Handles an incoming Packet distributed by a PacketDistributer
* @param p Incoming Packet
* @param c Sender
*/ | void handlePacket(final Packet p, final Client c) throws IOException; |
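For reference, implementing this interface needs nothing beyond the Packet and Client APIs quoted in the context above. The echo behavior itself is an illustrative choice, not part of the row:

// Illustrative PacketHandler: echoes every Request back as a Reply.
// Uses only the Packet and Client methods shown in the row's context.
import nl.pvdberg.pnet.client.Client;
import nl.pvdberg.pnet.event.PacketHandler;
import nl.pvdberg.pnet.packet.Packet;

import java.io.IOException;

public class EchoPacketHandler implements PacketHandler
{
    @Override
    public void handlePacket(final Packet p, final Client c) throws IOException
    {
        if (p.isRequest())
        {
            // Build a Reply with the same ID and payload, and send it back
            c.send(new Packet(Packet.PacketType.Reply, p.getPacketID(), p.getData()));
        }
    }
}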
PvdBerg1998/PNet | src/main/java/nl/pvdberg/pnet/event/DistributerListener.java | // Path: src/main/java/nl/pvdberg/pnet/client/Client.java
// public interface Client
// {
// /**
// * Sets the event listener
// * @param clientListener Nullable event listener
// */
// void setClientListener(final PNetListener clientListener);
//
// /**
// * Connects to given host:port
// * @throws IllegalStateException when Client is not closed
// * @return Successful
// */
// boolean connect(final String host, final int port);
//
// /**
// * Directly sets socket in Client
// * @param socket Socket to be used
// * @throws IOException when unable to use given Socket
// * @throws IllegalStateException when Client is not closed
// */
// void setSocket(final Socket socket) throws IOException;
//
// /**
// * Sends given Packet
// * @param packet Packet to send
// * @return Successful
// */
// boolean send(final Packet packet);
//
// /**
// * Closes listener thread and socket of this Client
// */
// void close();
//
// /**
// * Returns whether the Client has an active connection
// * @return Connected
// */
// boolean isConnected();
//
// /**
// * Returns InetAddress of this Client
// * @return InetAddress of this Client
// */
// InetAddress getInetAddress();
//
// /**
// * Returns current Socket
// * @return Socket
// */
// Socket getSocket();
// }
//
// Path: src/main/java/nl/pvdberg/pnet/packet/Packet.java
// public class Packet
// {
// private final PacketType packetType;
// private final short packetID;
// private final int dataLength;
// private final byte[] data;
//
// public enum PacketType
// {
// Request,
// Reply;
//
// public static final PacketType[] fastValues = values();
// }
//
// /**
// * Creates a new immutable Packet
// * @param packetType Packet Type
// * @param packetID Packet ID
// * @param data Packet Data
// */
// public Packet(final PacketType packetType, final short packetID, final byte[] data)
// {
// this.packetType = packetType;
// this.packetID = packetID;
// dataLength = data.length;
// this.data = data;
// }
//
// /**
// * Returns Packet Type
// * @return Packet Type
// */
// public PacketType getPacketType()
// {
// return packetType;
// }
//
// /**
// * Returns whether Packet is of type Request
// * @return PacketType is Request
// */
// public boolean isRequest()
// {
// return packetType == PacketType.Request;
// }
//
// /**
// * Returns whether Packet is of type Reply
// * @return PacketType is Reply
// */
// public boolean isReply()
// {
// return packetType == PacketType.Reply;
// }
//
// /**
// * Returns Packet ID
// * @return Packet ID
// */
// public short getPacketID()
// {
// return packetID;
// }
//
// /**
// * Returns Data length
// * @return Data length
// */
// public int getDataLength()
// {
// return dataLength;
// }
//
// /**
// * Returns Packet data
// * @return Data
// */
// public byte[] getData()
// {
// return data;
// }
//
// /**
// * Writes Packet into DataOutputStream
// * @param out DataOutputStream to write into
// * @throws IOException when unable to write to stream
// */
// public void write(final DataOutputStream out) throws IOException
// {
// // Packet Type
// out.writeByte(packetType.ordinal());
//
// // Packet ID
// out.writeShort(packetID);
//
// // Data Length
// out.writeInt(dataLength);
//
// // Data
// out.write(data);
// }
//
// /**
// * Reads a Packet from raw input data
// * @param in DataInputStream to read from
// * @return Packet created from input
// * @throws IOException when unable to read from stream
// */
// public static Packet fromStream(final DataInputStream in) throws IOException
// {
// // Packet Type
// final Packet.PacketType packetType = Packet.PacketType.fastValues[in.readByte()];
//
// // Packet ID
// final short packetID = in.readShort();
//
// // Data Length
// final int dataLength = in.readInt();
//
// // Data
// final byte[] data = new byte[dataLength];
// in.readFully(data);
//
// return new Packet(
// packetType,
// packetID,
// data
// );
// }
//
// @Override
// public String toString()
// {
// return "Type: [" + packetType + "] ID: [" + packetID + "] Data: [" + dataLength + " bytes]";
// }
// }
| import nl.pvdberg.pnet.client.Client;
import nl.pvdberg.pnet.packet.Packet;
import java.io.IOException; | /*
* MIT License
*
* Copyright (c) 2017 Pim van den Berg
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package nl.pvdberg.pnet.event;
/**
* Automatically calls onReceive on the given PacketDistributer
*/
public class DistributerListener implements PNetListener
{
private final PacketDistributer packetDistributer;
public DistributerListener(final PacketDistributer packetDistributer)
{
this.packetDistributer = packetDistributer;
}
@Override | // Path: src/main/java/nl/pvdberg/pnet/client/Client.java
// public interface Client
// {
// /**
// * Sets the event listener
// * @param clientListener Nullable event listener
// */
// void setClientListener(final PNetListener clientListener);
//
// /**
// * Connects to given host:port
// * @throws IllegalStateException when Client is not closed
// * @return Successful
// */
// boolean connect(final String host, final int port);
//
// /**
// * Directly sets socket in Client
// * @param socket Socket to be used
// * @throws IOException when unable to use given Socket
// * @throws IllegalStateException when Client is not closed
// */
// void setSocket(final Socket socket) throws IOException;
//
// /**
// * Sends given Packet
// * @param packet Packet to send
// * @return Successful
// */
// boolean send(final Packet packet);
//
// /**
// * Closes listener thread and socket of this Client
// */
// void close();
//
// /**
// * Returns whether the Client has an active connection
// * @return Connected
// */
// boolean isConnected();
//
// /**
// * Returns InetAddress of this Client
// * @return InetAddress of this Client
// */
// InetAddress getInetAddress();
//
// /**
// * Returns current Socket
// * @return Socket
// */
// Socket getSocket();
// }
//
// Path: src/main/java/nl/pvdberg/pnet/packet/Packet.java
// public class Packet
// {
// private final PacketType packetType;
// private final short packetID;
// private final int dataLength;
// private final byte[] data;
//
// public enum PacketType
// {
// Request,
// Reply;
//
// public static final PacketType[] fastValues = values();
// }
//
// /**
// * Creates a new immutable Packet
// * @param packetType Packet Type
// * @param packetID Packet ID
// * @param data Packet Data
// */
// public Packet(final PacketType packetType, final short packetID, final byte[] data)
// {
// this.packetType = packetType;
// this.packetID = packetID;
// dataLength = data.length;
// this.data = data;
// }
//
// /**
// * Returns Packet Type
// * @return Packet Type
// */
// public PacketType getPacketType()
// {
// return packetType;
// }
//
// /**
// * Returns whether Packet is of type Request
// * @return PacketType is Request
// */
// public boolean isRequest()
// {
// return packetType == PacketType.Request;
// }
//
// /**
// * Returns whether Packet is of type Reply
// * @return PacketType is Reply
// */
// public boolean isReply()
// {
// return packetType == PacketType.Reply;
// }
//
// /**
// * Returns Packet ID
// * @return Packet ID
// */
// public short getPacketID()
// {
// return packetID;
// }
//
// /**
// * Returns Data length
// * @return Data length
// */
// public int getDataLength()
// {
// return dataLength;
// }
//
// /**
// * Returns Packet data
// * @return Data
// */
// public byte[] getData()
// {
// return data;
// }
//
// /**
// * Writes Packet into DataOutputStream
// * @param out DataOutputStream to write into
// * @throws IOException when unable to write to stream
// */
// public void write(final DataOutputStream out) throws IOException
// {
// // Packet Type
// out.writeByte(packetType.ordinal());
//
// // Packet ID
// out.writeShort(packetID);
//
// // Data Length
// out.writeInt(dataLength);
//
// // Data
// out.write(data);
// }
//
// /**
// * Reads a Packet from raw input data
// * @param in DataInputStream to read from
// * @return Packet created from input
// * @throws IOException when unable to read from stream
// */
// public static Packet fromStream(final DataInputStream in) throws IOException
// {
// // Packet Type
// final Packet.PacketType packetType = Packet.PacketType.fastValues[in.readByte()];
//
// // Packet ID
// final short packetID = in.readShort();
//
// // Data Length
// final int dataLength = in.readInt();
//
// // Data
// final byte[] data = new byte[dataLength];
// in.readFully(data);
//
// return new Packet(
// packetType,
// packetID,
// data
// );
// }
//
// @Override
// public String toString()
// {
// return "Type: [" + packetType + "] ID: [" + packetID + "] Data: [" + dataLength + " bytes]";
// }
// }
// Path: src/main/java/nl/pvdberg/pnet/event/DistributerListener.java
import nl.pvdberg.pnet.client.Client;
import nl.pvdberg.pnet.packet.Packet;
import java.io.IOException;
/*
* MIT License
*
* Copyright (c) 2017 Pim van den Berg
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package nl.pvdberg.pnet.event;
/**
* Automatically calls onReceive on the given PacketDistributer
*/
public class DistributerListener implements PNetListener
{
private final PacketDistributer packetDistributer;
public DistributerListener(final PacketDistributer packetDistributer)
{
this.packetDistributer = packetDistributer;
}
@Override | public void onConnect(final Client c) |
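A sketch of how this listener is typically wired. PacketDistributer's own API is not quoted in this row, so its no-arg constructor and the addHandler(short, PacketHandler) signature below are assumptions:

// Illustrative wiring: Client -> DistributerListener.onReceive -> PacketDistributer.
// The PacketDistributer constructor and addHandler signature are assumed.
import nl.pvdberg.pnet.client.Client;
import nl.pvdberg.pnet.event.DistributerListener;
import nl.pvdberg.pnet.event.PacketDistributer;

public final class DistributerWiringSketch
{
    public static void wire(final Client client)
    {
        final PacketDistributer distributer = new PacketDistributer();

        // Assumed registration API: route Packets with ID 1 to a handler.
        // PacketHandler has a single abstract method, so a lambda works.
        distributer.addHandler((short) 1, (p, c) ->
                System.out.println("Received: " + p));

        // DistributerListener implements PNetListener, so it plugs in directly
        client.setClientListener(new DistributerListener(distributer));
    }
}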
PvdBerg1998/PNet | src/main/java/nl/pvdberg/pnet/factory/ClientFactory.java | // Path: src/main/java/nl/pvdberg/pnet/client/Client.java
// public interface Client
// {
// /**
// * Sets the event listener
// * @param clientListener Nullable event listener
// */
// void setClientListener(final PNetListener clientListener);
//
// /**
// * Connects to given host:port
// * @throws IllegalStateException when Client is not closed
// * @return Successful
// */
// boolean connect(final String host, final int port);
//
// /**
// * Directly sets socket in Client
// * @param socket Socket to be used
// * @throws IOException when unable to use given Socket
// * @throws IllegalStateException when Client is not closed
// */
// void setSocket(final Socket socket) throws IOException;
//
// /**
// * Sends given Packet
// * @param packet Packet to send
// * @return Successful
// */
// boolean send(final Packet packet);
//
// /**
// * Closes listener thread and socket of this Client
// */
// void close();
//
// /**
// * Returns whether the Client has an active connection
// * @return Connected
// */
// boolean isConnected();
//
// /**
// * Returns InetAddress of this Client
// * @return InetAddress of this Client
// */
// InetAddress getInetAddress();
//
// /**
// * Returns current Socket
// * @return Socket
// */
// Socket getSocket();
// }
| import nl.pvdberg.pnet.client.Client; | /*
* MIT License
*
* Copyright (c) 2017 Pim van den Berg
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package nl.pvdberg.pnet.factory;
public interface ClientFactory
{ | // Path: src/main/java/nl/pvdberg/pnet/client/Client.java
// public interface Client
// {
// /**
// * Sets the event listener
// * @param clientListener Nullable event listener
// */
// void setClientListener(final PNetListener clientListener);
//
// /**
// * Connects to given host:port
// * @throws IllegalStateException when Client is not closed
// * @return Successful
// */
// boolean connect(final String host, final int port);
//
// /**
// * Directly sets socket in Client
// * @param socket Socket to be used
// * @throws IOException when unable to use given Socket
// * @throws IllegalStateException when Client is not closed
// */
// void setSocket(final Socket socket) throws IOException;
//
// /**
// * Sends given Packet
// * @param packet Packet to send
// * @return Successful
// */
// boolean send(final Packet packet);
//
// /**
// * Closes listener thread and socket of this Client
// */
// void close();
//
// /**
// * Returns whether the Client has an active connection
// * @return Connected
// */
// boolean isConnected();
//
// /**
// * Returns InetAddress of this Client
// * @return InetAddress of this Client
// */
// InetAddress getInetAddress();
//
// /**
// * Returns current Socket
// * @return Socket
// */
// Socket getSocket();
// }
// Path: src/main/java/nl/pvdberg/pnet/factory/ClientFactory.java
import nl.pvdberg.pnet.client.Client;
/*
* MIT License
*
* Copyright (c) 2017 Pim van den Berg
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package nl.pvdberg.pnet.factory;
public interface ClientFactory
{ | Client getClient(); |
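A minimal implementation for reference. The row defines only the factory interface; PlainClient and its package are assumed here:

// Illustrative ClientFactory: supplies a fresh Client per call.
// PlainClient is an assumed Client implementation, not shown in this row.
import nl.pvdberg.pnet.client.Client;
import nl.pvdberg.pnet.client.util.PlainClient;
import nl.pvdberg.pnet.factory.ClientFactory;

public class PlainClientFactory implements ClientFactory
{
    @Override
    public Client getClient()
    {
        // A Server typically calls this once per accepted connection
        return new PlainClient();
    }
}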
PvdBerg1998/PNet | src/main/java/nl/pvdberg/pnet/client/Client.java | // Path: src/main/java/nl/pvdberg/pnet/event/PNetListener.java
// public interface PNetListener
// {
// /**
// * Called when a connection is made
// * @param c Connected Client
// */
// void onConnect(final Client c);
//
// /**
// * Called when a connection is lost
// * @param c Lost Client
// */
// void onDisconnect(final Client c);
//
// /**
// * Called when a new Packet has been received. May throw a caught and silenced IOException
// * @param p New Packet
// * @param c Sender
// * @throws IOException when anything goes wrong during data extraction. This exception is caught because invalid Packets should not crash the Client or Server
// */
// void onReceive(final Packet p, final Client c) throws IOException;
// }
//
// Path: src/main/java/nl/pvdberg/pnet/packet/Packet.java
// public class Packet
// {
// private final PacketType packetType;
// private final short packetID;
// private final int dataLength;
// private final byte[] data;
//
// public enum PacketType
// {
// Request,
// Reply;
//
// public static final PacketType[] fastValues = values();
// }
//
// /**
// * Creates a new immutable Packet
// * @param packetType Packet Type
// * @param packetID Packet ID
// * @param data Packet Data
// */
// public Packet(final PacketType packetType, final short packetID, final byte[] data)
// {
// this.packetType = packetType;
// this.packetID = packetID;
// dataLength = data.length;
// this.data = data;
// }
//
// /**
// * Returns Packet Type
// * @return Packet Type
// */
// public PacketType getPacketType()
// {
// return packetType;
// }
//
// /**
// * Returns whether Packet is of type Request
// * @return PacketType is Request
// */
// public boolean isRequest()
// {
// return packetType == PacketType.Request;
// }
//
// /**
// * Returns whether Packet is of type Reply
// * @return PacketType is Reply
// */
// public boolean isReply()
// {
// return packetType == PacketType.Reply;
// }
//
// /**
// * Returns Packet ID
// * @return Packet ID
// */
// public short getPacketID()
// {
// return packetID;
// }
//
// /**
// * Returns Data length
// * @return Data length
// */
// public int getDataLength()
// {
// return dataLength;
// }
//
// /**
// * Returns Packet data
// * @return Data
// */
// public byte[] getData()
// {
// return data;
// }
//
// /**
// * Writes Packet into DataOutputStream
// * @param out DataOutputStream to write into
// * @throws IOException when unable to write to stream
// */
// public void write(final DataOutputStream out) throws IOException
// {
// // Packet Type
// out.writeByte(packetType.ordinal());
//
// // Packet ID
// out.writeShort(packetID);
//
// // Data Length
// out.writeInt(dataLength);
//
// // Data
// out.write(data);
// }
//
// /**
// * Reads a Packet from raw input data
// * @param in DataInputStream to read from
// * @return Packet created from input
// * @throws IOException when unable to read from stream
// */
// public static Packet fromStream(final DataInputStream in) throws IOException
// {
// // Packet Type
// final Packet.PacketType packetType = Packet.PacketType.fastValues[in.readByte()];
//
// // Packet ID
// final short packetID = in.readShort();
//
// // Data Length
// final int dataLength = in.readInt();
//
// // Data
// final byte[] data = new byte[dataLength];
// in.readFully(data);
//
// return new Packet(
// packetType,
// packetID,
// data
// );
// }
//
// @Override
// public String toString()
// {
// return "Type: [" + packetType + "] ID: [" + packetID + "] Data: [" + dataLength + " bytes]";
// }
// }
| import nl.pvdberg.pnet.event.PNetListener;
import nl.pvdberg.pnet.packet.Packet;
import java.io.IOException;
import java.net.InetAddress;
import java.net.Socket; | /*
* MIT License
*
* Copyright (c) 2017 Pim van den Berg
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package nl.pvdberg.pnet.client;
public interface Client
{
/**
* Sets the event listener
* @param clientListener Nullable event listener
*/ | // Path: src/main/java/nl/pvdberg/pnet/event/PNetListener.java
// public interface PNetListener
// {
// /**
// * Called when a connection is made
// * @param c Connected Client
// */
// void onConnect(final Client c);
//
// /**
// * Called when a connection is lost
// * @param c Lost Client
// */
// void onDisconnect(final Client c);
//
// /**
// * Called when a new Packet has been received. May throw a caught and silenced IOException
// * @param p New Packet
// * @param c Sender
// * @throws IOException when anything goes wrong during data extraction. This exception is caught because invalid Packets should not crash the Client or Server
// */
// void onReceive(final Packet p, final Client c) throws IOException;
// }
//
// Path: src/main/java/nl/pvdberg/pnet/packet/Packet.java
// public class Packet
// {
// private final PacketType packetType;
// private final short packetID;
// private final int dataLength;
// private final byte[] data;
//
// public enum PacketType
// {
// Request,
// Reply;
//
// public static final PacketType[] fastValues = values();
// }
//
// /**
// * Creates a new immutable Packet
// * @param packetType Packet Type
// * @param packetID Packet ID
// * @param data Packet Data
// */
// public Packet(final PacketType packetType, final short packetID, final byte[] data)
// {
// this.packetType = packetType;
// this.packetID = packetID;
// dataLength = data.length;
// this.data = data;
// }
//
// /**
// * Returns Packet Type
// * @return Packet Type
// */
// public PacketType getPacketType()
// {
// return packetType;
// }
//
// /**
// * Returns whether Packet is of type Request
// * @return PacketType is Request
// */
// public boolean isRequest()
// {
// return packetType == PacketType.Request;
// }
//
// /**
// * Returns whether Packet is of type Reply
// * @return PacketType is Reply
// */
// public boolean isReply()
// {
// return packetType == PacketType.Reply;
// }
//
// /**
// * Returns Packet ID
// * @return Packet ID
// */
// public short getPacketID()
// {
// return packetID;
// }
//
// /**
// * Returns Data length
// * @return Data length
// */
// public int getDataLength()
// {
// return dataLength;
// }
//
// /**
// * Returns Packet data
// * @return Data
// */
// public byte[] getData()
// {
// return data;
// }
//
// /**
// * Writes Packet into DataOutputStream
// * @param out DataOutputStream to write into
// * @throws IOException when unable to write to stream
// */
// public void write(final DataOutputStream out) throws IOException
// {
// // Packet Type
// out.writeByte(packetType.ordinal());
//
// // Packet ID
// out.writeShort(packetID);
//
// // Data Length
// out.writeInt(dataLength);
//
// // Data
// out.write(data);
// }
//
// /**
// * Reads a Packet from raw input data
// * @param in DataInputStream to fromStream from
// * @return Packet created from input
// * @throws IOException when unable to read from stream
// */
// public static Packet fromStream(final DataInputStream in) throws IOException
// {
// // Packet Type
// final Packet.PacketType packetType = Packet.PacketType.fastValues[in.readByte()];
//
// // Packet ID
// final short packetID = in.readShort();
//
// // Data Length
// final int dataLength = in.readInt();
//
// // Data
// final byte[] data = new byte[dataLength];
// in.readFully(data);
//
// return new Packet(
// packetType,
// packetID,
// data
// );
// }
//
// @Override
// public String toString()
// {
// return "Type: [" + packetType + "] ID: [" + packetID + "] Data: [" + dataLength + " bytes]";
// }
// }
// Path: src/main/java/nl/pvdberg/pnet/client/Client.java
import nl.pvdberg.pnet.event.PNetListener;
import nl.pvdberg.pnet.packet.Packet;
import java.io.IOException;
import java.net.InetAddress;
import java.net.Socket;
/*
* MIT License
*
* Copyright (c) 2017 Pim van den Berg
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package nl.pvdberg.pnet.client;
public interface Client
{
/**
* Sets the event listener
* @param clientListener Nullable event listener
*/ | void setClientListener(final PNetListener clientListener); |
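A short usage sketch of the interface above. PlainClient, the host, and the port are assumptions; the Packet constructor and PacketType enum come straight from the quoted context:

// Illustrative Client usage: connect, send one Request, close.
// PlainClient and the endpoint are assumed; the rest is the quoted API.
import nl.pvdberg.pnet.client.Client;
import nl.pvdberg.pnet.client.util.PlainClient;
import nl.pvdberg.pnet.packet.Packet;

import java.nio.charset.StandardCharsets;

public class ClientUsageSketch
{
    public static void main(final String[] args)
    {
        final Client client = new PlainClient();        // assumed implementation

        if (client.connect("localhost", 42365))         // returns success
        {
            final byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);
            client.send(new Packet(Packet.PacketType.Request, (short) 1, payload));
            client.close();                             // closes listener thread and socket
        }
    }
}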
PvdBerg1998/PNet | src/main/java/nl/pvdberg/pnet/client/Client.java | // Path: src/main/java/nl/pvdberg/pnet/event/PNetListener.java
// public interface PNetListener
// {
// /**
// * Called when a connection is made
// * @param c Connected Client
// */
// void onConnect(final Client c);
//
// /**
// * Called when a connection is lost
// * @param c Lost Client
// */
// void onDisconnect(final Client c);
//
// /**
// * Called when a new Packet has been received. May throw a caught and silenced IOException
// * @param p New Packet
// * @param c Sender
// * @throws IOException when anything goes wrong during data extraction. This exception is caught because invalid Packets should not crash the Client or Server
// */
// void onReceive(final Packet p, final Client c) throws IOException;
// }
//
// Path: src/main/java/nl/pvdberg/pnet/packet/Packet.java
// public class Packet
// {
// private final PacketType packetType;
// private final short packetID;
// private final int dataLength;
// private final byte[] data;
//
// public enum PacketType
// {
// Request,
// Reply;
//
// public static final PacketType[] fastValues = values();
// }
//
// /**
// * Creates a new immutable Packet
// * @param packetType Packet Type
// * @param packetID Packet ID
// * @param data Packet Data
// */
// public Packet(final PacketType packetType, final short packetID, final byte[] data)
// {
// this.packetType = packetType;
// this.packetID = packetID;
// dataLength = data.length;
// this.data = data;
// }
//
// /**
// * Returns Packet Type
// * @return Packet Type
// */
// public PacketType getPacketType()
// {
// return packetType;
// }
//
// /**
// * Returns whether Packet is of type Request
// * @return PacketType is Request
// */
// public boolean isRequest()
// {
// return packetType == PacketType.Request;
// }
//
// /**
// * Returns whether Packet is of type Reply
// * @return PacketType is Reply
// */
// public boolean isReply()
// {
// return packetType == PacketType.Reply;
// }
//
// /**
// * Returns Packet ID
// * @return Packet ID
// */
// public short getPacketID()
// {
// return packetID;
// }
//
// /**
// * Returns Data length
// * @return Data length
// */
// public int getDataLength()
// {
// return dataLength;
// }
//
// /**
// * Returns Packet data
// * @return Data
// */
// public byte[] getData()
// {
// return data;
// }
//
// /**
// * Writes Packet into DataOutputStream
// * @param out DataOutputStream to write into
// * @throws IOException when unable to write to stream
// */
// public void write(final DataOutputStream out) throws IOException
// {
// // Packet Type
// out.writeByte(packetType.ordinal());
//
// // Packet ID
// out.writeShort(packetID);
//
// // Data Length
// out.writeInt(dataLength);
//
// // Data
// out.write(data);
// }
//
// /**
// * Reads a Packet from raw input data
// * @param in DataInputStream to read from
// * @return Packet created from input
// * @throws IOException when unable to read from stream
// */
// public static Packet fromStream(final DataInputStream in) throws IOException
// {
// // Packet Type
// final Packet.PacketType packetType = Packet.PacketType.fastValues[in.readByte()];
//
// // Packet ID
// final short packetID = in.readShort();
//
// // Data Length
// final int dataLength = in.readInt();
//
// // Data
// final byte[] data = new byte[dataLength];
// in.readFully(data);
//
// return new Packet(
// packetType,
// packetID,
// data
// );
// }
//
// @Override
// public String toString()
// {
// return "Type: [" + packetType + "] ID: [" + packetID + "] Data: [" + dataLength + " bytes]";
// }
// }
| import nl.pvdberg.pnet.event.PNetListener;
import nl.pvdberg.pnet.packet.Packet;
import java.io.IOException;
import java.net.InetAddress;
import java.net.Socket; | /*
* MIT License
*
* Copyright (c) 2017 Pim van den Berg
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package nl.pvdberg.pnet.client;
public interface Client
{
/**
* Sets the event listener
* @param clientListener Nullable event listener
*/
void setClientListener(final PNetListener clientListener);
/**
* Connects to given host:port
* @throws IllegalStateException when Client is not closed
* @return Successful
*/
boolean connect(final String host, final int port);
/**
* Directly sets socket in Client
* @param socket Socket to be used
* @throws IOException when unable to use given Socket
* @throws IllegalStateException when Client is not closed
*/
void setSocket(final Socket socket) throws IOException;
/**
* Sends given Packet
* @param packet Packet to send
* @return Successful
*/ | // Path: src/main/java/nl/pvdberg/pnet/event/PNetListener.java
// public interface PNetListener
// {
// /**
// * Called when a connection is made
// * @param c Connected Client
// */
// void onConnect(final Client c);
//
// /**
// * Called when a connection is lost
// * @param c Lost Client
// */
// void onDisconnect(final Client c);
//
// /**
// * Called when a new Packet has been received. May throw a caught and silenced IOException
// * @param p New Packet
// * @param c Sender
// * @throws IOException when anything goes wrong during data extraction. This exception is caught because invalid Packets should not crash the Client or Server
// */
// void onReceive(final Packet p, final Client c) throws IOException;
// }
//
// Path: src/main/java/nl/pvdberg/pnet/packet/Packet.java
// public class Packet
// {
// private final PacketType packetType;
// private final short packetID;
// private final int dataLength;
// private final byte[] data;
//
// public enum PacketType
// {
// Request,
// Reply;
//
// public static final PacketType[] fastValues = values();
// }
//
// /**
// * Creates a new immutable Packet
// * @param packetType Packet Type
// * @param packetID Packet ID
// * @param data Packet Data
// */
// public Packet(final PacketType packetType, final short packetID, final byte[] data)
// {
// this.packetType = packetType;
// this.packetID = packetID;
// dataLength = data.length;
// this.data = data;
// }
//
// /**
// * Returns Packet Type
// * @return Packet Type
// */
// public PacketType getPacketType()
// {
// return packetType;
// }
//
// /**
// * Returns whether Packet is of type Request
// * @return PacketType is Request
// */
// public boolean isRequest()
// {
// return packetType == PacketType.Request;
// }
//
// /**
// * Returns whether Packet is of type Reply
// * @return PacketType is Reply
// */
// public boolean isReply()
// {
// return packetType == PacketType.Reply;
// }
//
// /**
// * Returns Packet ID
// * @return Packet ID
// */
// public short getPacketID()
// {
// return packetID;
// }
//
// /**
// * Returns Data length
// * @return Data length
// */
// public int getDataLength()
// {
// return dataLength;
// }
//
// /**
// * Returns Packet data
// * @return Data
// */
// public byte[] getData()
// {
// return data;
// }
//
// /**
// * Writes Packet into DataOutputStream
// * @param out DataOutputStream to write into
// * @throws IOException when unable to write to stream
// */
// public void write(final DataOutputStream out) throws IOException
// {
// // Packet Type
// out.writeByte(packetType.ordinal());
//
// // Packet ID
// out.writeShort(packetID);
//
// // Data Length
// out.writeInt(dataLength);
//
// // Data
// out.write(data);
// }
//
// /**
// * Reads a Packet from raw input data
// * @param in DataInputStream to read from
// * @return Packet created from input
// * @throws IOException when unable to read from stream
// */
// public static Packet fromStream(final DataInputStream in) throws IOException
// {
// // Packet Type
// final Packet.PacketType packetType = Packet.PacketType.fastValues[in.readByte()];
//
// // Packet ID
// final short packetID = in.readShort();
//
// // Data Length
// final int dataLength = in.readInt();
//
// // Data
// final byte[] data = new byte[dataLength];
// in.readFully(data);
//
// return new Packet(
// packetType,
// packetID,
// data
// );
// }
//
// @Override
// public String toString()
// {
// return "Type: [" + packetType + "] ID: [" + packetID + "] Data: [" + dataLength + " bytes]";
// }
// }
// Path: src/main/java/nl/pvdberg/pnet/client/Client.java
import nl.pvdberg.pnet.event.PNetListener;
import nl.pvdberg.pnet.packet.Packet;
import java.io.IOException;
import java.net.InetAddress;
import java.net.Socket;
/*
* MIT License
*
* Copyright (c) 2017 Pim van den Berg
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package nl.pvdberg.pnet.client;
public interface Client
{
/**
* Sets the event listener
* @param clientListener Nullable event listener
*/
void setClientListener(final PNetListener clientListener);
/**
* Connects to given host:port
* @throws IllegalStateException when Client is not closed
* @return Successful
*/
boolean connect(final String host, final int port);
/**
* Directly sets socket in Client
* @param socket Socket to be used
* @throws IOException when unable to use given Socket
* @throws IllegalStateException when Client is not closed
*/
void setSocket(final Socket socket) throws IOException;
/**
* Sends given Packet
* @param packet Packet to send
* @return Successful
*/ | boolean send(final Packet packet); |
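The Packet record above pins down a simple wire format: one byte for the type ordinal, a short for the ID, an int for the length, then the raw payload. A minimal round-trip sketch over in-memory streams (the class name and the "hello" payload are illustrative; everything else is the API shown above):

import java.io.*;
import nl.pvdberg.pnet.packet.Packet;

public class PacketRoundTrip
{
    public static void main(final String[] args) throws IOException
    {
        // Illustrative payload; any byte[] works
        final Packet original = new Packet(Packet.PacketType.Request, (short) 7, "hello".getBytes("UTF-8"));

        // Serialize using the format above: byte, short, int, data
        final ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        original.write(new DataOutputStream(buffer));

        // Deserialize from the same bytes
        final Packet copy = Packet.fromStream(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())));

        System.out.println(copy); // Type: [Request] ID: [7] Data: [5 bytes]
    }
}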
PvdBerg1998/PNet | src/main/java/nl/pvdberg/pnet/server/util/ServerDecorator.java | // Path: src/main/java/nl/pvdberg/pnet/event/PNetListener.java
// public interface PNetListener
// {
// /**
// * Called when a connection is made
// * @param c Connected Client
// */
// void onConnect(final Client c);
//
// /**
// * Called when a connection is lost
// * @param c Lost Client
// */
// void onDisconnect(final Client c);
//
// /**
// * Called when a new Packet has been received. May throw a caught and silenced IOException
// * @param p New Packet
// * @param c Sender
// * @throws IOException when anything goes wrong during data extraction. This exception is caught because invalid Packets should not crash the Client or Server
// */
// void onReceive(final Packet p, final Client c) throws IOException;
// }
//
// Path: src/main/java/nl/pvdberg/pnet/server/Server.java
// public interface Server
// {
// /**
// * Sets the event listener
// * @param serverListener Nullable event listener
// */
// void setListener(final PNetListener serverListener);
//
// /**
// * Starts the server at given port
// * @param port Port to listen to
// * @return Successful
// */
// boolean start(int port);
//
// /**
// * Closes all sockets and stops the acceptor thread
// */
// void stop();
// }
| import nl.pvdberg.pnet.event.PNetListener;
import nl.pvdberg.pnet.server.Server;
| package nl.pvdberg.pnet.server.util;
public class ServerDecorator implements Server
{
protected final Server server;
public ServerDecorator(final Server server)
{
this.server = server;
}
@Override
| // Path: src/main/java/nl/pvdberg/pnet/event/PNetListener.java
// public interface PNetListener
// {
// /**
// * Called when a connection is made
// * @param c Connected Client
// */
// void onConnect(final Client c);
//
// /**
// * Called when a connection is lost
// * @param c Lost Client
// */
// void onDisconnect(final Client c);
//
// /**
// * Called when a new Packet has been received. May throw a caught and silenced IOException
// * @param p New Packet
// * @param c Sender
// * @throws IOException when anything goes wrong during data extraction. This exception is caught because invalid Packets should not crash the Client or Server
// */
// void onReceive(final Packet p, final Client c) throws IOException;
// }
//
// Path: src/main/java/nl/pvdberg/pnet/server/Server.java
// public interface Server
// {
// /**
// * Sets the event listener
// * @param serverListener Nullable event listener
// */
// void setListener(final PNetListener serverListener);
//
// /**
// * Starts the server at given port
// * @param port Port to listen to
// * @return Successful
// */
// boolean start(int port);
//
// /**
// * Closes all sockets and stops the acceptor thread
// */
// void stop();
// }
// Path: src/main/java/nl/pvdberg/pnet/server/util/ServerDecorator.java
import nl.pvdberg.pnet.event.PNetListener;
import nl.pvdberg.pnet.server.Server;
package nl.pvdberg.pnet.server.util;
public class ServerDecorator implements Server
{
protected final Server server;
public ServerDecorator(final Server server)
{
this.server = server;
}
@Override
| public void setListener(final PNetListener serverListener)
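ServerDecorator is a classic decorator: it keeps a protected Server reference and forwards the interface methods to it, so cross-cutting behavior can be layered onto any Server implementation without touching it. A sketch of a subclass that logs around start (the LoggingServer name and messages are invented for illustration):

import nl.pvdberg.pnet.server.Server;
import nl.pvdberg.pnet.server.util.ServerDecorator;

public class LoggingServer extends ServerDecorator
{
    public LoggingServer(final Server server)
    {
        super(server);
    }

    @Override
    public boolean start(final int port)
    {
        // Extra behavior before and after delegating to the wrapped Server
        System.out.println("Starting server on port " + port);
        final boolean ok = server.start(port);
        System.out.println(ok ? "Server started" : "Server failed to start");
        return ok;
    }
}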
|
PvdBerg1998/PNet | src/main/java/nl/pvdberg/pnet/event/PNetListener.java | // Path: src/main/java/nl/pvdberg/pnet/client/Client.java
// public interface Client
// {
// /**
// * Sets the event listener
// * @param clientListener Nullable event listener
// */
// void setClientListener(final PNetListener clientListener);
//
// /**
// * Connects to given host:port
// * @throws IllegalStateException when Client is not closed
// * @return Successful
// */
// boolean connect(final String host, final int port);
//
// /**
// * Directly sets socket in Client
// * @param socket Socket to be used
// * @throws IOException when unable to use given Socket
// * @throws IllegalStateException when Client is not closed
// */
// void setSocket(final Socket socket) throws IOException;
//
// /**
// * Sends given Packet
// * @param packet Packet to send
// * @return Successful
// */
// boolean send(final Packet packet);
//
// /**
// * Closes listener thread and socket of this Client
// */
// void close();
//
// /**
// * Returns whether the Client has an active connection
// * @return Connected
// */
// boolean isConnected();
//
// /**
// * Returns InetAddress of this Client
// * @return InetAddress of this Client
// */
// InetAddress getInetAddress();
//
// /**
// * Returns current Socket
// * @return Socket
// */
// Socket getSocket();
// }
//
// Path: src/main/java/nl/pvdberg/pnet/packet/Packet.java
// public class Packet
// {
// private final PacketType packetType;
// private final short packetID;
// private final int dataLength;
// private final byte[] data;
//
// public enum PacketType
// {
// Request,
// Reply;
//
// public static final PacketType[] fastValues = values();
// }
//
// /**
// * Creates a new immutable Packet
// * @param packetType Packet Type
// * @param packetID Packet ID
// * @param data Packet Data
// */
// public Packet(final PacketType packetType, final short packetID, final byte[] data)
// {
// this.packetType = packetType;
// this.packetID = packetID;
// dataLength = data.length;
// this.data = data;
// }
//
// /**
// * Returns Packet Type
// * @return Packet Type
// */
// public PacketType getPacketType()
// {
// return packetType;
// }
//
// /**
// * Returns whether Packet is of type Request
// * @return PacketType is Request
// */
// public boolean isRequest()
// {
// return packetType == PacketType.Request;
// }
//
// /**
// * Returns whether Packet is of type Reply
// * @return PacketType is Reply
// */
// public boolean isReply()
// {
// return packetType == PacketType.Reply;
// }
//
// /**
// * Returns Packet ID
// * @return Packet ID
// */
// public short getPacketID()
// {
// return packetID;
// }
//
// /**
// * Returns Data length
// * @return Data length
// */
// public int getDataLength()
// {
// return dataLength;
// }
//
// /**
// * Returns Packet data
// * @return Data
// */
// public byte[] getData()
// {
// return data;
// }
//
// /**
// * Writes Packet into DataOutputStream
// * @param out DataOutputStream to write into
// * @throws IOException when unable to write to stream
// */
// public void write(final DataOutputStream out) throws IOException
// {
// // Packet Type
// out.writeByte(packetType.ordinal());
//
// // Packet ID
// out.writeShort(packetID);
//
// // Data Length
// out.writeInt(dataLength);
//
// // Data
// out.write(data);
// }
//
// /**
// * Reads a Packet from raw input data
// * @param in DataInputStream to read from
// * @return Packet created from input
// * @throws IOException when unable to read from stream
// */
// public static Packet fromStream(final DataInputStream in) throws IOException
// {
// // Packet Type
// final Packet.PacketType packetType = Packet.PacketType.fastValues[in.readByte()];
//
// // Packet ID
// final short packetID = in.readShort();
//
// // Data Length
// final int dataLength = in.readInt();
//
// // Data
// final byte[] data = new byte[dataLength];
// in.readFully(data);
//
// return new Packet(
// packetType,
// packetID,
// data
// );
// }
//
// @Override
// public String toString()
// {
// return "Type: [" + packetType + "] ID: [" + packetID + "] Data: [" + dataLength + " bytes]";
// }
// }
| import nl.pvdberg.pnet.client.Client;
import nl.pvdberg.pnet.packet.Packet;
import java.io.IOException; | /*
* MIT License
*
* Copyright (c) 2017 Pim van den Berg
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package nl.pvdberg.pnet.event;
public interface PNetListener
{
/**
* Called when a connection is made
* @param c Connected Client
*/ | // Path: src/main/java/nl/pvdberg/pnet/client/Client.java
// public interface Client
// {
// /**
// * Sets the event listener
// * @param clientListener Nullable event listener
// */
// void setClientListener(final PNetListener clientListener);
//
// /**
// * Connects to given host:port
// * @throws IllegalStateException when Client is not closed
// * @return Successful
// */
// boolean connect(final String host, final int port);
//
// /**
// * Directly sets socket in Client
// * @param socket Socket to be used
// * @throws IOException when unable to use given Socket
// * @throws IllegalStateException when Client is not closed
// */
// void setSocket(final Socket socket) throws IOException;
//
// /**
// * Sends given Packet
// * @param packet Packet to send
// * @return Successful
// */
// boolean send(final Packet packet);
//
// /**
// * Closes listener thread and socket of this Client
// */
// void close();
//
// /**
// * Returns whether the Client has an active connection
// * @return Connected
// */
// boolean isConnected();
//
// /**
// * Returns InetAddress of this Client
// * @return InetAddress of this Client
// */
// InetAddress getInetAddress();
//
// /**
// * Returns current Socket
// * @return Socket
// */
// Socket getSocket();
// }
//
// Path: src/main/java/nl/pvdberg/pnet/packet/Packet.java
// public class Packet
// {
// private final PacketType packetType;
// private final short packetID;
// private final int dataLength;
// private final byte[] data;
//
// public enum PacketType
// {
// Request,
// Reply;
//
// public static final PacketType[] fastValues = values();
// }
//
// /**
// * Creates a new immutable Packet
// * @param packetType Packet Type
// * @param packetID Packet ID
// * @param data Packet Data
// */
// public Packet(final PacketType packetType, final short packetID, final byte[] data)
// {
// this.packetType = packetType;
// this.packetID = packetID;
// dataLength = data.length;
// this.data = data;
// }
//
// /**
// * Returns Packet Type
// * @return Packet Type
// */
// public PacketType getPacketType()
// {
// return packetType;
// }
//
// /**
// * Returns whether Packet is of type Request
// * @return PacketType is Request
// */
// public boolean isRequest()
// {
// return packetType == PacketType.Request;
// }
//
// /**
// * Returns whether Packet is of type Reply
// * @return PacketType is Reply
// */
// public boolean isReply()
// {
// return packetType == PacketType.Reply;
// }
//
// /**
// * Returns Packet ID
// * @return Packet ID
// */
// public short getPacketID()
// {
// return packetID;
// }
//
// /**
// * Returns Data length
// * @return Data length
// */
// public int getDataLength()
// {
// return dataLength;
// }
//
// /**
// * Returns Packet data
// * @return Data
// */
// public byte[] getData()
// {
// return data;
// }
//
// /**
// * Writes Packet into DataOutputStream
// * @param out DataOutputStream to write into
// * @throws IOException when unable to write to stream
// */
// public void write(final DataOutputStream out) throws IOException
// {
// // Packet Type
// out.writeByte(packetType.ordinal());
//
// // Packet ID
// out.writeShort(packetID);
//
// // Data Length
// out.writeInt(dataLength);
//
// // Data
// out.write(data);
// }
//
// /**
// * Reads a Packet from raw input data
// * @param in DataInputStream to read from
// * @return Packet created from input
// * @throws IOException when unable to read from stream
// */
// public static Packet fromStream(final DataInputStream in) throws IOException
// {
// // Packet Type
// final Packet.PacketType packetType = Packet.PacketType.fastValues[in.readByte()];
//
// // Packet ID
// final short packetID = in.readShort();
//
// // Data Length
// final int dataLength = in.readInt();
//
// // Data
// final byte[] data = new byte[dataLength];
// in.readFully(data);
//
// return new Packet(
// packetType,
// packetID,
// data
// );
// }
//
// @Override
// public String toString()
// {
// return "Type: [" + packetType + "] ID: [" + packetID + "] Data: [" + dataLength + " bytes]";
// }
// }
// Path: src/main/java/nl/pvdberg/pnet/event/PNetListener.java
import nl.pvdberg.pnet.client.Client;
import nl.pvdberg.pnet.packet.Packet;
import java.io.IOException;
/*
* MIT License
*
* Copyright (c) 2017 Pim van den Berg
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package nl.pvdberg.pnet.event;
public interface PNetListener
{
/**
* Called when a connection is made
* @param c Connected Client
*/ | void onConnect(final Client c); |
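PNetListener is the single callback surface for connect, disconnect, and receive events; onReceive may throw IOException because the caller catches and silences it by contract. A minimal listener attached to any Client (only the log messages are invented):

import java.io.IOException;
import nl.pvdberg.pnet.client.Client;
import nl.pvdberg.pnet.event.PNetListener;
import nl.pvdberg.pnet.packet.Packet;

public class ListenerExample
{
    // Attaches a simple listener to an already constructed Client
    public static void attach(final Client client)
    {
        client.setClientListener(new PNetListener()
        {
            @Override
            public void onConnect(final Client c)
            {
                System.out.println("Connected to " + c.getInetAddress());
            }

            @Override
            public void onDisconnect(final Client c)
            {
                System.out.println("Connection lost");
            }

            @Override
            public void onReceive(final Packet p, final Client c) throws IOException
            {
                // Throwing here will not crash the Client: see the interface contract
                System.out.println("Received: " + p);
            }
        });
    }
}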
PvdBerg1998/PNet | src/main/java/nl/pvdberg/pnet/client/ClientImpl.java | // Path: src/main/java/nl/pvdberg/pnet/event/PNetListener.java
// public interface PNetListener
// {
// /**
// * Called when a connection is made
// * @param c Connected Client
// */
// void onConnect(final Client c);
//
// /**
// * Called when a connection is lost
// * @param c Lost Client
// */
// void onDisconnect(final Client c);
//
// /**
// * Called when a new Packet has been received. May throw a caught and silenced IOException
// * @param p New Packet
// * @param c Sender
// * @throws IOException when anything goes wrong during data extraction. This exception is caught because invalid Packets should not crash the Client or Server
// */
// void onReceive(final Packet p, final Client c) throws IOException;
// }
//
// Path: src/main/java/nl/pvdberg/pnet/factory/SocketFactory.java
// public interface SocketFactory
// {
// Socket getSocket(final String host, final int port) throws Exception;
// }
//
// Path: src/main/java/nl/pvdberg/pnet/packet/Packet.java
// public class Packet
// {
// private final PacketType packetType;
// private final short packetID;
// private final int dataLength;
// private final byte[] data;
//
// public enum PacketType
// {
// Request,
// Reply;
//
// public static final PacketType[] fastValues = values();
// }
//
// /**
// * Creates a new immutable Packet
// * @param packetType Packet Type
// * @param packetID Packet ID
// * @param data Packet Data
// */
// public Packet(final PacketType packetType, final short packetID, final byte[] data)
// {
// this.packetType = packetType;
// this.packetID = packetID;
// dataLength = data.length;
// this.data = data;
// }
//
// /**
// * Returns Packet Type
// * @return Packet Type
// */
// public PacketType getPacketType()
// {
// return packetType;
// }
//
// /**
// * Returns whether Packet is of type Request
// * @return PacketType is Request
// */
// public boolean isRequest()
// {
// return packetType == PacketType.Request;
// }
//
// /**
// * Returns whether Packet is of type Reply
// * @return PacketType is Reply
// */
// public boolean isReply()
// {
// return packetType == PacketType.Reply;
// }
//
// /**
// * Returns Packet ID
// * @return Packet ID
// */
// public short getPacketID()
// {
// return packetID;
// }
//
// /**
// * Returns Data length
// * @return Data length
// */
// public int getDataLength()
// {
// return dataLength;
// }
//
// /**
// * Returns Packet data
// * @return Data
// */
// public byte[] getData()
// {
// return data;
// }
//
// /**
// * Writes Packet into DataOutputStream
// * @param out DataOutputStream to write into
// * @throws IOException when unable to write to stream
// */
// public void write(final DataOutputStream out) throws IOException
// {
// // Packet Type
// out.writeByte(packetType.ordinal());
//
// // Packet ID
// out.writeShort(packetID);
//
// // Data Length
// out.writeInt(dataLength);
//
// // Data
// out.write(data);
// }
//
// /**
// * Reads a Packet from raw input data
// * @param in DataInputStream to fromStream from
// * @return Packet created from input
// * @throws IOException when unable to read from stream
// */
// public static Packet fromStream(final DataInputStream in) throws IOException
// {
// // Packet Type
// final Packet.PacketType packetType = Packet.PacketType.fastValues[in.readByte()];
//
// // Packet ID
// final short packetID = in.readShort();
//
// // Data Length
// final int dataLength = in.readInt();
//
// // Data
// final byte[] data = new byte[dataLength];
// in.readFully(data);
//
// return new Packet(
// packetType,
// packetID,
// data
// );
// }
//
// @Override
// public String toString()
// {
// return "Type: [" + packetType + "] ID: [" + packetID + "] Data: [" + dataLength + " bytes]";
// }
// }
//
// Path: src/main/java/nl/pvdberg/pnet/threading/ThreadManager.java
// public static Future launchThread(final Runnable runnable)
// {
// return service.submit(runnable);
// }
| import java.io.*;
import java.net.InetAddress;
import java.net.Socket;
import java.net.SocketException;
import static nl.pvdberg.pnet.threading.ThreadManager.launchThread;
import nl.pvdberg.pnet.event.PNetListener;
import nl.pvdberg.pnet.factory.SocketFactory;
import nl.pvdberg.pnet.packet.Packet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory; | /*
* MIT License
*
* Copyright (c) 2017 Pim van den Berg
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package nl.pvdberg.pnet.client;
public class ClientImpl implements Client
{
private final Logger logger = LoggerFactory.getLogger(ClientImpl.class);
| // Path: src/main/java/nl/pvdberg/pnet/event/PNetListener.java
// public interface PNetListener
// {
// /**
// * Called when a connection is made
// * @param c Connected Client
// */
// void onConnect(final Client c);
//
// /**
// * Called when a connection is lost
// * @param c Lost Client
// */
// void onDisconnect(final Client c);
//
// /**
// * Called when a new Packet has been received. May throw a caught and silenced IOException
// * @param p New Packet
// * @param c Sender
// * @throws IOException when anything goes wrong during data extraction. This exception is caught because invalid Packets should not crash the Client or Server
// */
// void onReceive(final Packet p, final Client c) throws IOException;
// }
//
// Path: src/main/java/nl/pvdberg/pnet/factory/SocketFactory.java
// public interface SocketFactory
// {
// Socket getSocket(final String host, final int port) throws Exception;
// }
//
// Path: src/main/java/nl/pvdberg/pnet/packet/Packet.java
// public class Packet
// {
// private final PacketType packetType;
// private final short packetID;
// private final int dataLength;
// private final byte[] data;
//
// public enum PacketType
// {
// Request,
// Reply;
//
// public static final PacketType[] fastValues = values();
// }
//
// /**
// * Creates a new immutable Packet
// * @param packetType Packet Type
// * @param packetID Packet ID
// * @param data Packet Data
// */
// public Packet(final PacketType packetType, final short packetID, final byte[] data)
// {
// this.packetType = packetType;
// this.packetID = packetID;
// dataLength = data.length;
// this.data = data;
// }
//
// /**
// * Returns Packet Type
// * @return Packet Type
// */
// public PacketType getPacketType()
// {
// return packetType;
// }
//
// /**
// * Returns whether Packet is of type Request
// * @return PacketType is Request
// */
// public boolean isRequest()
// {
// return packetType == PacketType.Request;
// }
//
// /**
// * Returns whether Packet is of type Reply
// * @return PacketType is Reply
// */
// public boolean isReply()
// {
// return packetType == PacketType.Reply;
// }
//
// /**
// * Returns Packet ID
// * @return Packet ID
// */
// public short getPacketID()
// {
// return packetID;
// }
//
// /**
// * Returns Data length
// * @return Data length
// */
// public int getDataLength()
// {
// return dataLength;
// }
//
// /**
// * Returns Packet data
// * @return Data
// */
// public byte[] getData()
// {
// return data;
// }
//
// /**
// * Writes Packet into DataOutputStream
// * @param out DataOutputStream to write into
// * @throws IOException when unable to write to stream
// */
// public void write(final DataOutputStream out) throws IOException
// {
// // Packet Type
// out.writeByte(packetType.ordinal());
//
// // Packet ID
// out.writeShort(packetID);
//
// // Data Length
// out.writeInt(dataLength);
//
// // Data
// out.write(data);
// }
//
// /**
// * Reads a Packet from raw input data
// * @param in DataInputStream to read from
// * @return Packet created from input
// * @throws IOException when unable to read from stream
// */
// public static Packet fromStream(final DataInputStream in) throws IOException
// {
// // Packet Type
// final Packet.PacketType packetType = Packet.PacketType.fastValues[in.readByte()];
//
// // Packet ID
// final short packetID = in.readShort();
//
// // Data Length
// final int dataLength = in.readInt();
//
// // Data
// final byte[] data = new byte[dataLength];
// in.readFully(data);
//
// return new Packet(
// packetType,
// packetID,
// data
// );
// }
//
// @Override
// public String toString()
// {
// return "Type: [" + packetType + "] ID: [" + packetID + "] Data: [" + dataLength + " bytes]";
// }
// }
//
// Path: src/main/java/nl/pvdberg/pnet/threading/ThreadManager.java
// public static Future launchThread(final Runnable runnable)
// {
// return service.submit(runnable);
// }
// Path: src/main/java/nl/pvdberg/pnet/client/ClientImpl.java
import java.io.*;
import java.net.InetAddress;
import java.net.Socket;
import java.net.SocketException;
import static nl.pvdberg.pnet.threading.ThreadManager.launchThread;
import nl.pvdberg.pnet.event.PNetListener;
import nl.pvdberg.pnet.factory.SocketFactory;
import nl.pvdberg.pnet.packet.Packet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/*
* MIT License
*
* Copyright (c) 2017 Pim van den Berg
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package nl.pvdberg.pnet.client;
public class ClientImpl implements Client
{
private final Logger logger = LoggerFactory.getLogger(ClientImpl.class);
| private final SocketFactory sf; |
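ClientImpl holds the SocketFactory it will use in connect, which keeps the transport pluggable (plain TCP vs. TLS). A sketch of the simplest possible factory, matching the one-method interface shown above; that ClientImpl receives it through a constructor is inferred from the sf field, not shown here:

import java.net.Socket;
import nl.pvdberg.pnet.factory.SocketFactory;

public class PlainSocketFactory implements SocketFactory
{
    @Override
    public Socket getSocket(final String host, final int port) throws Exception
    {
        // Plain unencrypted TCP; a TLS variant would return an SSLSocket instead
        return new Socket(host, port);
    }
}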
PvdBerg1998/PNet | src/main/java/nl/pvdberg/pnet/client/ClientImpl.java | // Path: src/main/java/nl/pvdberg/pnet/event/PNetListener.java
// public interface PNetListener
// {
// /**
// * Called when a connection is made
// * @param c Connected Client
// */
// void onConnect(final Client c);
//
// /**
// * Called when a connection is lost
// * @param c Lost Client
// */
// void onDisconnect(final Client c);
//
// /**
// * Called when a new Packet has been received. May throw a caught and silenced IOException
// * @param p New Packet
// * @param c Sender
// * @throws IOException when anything goes wrong during data extraction. This exception is caught because invalid Packets should not crash the Client or Server
// */
// void onReceive(final Packet p, final Client c) throws IOException;
// }
//
// Path: src/main/java/nl/pvdberg/pnet/factory/SocketFactory.java
// public interface SocketFactory
// {
// Socket getSocket(final String host, final int port) throws Exception;
// }
//
// Path: src/main/java/nl/pvdberg/pnet/packet/Packet.java
// public class Packet
// {
// private final PacketType packetType;
// private final short packetID;
// private final int dataLength;
// private final byte[] data;
//
// public enum PacketType
// {
// Request,
// Reply;
//
// public static final PacketType[] fastValues = values();
// }
//
// /**
// * Creates a new immutable Packet
// * @param packetType Packet Type
// * @param packetID Packet ID
// * @param data Packet Data
// */
// public Packet(final PacketType packetType, final short packetID, final byte[] data)
// {
// this.packetType = packetType;
// this.packetID = packetID;
// dataLength = data.length;
// this.data = data;
// }
//
// /**
// * Returns Packet Type
// * @return Packet Type
// */
// public PacketType getPacketType()
// {
// return packetType;
// }
//
// /**
// * Returns whether Packet is of type Request
// * @return PacketType is Request
// */
// public boolean isRequest()
// {
// return packetType == PacketType.Request;
// }
//
// /**
// * Returns whether Packet is of type Reply
// * @return PacketType is Reply
// */
// public boolean isReply()
// {
// return packetType == PacketType.Reply;
// }
//
// /**
// * Returns Packet ID
// * @return Packet ID
// */
// public short getPacketID()
// {
// return packetID;
// }
//
// /**
// * Returns Data length
// * @return Data length
// */
// public int getDataLength()
// {
// return dataLength;
// }
//
// /**
// * Returns Packet data
// * @return Data
// */
// public byte[] getData()
// {
// return data;
// }
//
// /**
// * Writes Packet into DataOutputStream
// * @param out DataOutputStream to write into
// * @throws IOException when unable to write to stream
// */
// public void write(final DataOutputStream out) throws IOException
// {
// // Packet Type
// out.writeByte(packetType.ordinal());
//
// // Packet ID
// out.writeShort(packetID);
//
// // Data Length
// out.writeInt(dataLength);
//
// // Data
// out.write(data);
// }
//
// /**
// * Reads a Packet from raw input data
// * @param in DataInputStream to read from
// * @return Packet created from input
// * @throws IOException when unable to read from stream
// */
// public static Packet fromStream(final DataInputStream in) throws IOException
// {
// // Packet Type
// final Packet.PacketType packetType = Packet.PacketType.fastValues[in.readByte()];
//
// // Packet ID
// final short packetID = in.readShort();
//
// // Data Length
// final int dataLength = in.readInt();
//
// // Data
// final byte[] data = new byte[dataLength];
// in.readFully(data);
//
// return new Packet(
// packetType,
// packetID,
// data
// );
// }
//
// @Override
// public String toString()
// {
// return "Type: [" + packetType + "] ID: [" + packetID + "] Data: [" + dataLength + " bytes]";
// }
// }
//
// Path: src/main/java/nl/pvdberg/pnet/threading/ThreadManager.java
// public static Future launchThread(final Runnable runnable)
// {
// return service.submit(runnable);
// }
| import java.io.*;
import java.net.InetAddress;
import java.net.Socket;
import java.net.SocketException;
import static nl.pvdberg.pnet.threading.ThreadManager.launchThread;
import nl.pvdberg.pnet.event.PNetListener;
import nl.pvdberg.pnet.factory.SocketFactory;
import nl.pvdberg.pnet.packet.Packet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory; | {
if (socket != null && !socket.isClosed()) throw new IllegalStateException("Client not closed");
if (host.isEmpty() || port == -1) throw new IllegalStateException("Host and port are not set");
logger.info("Connecting to {}:{}", host, port);
try
{
setSocket(sf.getSocket(host, port));
logger.debug("Connected");
return true;
}
catch (final Exception e)
{
logger.error("Unable to connect: {} :", e.getClass(), e);
return false;
}
}
@Override
public synchronized void setSocket(final Socket socket) throws IOException
{
if (this.socket != null && !this.socket.isClosed()) throw new IllegalStateException("Client not closed");
this.socket = socket;
socket.setKeepAlive(false);
dataInputStream = new DataInputStream(new BufferedInputStream(socket.getInputStream()));
dataOutputStream = new DataOutputStream(new BufferedOutputStream(socket.getOutputStream()));
logger.debug("Starting thread"); | // Path: src/main/java/nl/pvdberg/pnet/event/PNetListener.java
// public interface PNetListener
// {
// /**
// * Called when a connection is made
// * @param c Connected Client
// */
// void onConnect(final Client c);
//
// /**
// * Called when a connection is lost
// * @param c Lost Client
// */
// void onDisconnect(final Client c);
//
// /**
// * Called when a new Packet has been received. May throw a caught and silenced IOException
// * @param p New Packet
// * @param c Sender
// * @throws IOException when anything goes wrong during data extraction. This exception is caught because invalid Packets should not crash the Client or Server
// */
// void onReceive(final Packet p, final Client c) throws IOException;
// }
//
// Path: src/main/java/nl/pvdberg/pnet/factory/SocketFactory.java
// public interface SocketFactory
// {
// Socket getSocket(final String host, final int port) throws Exception;
// }
//
// Path: src/main/java/nl/pvdberg/pnet/packet/Packet.java
// public class Packet
// {
// private final PacketType packetType;
// private final short packetID;
// private final int dataLength;
// private final byte[] data;
//
// public enum PacketType
// {
// Request,
// Reply;
//
// public static final PacketType[] fastValues = values();
// }
//
// /**
// * Creates a new immutable Packet
// * @param packetType Packet Type
// * @param packetID Packet ID
// * @param data Packet Data
// */
// public Packet(final PacketType packetType, final short packetID, final byte[] data)
// {
// this.packetType = packetType;
// this.packetID = packetID;
// dataLength = data.length;
// this.data = data;
// }
//
// /**
// * Returns Packet Type
// * @return Packet Type
// */
// public PacketType getPacketType()
// {
// return packetType;
// }
//
// /**
// * Returns whether Packet is of type Request
// * @return PacketType is Request
// */
// public boolean isRequest()
// {
// return packetType == PacketType.Request;
// }
//
// /**
// * Returns whether Packet is of type Reply
// * @return PacketType is Reply
// */
// public boolean isReply()
// {
// return packetType == PacketType.Reply;
// }
//
// /**
// * Returns Packet ID
// * @return Packet ID
// */
// public short getPacketID()
// {
// return packetID;
// }
//
// /**
// * Returns Data length
// * @return Data length
// */
// public int getDataLength()
// {
// return dataLength;
// }
//
// /**
// * Returns Packet data
// * @return Data
// */
// public byte[] getData()
// {
// return data;
// }
//
// /**
// * Writes Packet into DataOutputStream
// * @param out DataOutputStream to write into
// * @throws IOException when unable to write to stream
// */
// public void write(final DataOutputStream out) throws IOException
// {
// // Packet Type
// out.writeByte(packetType.ordinal());
//
// // Packet ID
// out.writeShort(packetID);
//
// // Data Length
// out.writeInt(dataLength);
//
// // Data
// out.write(data);
// }
//
// /**
// * Reads a Packet from raw input data
// * @param in DataInputStream to read from
// * @return Packet created from input
// * @throws IOException when unable to read from stream
// */
// public static Packet fromStream(final DataInputStream in) throws IOException
// {
// // Packet Type
// final Packet.PacketType packetType = Packet.PacketType.fastValues[in.readByte()];
//
// // Packet ID
// final short packetID = in.readShort();
//
// // Data Length
// final int dataLength = in.readInt();
//
// // Data
// final byte[] data = new byte[dataLength];
// in.readFully(data);
//
// return new Packet(
// packetType,
// packetID,
// data
// );
// }
//
// @Override
// public String toString()
// {
// return "Type: [" + packetType + "] ID: [" + packetID + "] Data: [" + dataLength + " bytes]";
// }
// }
//
// Path: src/main/java/nl/pvdberg/pnet/threading/ThreadManager.java
// public static Future launchThread(final Runnable runnable)
// {
// return service.submit(runnable);
// }
// Path: src/main/java/nl/pvdberg/pnet/client/ClientImpl.java
import java.io.*;
import java.net.InetAddress;
import java.net.Socket;
import java.net.SocketException;
import static nl.pvdberg.pnet.threading.ThreadManager.launchThread;
import nl.pvdberg.pnet.event.PNetListener;
import nl.pvdberg.pnet.factory.SocketFactory;
import nl.pvdberg.pnet.packet.Packet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
{
if (socket != null && !socket.isClosed()) throw new IllegalStateException("Client not closed");
if (host.isEmpty() || port == -1) throw new IllegalStateException("Host and port are not set");
logger.info("Connecting to {}:{}", host, port);
try
{
setSocket(sf.getSocket(host, port));
logger.debug("Connected");
return true;
}
catch (final Exception e)
{
logger.error("Unable to connect: {} :", e.getClass(), e);
return false;
}
}
@Override
public synchronized void setSocket(final Socket socket) throws IOException
{
if (this.socket != null && !this.socket.isClosed()) throw new IllegalStateException("Client not closed");
this.socket = socket;
socket.setKeepAlive(false);
dataInputStream = new DataInputStream(new BufferedInputStream(socket.getInputStream()));
dataOutputStream = new DataOutputStream(new BufferedOutputStream(socket.getOutputStream()));
logger.debug("Starting thread"); | launchThread(new Runnable() |
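Note that connect reports setup failures through its boolean return while reserving exceptions for misuse (calling it on a still-open Client). A typical non-throwing call site, with placeholder host, port, and packet contents:

import nl.pvdberg.pnet.client.Client;
import nl.pvdberg.pnet.packet.Packet;

public class ConnectAndSend
{
    // Hypothetical call site; "example.com", 8080 and the packet ID are placeholders
    public static void run(final Client client)
    {
        if (!client.connect("example.com", 8080))
        {
            System.err.println("Could not reach server");
            return;
        }
        final Packet ping = new Packet(Packet.PacketType.Request, (short) 1, new byte[0]);
        if (!client.send(ping)) System.err.println("Send failed");
        client.close();
    }
}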
PvdBerg1998/PNet | src/main/java/nl/pvdberg/pnet/client/ClientImpl.java | // Path: src/main/java/nl/pvdberg/pnet/event/PNetListener.java
// public interface PNetListener
// {
// /**
// * Called when a connection is made
// * @param c Connected Client
// */
// void onConnect(final Client c);
//
// /**
// * Called when a connection is lost
// * @param c Lost Client
// */
// void onDisconnect(final Client c);
//
// /**
// * Called when a new Packet has been received. May throw a caught and silenced IOException
// * @param p New Packet
// * @param c Sender
// * @throws IOException when anything goes wrong during data extraction. This exception is caught because invalid Packets should not crash the Client or Server
// */
// void onReceive(final Packet p, final Client c) throws IOException;
// }
//
// Path: src/main/java/nl/pvdberg/pnet/factory/SocketFactory.java
// public interface SocketFactory
// {
// Socket getSocket(final String host, final int port) throws Exception;
// }
//
// Path: src/main/java/nl/pvdberg/pnet/packet/Packet.java
// public class Packet
// {
// private final PacketType packetType;
// private final short packetID;
// private final int dataLength;
// private final byte[] data;
//
// public enum PacketType
// {
// Request,
// Reply;
//
// public static final PacketType[] fastValues = values();
// }
//
// /**
// * Creates a new immutable Packet
// * @param packetType Packet Type
// * @param packetID Packet ID
// * @param data Packet Data
// */
// public Packet(final PacketType packetType, final short packetID, final byte[] data)
// {
// this.packetType = packetType;
// this.packetID = packetID;
// dataLength = data.length;
// this.data = data;
// }
//
// /**
// * Returns Packet Type
// * @return Packet Type
// */
// public PacketType getPacketType()
// {
// return packetType;
// }
//
// /**
// * Returns whether Packet is of type Request
// * @return PacketType is Request
// */
// public boolean isRequest()
// {
// return packetType == PacketType.Request;
// }
//
// /**
// * Returns whether Packet is of type Reply
// * @return PacketType is Reply
// */
// public boolean isReply()
// {
// return packetType == PacketType.Reply;
// }
//
// /**
// * Returns Packet ID
// * @return Packet ID
// */
// public short getPacketID()
// {
// return packetID;
// }
//
// /**
// * Returns Data length
// * @return Data length
// */
// public int getDataLength()
// {
// return dataLength;
// }
//
// /**
// * Returns Packet data
// * @return Data
// */
// public byte[] getData()
// {
// return data;
// }
//
// /**
// * Writes Packet into DataOutputStream
// * @param out DataOutputStream to write into
// * @throws IOException when unable to write to stream
// */
// public void write(final DataOutputStream out) throws IOException
// {
// // Packet Type
// out.writeByte(packetType.ordinal());
//
// // Packet ID
// out.writeShort(packetID);
//
// // Data Length
// out.writeInt(dataLength);
//
// // Data
// out.write(data);
// }
//
// /**
// * Reads a Packet from raw input data
// * @param in DataInputStream to read from
// * @return Packet created from input
// * @throws IOException when unable to read from stream
// */
// public static Packet fromStream(final DataInputStream in) throws IOException
// {
// // Packet Type
// final Packet.PacketType packetType = Packet.PacketType.fastValues[in.readByte()];
//
// // Packet ID
// final short packetID = in.readShort();
//
// // Data Length
// final int dataLength = in.readInt();
//
// // Data
// final byte[] data = new byte[dataLength];
// in.readFully(data);
//
// return new Packet(
// packetType,
// packetID,
// data
// );
// }
//
// @Override
// public String toString()
// {
// return "Type: [" + packetType + "] ID: [" + packetID + "] Data: [" + dataLength + " bytes]";
// }
// }
//
// Path: src/main/java/nl/pvdberg/pnet/threading/ThreadManager.java
// public static Future launchThread(final Runnable runnable)
// {
// return service.submit(runnable);
// }
| import java.io.*;
import java.net.InetAddress;
import java.net.Socket;
import java.net.SocketException;
import static nl.pvdberg.pnet.threading.ThreadManager.launchThread;
import nl.pvdberg.pnet.event.PNetListener;
import nl.pvdberg.pnet.factory.SocketFactory;
import nl.pvdberg.pnet.packet.Packet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory; | }
}
@Override
public synchronized void setSocket(final Socket socket) throws IOException
{
if (this.socket != null && !this.socket.isClosed()) throw new IllegalStateException("Client not closed");
this.socket = socket;
socket.setKeepAlive(false);
dataInputStream = new DataInputStream(new BufferedInputStream(socket.getInputStream()));
dataOutputStream = new DataOutputStream(new BufferedOutputStream(socket.getOutputStream()));
logger.debug("Starting thread");
launchThread(new Runnable()
{
@Override
public void run()
{
listenerThreadImpl();
}
});
if (clientListener != null) clientListener.onConnect(this);
}
private void listenerThreadImpl()
{
while (true)
{ | // Path: src/main/java/nl/pvdberg/pnet/event/PNetListener.java
// public interface PNetListener
// {
// /**
// * Called when a connection is made
// * @param c Connected Client
// */
// void onConnect(final Client c);
//
// /**
// * Called when a connection is lost
// * @param c Lost Client
// */
// void onDisconnect(final Client c);
//
// /**
// * Called when a new Packet has been received. May throw a caught and silenced IOException
// * @param p New Packet
// * @param c Sender
// * @throws IOException when anything goes wrong during data extraction. This exception is caught because invalid Packets should not crash the Client or Server
// */
// void onReceive(final Packet p, final Client c) throws IOException;
// }
//
// Path: src/main/java/nl/pvdberg/pnet/factory/SocketFactory.java
// public interface SocketFactory
// {
// Socket getSocket(final String host, final int port) throws Exception;
// }
//
// Path: src/main/java/nl/pvdberg/pnet/packet/Packet.java
// public class Packet
// {
// private final PacketType packetType;
// private final short packetID;
// private final int dataLength;
// private final byte[] data;
//
// public enum PacketType
// {
// Request,
// Reply;
//
// public static final PacketType[] fastValues = values();
// }
//
// /**
// * Creates a new immutable Packet
// * @param packetType Packet Type
// * @param packetID Packet ID
// * @param data Packet Data
// */
// public Packet(final PacketType packetType, final short packetID, final byte[] data)
// {
// this.packetType = packetType;
// this.packetID = packetID;
// dataLength = data.length;
// this.data = data;
// }
//
// /**
// * Returns Packet Type
// * @return Packet Type
// */
// public PacketType getPacketType()
// {
// return packetType;
// }
//
// /**
// * Returns whether Packet is of type Request
// * @return PacketType is Request
// */
// public boolean isRequest()
// {
// return packetType == PacketType.Request;
// }
//
// /**
// * Returns whether Packet is of type Reply
// * @return PacketType is Reply
// */
// public boolean isReply()
// {
// return packetType == PacketType.Reply;
// }
//
// /**
// * Returns Packet ID
// * @return Packet ID
// */
// public short getPacketID()
// {
// return packetID;
// }
//
// /**
// * Returns Data length
// * @return Data length
// */
// public int getDataLength()
// {
// return dataLength;
// }
//
// /**
// * Returns Packet data
// * @return Data
// */
// public byte[] getData()
// {
// return data;
// }
//
// /**
// * Writes Packet into DataOutputStream
// * @param out DataOutputStream to write into
// * @throws IOException when unable to write to stream
// */
// public void write(final DataOutputStream out) throws IOException
// {
// // Packet Type
// out.writeByte(packetType.ordinal());
//
// // Packet ID
// out.writeShort(packetID);
//
// // Data Length
// out.writeInt(dataLength);
//
// // Data
// out.write(data);
// }
//
// /**
// * Reads a Packet from raw input data
// * @param in DataInputStream to read from
// * @return Packet created from input
// * @throws IOException when unable to read from stream
// */
// public static Packet fromStream(final DataInputStream in) throws IOException
// {
// // Packet Type
// final Packet.PacketType packetType = Packet.PacketType.fastValues[in.readByte()];
//
// // Packet ID
// final short packetID = in.readShort();
//
// // Data Length
// final int dataLength = in.readInt();
//
// // Data
// final byte[] data = new byte[dataLength];
// in.readFully(data);
//
// return new Packet(
// packetType,
// packetID,
// data
// );
// }
//
// @Override
// public String toString()
// {
// return "Type: [" + packetType + "] ID: [" + packetID + "] Data: [" + dataLength + " bytes]";
// }
// }
//
// Path: src/main/java/nl/pvdberg/pnet/threading/ThreadManager.java
// public static Future launchThread(final Runnable runnable)
// {
// return service.submit(runnable);
// }
// Path: src/main/java/nl/pvdberg/pnet/client/ClientImpl.java
import java.io.*;
import java.net.InetAddress;
import java.net.Socket;
import java.net.SocketException;
import static nl.pvdberg.pnet.threading.ThreadManager.launchThread;
import nl.pvdberg.pnet.event.PNetListener;
import nl.pvdberg.pnet.factory.SocketFactory;
import nl.pvdberg.pnet.packet.Packet;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
}
}
@Override
public synchronized void setSocket(final Socket socket) throws IOException
{
if (this.socket != null && !this.socket.isClosed()) throw new IllegalStateException("Client not closed");
this.socket = socket;
socket.setKeepAlive(false);
dataInputStream = new DataInputStream(new BufferedInputStream(socket.getInputStream()));
dataOutputStream = new DataOutputStream(new BufferedOutputStream(socket.getOutputStream()));
logger.debug("Starting thread");
launchThread(new Runnable()
{
@Override
public void run()
{
listenerThreadImpl();
}
});
if (clientListener != null) clientListener.onConnect(this);
}
private void listenerThreadImpl()
{
while (true)
{ | final Packet packet; |
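The record cuts off inside listenerThreadImpl, so the loop body below is a reconstruction inferred from the fields and contracts shown above (dataInputStream, clientListener, and the rule that invalid Packets must not crash the Client), not the verbatim repository method:

// Sketch only: a plausible shape of the read loop inside ClientImpl
private void listenerThreadImpl()
{
    while (true)
    {
        final Packet packet;
        try
        {
            // Blocks until a complete Packet arrives on the wire
            packet = Packet.fromStream(dataInputStream);
        }
        catch (final IOException e)
        {
            // Stream closed or broken: leave the loop and report the disconnect
            break;
        }

        if (clientListener != null)
        {
            try
            {
                clientListener.onReceive(packet, this);
            }
            catch (final IOException e)
            {
                // Silenced by contract: invalid Packets must not kill the Client
            }
        }
    }
    if (clientListener != null) clientListener.onDisconnect(this);
}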
pdsoftplan/zap-maven-plugin | zap-client-api/src/main/java/br/com/softplan/security/zap/api/model/AuthenticationInfo.java | // Path: zap-client-api/src/main/java/br/com/softplan/security/zap/api/authentication/AuthenticationInfoValidator.java
// public final class AuthenticationInfoValidator {
//
// private static final Logger LOGGER = LoggerFactory.getLogger(AuthenticationInfoValidator.class);
//
// public static void validate(AuthenticationInfo info) {
// LOGGER.info("--- Validating authentication information ---");
// LOGGER.info("Authentication information provided: {}", info);
// if (info == null) {
// String message = "AuthenticationInfo cannot be null.";
// LOGGER.error(message);
// throw new AuthenticationInfoValidationException(message);
// }
//
// checkRequiredParameter(info.getType(), "type");
// checkRequiredParameter(info.getUsername(), "username");
// checkRequiredParameter(info.getPassword(), "password");
// if (info.getType() != AuthenticationType.HTTP) {
// checkRequiredParameter(info.getLoginUrl(), "loginUrl");
// }
//
// List<String> warnings = new ArrayList<>();
// switch (info.getType()) {
// case HTTP:
// validateHttpAuthenticationInfo(info, warnings);
// break;
// case FORM:
// validateFormAuthenticationInfo(info, warnings);
// break;
// case CAS:
// validateCasAuthenticationInfo(info, warnings);
// break;
// case SELENIUM:
// }
//
// if (warnings.isEmpty()) {
// LOGGER.info("The authentication information provided was successfully validated.");
// } else {
// LOGGER.warn("Some warnings were generated while validating the authentication information provided:");
// for (String warning : warnings) {
// LOGGER.warn("\t{}", warning);
// }
// }
// LOGGER.info("--- Finished validating authentication information ---\n");
// }
//
// private static void checkRequiredParameter(Object parameter, String parameterName) {
// if (parameter == null) {
// String message = "The parameter '" + parameterName + "' is required when working with authentication.";
// LOGGER.error(message);
// throw new AuthenticationInfoValidationException(message);
// }
// }
//
// private static void validateHttpAuthenticationInfo(AuthenticationInfo info, List<String> warnings) {
// if (info.getHostname() == null) {
// String message = "The parameter 'hostname' is required for HTTP authentication.";
// LOGGER.error(message);
// throw new AuthenticationInfoValidationException(message);
// }
// if (info.getRealm() == null) {
// String message = "The parameter 'realm' is required for HTTP authentication.";
// LOGGER.error(message);
// throw new AuthenticationInfoValidationException(message);
// }
// }
//
// private static void validateFormAuthenticationInfo(AuthenticationInfo info, List<String> warnings) {
// validateReauthenticationConfiguration(info, warnings);
// if (info.getProtectedPages() != null && info.getProtectedPages().length > 0) {
// warnings.add("The parameter 'protectedPages' is not used for form based authentication and is necessary only for CAS authentication.");
// }
// }
//
// private static void validateCasAuthenticationInfo(AuthenticationInfo info, List<String> warnings) {
// validateReauthenticationConfiguration(info, warnings);
// if (info.getProtectedPages() == null || info.getProtectedPages().length == 0) {
// String message = "The 'protectedPages' parameter is required for CAS authentication. "
// + "A protected page of each context must be accessed prior to scanning to avoid later redirections.";
// LOGGER.error(message);
// throw new AuthenticationInfoValidationException(message);
// }
// }
//
// private static void validateReauthenticationConfiguration(AuthenticationInfo info, List<String> warnings) {
// if (info.getLoggedInRegex() == null && info.getLoggedOutRegex() == null &&
// (info.getExcludeFromScan() == null || info.getExcludeFromScan().length == 0)) {
// warnings.add("None of the parameters 'loggedInRegex', 'loggedOutRegex' and 'excludeFromScan' were provided. "
// + "Reauthentication will not be possible and there might be a chance that the Spider will log itself out during the scan.");
// }
// }
//
// private AuthenticationInfoValidator() {}
//
// }
| import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.commons.lang3.builder.ToStringStyle;
import br.com.softplan.security.zap.api.authentication.AuthenticationInfoValidator; |
/**
* Sets the realm for HTTP authentication.
*
* @param realm the realm for HTTP authentication.
* @return this {@code Builder} instance.
*/
public Builder realm(String realm) {
this.realm = realm;
return this;
}
/**
* Sets the port for HTTP authentication.
*
* @param port the port for HTTP authentication.
* @return this {@code Builder} instance.
*/
public Builder port(int port) {
this.port = port;
return this;
}
/**
* Validates and builds an {@code AuthenticationInfo} instance based on the builder parameters.
*
* @return an {@code AuthenticationInfo} instance.
*/
public AuthenticationInfo build() {
AuthenticationInfo authenticationInfo = new AuthenticationInfo(this); | // Path: zap-client-api/src/main/java/br/com/softplan/security/zap/api/authentication/AuthenticationInfoValidator.java
// public final class AuthenticationInfoValidator {
//
// private static final Logger LOGGER = LoggerFactory.getLogger(AuthenticationInfoValidator.class);
//
// public static void validate(AuthenticationInfo info) {
// LOGGER.info("--- Validating authentication information ---");
// LOGGER.info("Authentication information provided: {}", info);
// if (info == null) {
// String message = "AuthenticationInfo cannot be null.";
// LOGGER.error(message);
// throw new AuthenticationInfoValidationException(message);
// }
//
// checkRequiredParameter(info.getType(), "type");
// checkRequiredParameter(info.getUsername(), "username");
// checkRequiredParameter(info.getPassword(), "password");
// if (info.getType() != AuthenticationType.HTTP) {
// checkRequiredParameter(info.getLoginUrl(), "loginUrl");
// }
//
// List<String> warnings = new ArrayList<>();
// switch (info.getType()) {
// case HTTP:
// validateHttpAuthenticationInfo(info, warnings);
// break;
// case FORM:
// validateFormAuthenticationInfo(info, warnings);
// break;
// case CAS:
// validateCasAuthenticationInfo(info, warnings);
// break;
// case SELENIUM:
// }
//
// if (warnings.isEmpty()) {
// LOGGER.info("The authentication information provided was successfully validated.");
// } else {
// LOGGER.warn("Some warnings were generated while validating the authentication information provided:");
// for (String warning : warnings) {
// LOGGER.warn("\t{}", warning);
// }
// }
// LOGGER.info("--- Finished validating authentication information ---\n");
// }
//
// private static void checkRequiredParameter(Object parameter, String parameterName) {
// if (parameter == null) {
// String message = "The parameter '" + parameterName + "' is required when working with authentication.";
// LOGGER.error(message);
// throw new AuthenticationInfoValidationException(message);
// }
// }
//
// private static void validateHttpAuthenticationInfo(AuthenticationInfo info, List<String> warnings) {
// if (info.getHostname() == null) {
// String message = "The parameter 'hostname' is required for HTTP authentication.";
// LOGGER.error(message);
// throw new AuthenticationInfoValidationException(message);
// }
// if (info.getRealm() == null) {
// String message = "The parameter 'realm' is required for HTTP authentication.";
// LOGGER.error(message);
// throw new AuthenticationInfoValidationException(message);
// }
// }
//
// private static void validateFormAuthenticationInfo(AuthenticationInfo info, List<String> warnings) {
// validateReauthenticationConfiguration(info, warnings);
// if (info.getProtectedPages() != null && info.getProtectedPages().length > 0) {
// warnings.add("The parameter 'protectedPages' is not used for form based authentication and is necessary only for CAS authentication.");
// }
// }
//
// private static void validateCasAuthenticationInfo(AuthenticationInfo info, List<String> warnings) {
// validateReauthenticationConfiguration(info, warnings);
// if (info.getProtectedPages() == null || info.getProtectedPages().length == 0) {
// String message = "The 'protectedPages' parameter is required for CAS authentication. "
// + "A protected page of each context must be accessed prior to scanning to avoid later redirections.";
// LOGGER.error(message);
// throw new AuthenticationInfoValidationException(message);
// }
// }
//
// private static void validateReauthenticationConfiguration(AuthenticationInfo info, List<String> warnings) {
// if (info.getLoggedInRegex() == null && info.getLoggedOutRegex() == null &&
// (info.getExcludeFromScan() == null || info.getExcludeFromScan().length == 0)) {
// warnings.add("None of the parameters 'loggedInRegex', 'loggedOutRegex' and 'excludeFromScan' were provided. "
// + "Reauthentication will not be possible and there might be a chance that the Spider will log itself out during the scan.");
// }
// }
//
// private AuthenticationInfoValidator() {}
//
// }
// Path: zap-client-api/src/main/java/br/com/softplan/security/zap/api/model/AuthenticationInfo.java
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.commons.lang3.builder.ToStringStyle;
import br.com.softplan.security.zap.api.authentication.AuthenticationInfoValidator;
/**
* Sets the realm for HTTP authentication.
*
* @param realm the realm for HTTP authentication.
* @return this {@code Builder} instance.
*/
public Builder realm(String realm) {
this.realm = realm;
return this;
}
/**
* Sets the port for HTTP authentication.
*
* @param port the port for HTTP authentication.
* @return this {@code Builder} instance.
*/
public Builder port(int port) {
this.port = port;
return this;
}
/**
* Validates and builds an {@code AuthenticationInfo} instance based on the builder parameters.
*
* @return an {@code AuthenticationInfo} instance.
*/
public AuthenticationInfo build() {
AuthenticationInfo authenticationInfo = new AuthenticationInfo(this); | AuthenticationInfoValidator.validate(authenticationInfo); |
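The completed build() method above validates the instance before letting it escape the builder. A minimal, self-contained sketch of that validate-on-build pattern (ConnectionInfo and all of its fields are hypothetical illustrations, not part of the zap-maven-plugin API):

public class ConnectionInfo {
    private final String host;
    private final int port;

    private ConnectionInfo(Builder builder) {
        this.host = builder.host;
        this.port = builder.port;
    }

    public static class Builder {
        private String host;
        private int port = 8080;

        public Builder host(String host) { this.host = host; return this; }
        public Builder port(int port) { this.port = port; return this; }

        public ConnectionInfo build() {
            ConnectionInfo info = new ConnectionInfo(this);
            validate(info); // fail fast before the instance escapes the builder
            return info;
        }

        private static void validate(ConnectionInfo info) {
            if (info.host == null) {
                throw new IllegalStateException("The parameter 'host' is required.");
            }
        }
    }
}

Callers then receive either a fully valid instance or an exception, never a half-configured object.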
pdsoftplan/zap-maven-plugin | zap-client-api/src/main/java/br/com/softplan/security/zap/api/report/ZapReportUtil.java | // Path: zap-client-api/src/main/java/br/com/softplan/security/zap/api/exception/ZapClientException.java
// public class ZapClientException extends RuntimeException {
//
// private static final long serialVersionUID = -4867749606526224619L;
//
// public ZapClientException(String message) {
// super(message);
// }
//
// public ZapClientException(Throwable e) {
// super(e);
// }
//
// public ZapClientException(String message, Throwable e) {
// super(message, e);
// }
//
// }
| import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import br.com.softplan.security.zap.api.exception.ZapClientException; | * Saves the XML Spider results report to the default path (target/zap-reports/).
*
* @param report the {@link ZapReport} holding the report to be saved.
* @return the saved XML Spider results report as a {@code File} instance.
*/
public static File saveXmlSpiderResults(ZapReport report) {
return saveXmlSpiderResults(report, DEFAULT_REPORTS_PATH);
}
/**
* Saves the XML Spider results report to the given path.
*
* @param report the {@link ZapReport} holding the report to be saved.
* @param path the path to save the XML Spider results report.
* @return the saved XML Spider results report as a {@code File} instance.
*/
public static File saveXmlSpiderResults(ZapReport report, String path) {
return saveReport(report.getXmlSpiderResults(), "zapSpiderResults.xml", path);
}
private static File saveReport(byte[] report, String name, String path) {
createReportsFolder(path);
File reportFile = new File(path, name);
try (OutputStream fos = new FileOutputStream(reportFile)) {
fos.write(report);
LOGGER.info("{} saved to {}", name, path);
} catch (IOException e) {
String message = "Error saving reports.";
LOGGER.error(message, e); | // Path: zap-client-api/src/main/java/br/com/softplan/security/zap/api/exception/ZapClientException.java
// public class ZapClientException extends RuntimeException {
//
// private static final long serialVersionUID = -4867749606526224619L;
//
// public ZapClientException(String message) {
// super(message);
// }
//
// public ZapClientException(Throwable e) {
// super(e);
// }
//
// public ZapClientException(String message, Throwable e) {
// super(message, e);
// }
//
// }
// Path: zap-client-api/src/main/java/br/com/softplan/security/zap/api/report/ZapReportUtil.java
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Paths;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import br.com.softplan.security.zap.api.exception.ZapClientException;
* Saves the XML Spider results report to the default path (target/zap-reports/).
*
* @param report the {@link ZapReport} holding the report to be saved.
* @return the saved XML Spider results report as a {@code File} instance.
*/
public static File saveXmlSpiderResults(ZapReport report) {
return saveXmlSpiderResults(report, DEFAULT_REPORTS_PATH);
}
/**
* Saves the XML Spider results report to the given path.
*
* @param report the {@link ZapReport} holding the report to be saved.
* @param path the path to save the XML Spider results report.
* @return the saved XML Spider results report as a {@code File} instance.
*/
public static File saveXmlSpiderResults(ZapReport report, String path) {
return saveReport(report.getXmlSpiderResults(), "zapSpiderResults.xml", path);
}
private static File saveReport(byte[] report, String name, String path) {
createReportsFolder(path);
File reportFile = new File(path, name);
try (OutputStream fos = new FileOutputStream(reportFile)) {
fos.write(report);
LOGGER.info("{} saved to {}", name, path);
} catch (IOException e) {
String message = "Error saving reports.";
LOGGER.error(message, e); | throw new ZapClientException(message, e); |
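The completion above turns a logged IOException into an unchecked ZapClientException. A compact, hypothetical rework of the same save-and-rethrow idiom (ReportWriter and its names are illustrative only; mkdirs() stands in for the createReportsFolder helper):

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;

public class ReportWriter {
    /** Writes the given bytes to path/name, translating IOException into an unchecked exception. */
    public static File write(byte[] content, String name, String path) {
        new File(path).mkdirs(); // make sure the target folder exists
        File target = new File(path, name);
        try (OutputStream out = new FileOutputStream(target)) {
            out.write(content); // the stream is closed automatically, even on failure
        } catch (IOException e) {
            throw new RuntimeException("Error saving report " + name, e);
        }
        return target;
    }

    public static void main(String[] args) {
        write("<report/>".getBytes(), "zapSpiderResults.xml", "target/zap-reports");
    }
}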
pdsoftplan/zap-maven-plugin | zap-maven-plugin-core/src/main/java/br/com/softplan/security/zap/maven/StartZapMojo.java | // Path: zap-utils/src/main/java/br/com/softplan/security/zap/commons/boot/Zap.java
// public final class Zap {
//
// private static final Logger LOGGER = LoggerFactory.getLogger(Zap.class);
//
// private static ZapBoot zap;
//
// public static void startZap(ZapInfo zapInfo) {
// zap = ZapBootFactory.makeZapBoot(zapInfo);
// LOGGER.debug("ZAP will be started by: [{}].", zap.getClass().getSimpleName());
//
// zap.startZap(zapInfo);
// }
//
// public static void stopZap() {
// if (zap != null) {
// zap.stopZap();
// }
// }
//
// private Zap() {}
//
// }
| import org.apache.maven.plugin.MojoExecutionException;
import org.apache.maven.plugin.MojoFailureException;
import org.apache.maven.plugins.annotations.Mojo;
import br.com.softplan.security.zap.commons.boot.Zap; | package br.com.softplan.security.zap.maven;
/**
* Starts ZAP.
* <p>
* Normally this goal will be used along with the {@code seleniumAnalyze} goal.
* <p>
* The usual configuration is to use {@code startZap} in the <i>pre-integration-test</i>,
* to make sure ZAP is running during the tests. If the tests are correctly configured,
* they will use ZAP's proxy to run the tests. The goal {@code seleniumAnalyze} can then
* be configured to run in the phase <i>post-integration-test</i> to run a ZAP analysis
* without a Spider (using the navigation done by the tests).
*
* @author pdsec
*/
@Mojo(name="startZap")
public class StartZapMojo extends ZapMojo {
@Override
public void doExecute() throws MojoExecutionException, MojoFailureException { | // Path: zap-utils/src/main/java/br/com/softplan/security/zap/commons/boot/Zap.java
// public final class Zap {
//
// private static final Logger LOGGER = LoggerFactory.getLogger(Zap.class);
//
// private static ZapBoot zap;
//
// public static void startZap(ZapInfo zapInfo) {
// zap = ZapBootFactory.makeZapBoot(zapInfo);
// LOGGER.debug("ZAP will be started by: [{}].", zap.getClass().getSimpleName());
//
// zap.startZap(zapInfo);
// }
//
// public static void stopZap() {
// if (zap != null) {
// zap.stopZap();
// }
// }
//
// private Zap() {}
//
// }
// Path: zap-maven-plugin-core/src/main/java/br/com/softplan/security/zap/maven/StartZapMojo.java
import org.apache.maven.plugin.MojoExecutionException;
import org.apache.maven.plugin.MojoFailureException;
import org.apache.maven.plugins.annotations.Mojo;
import br.com.softplan.security.zap.commons.boot.Zap;
package br.com.softplan.security.zap.maven;
/**
* Starts ZAP.
* <p>
* Normally this goal will be used along with the {@code seleniumAnalyze} goal.
* <p>
* The usual configuration is to use {@code startZap} in the <i>pre-integration-test</i>,
* to make sure ZAP is running during the tests. If the tests are correctly configured,
* they will use ZAP's proxy to run the tests. The goal {@code seleniumAnalyze} can then
* be configured to run in the phase <i>post-integration-test</i> to run a ZAP analysis
* without a Spider (using the navigation done by the tests).
*
* @author pdsec
*/
@Mojo(name="startZap")
public class StartZapMojo extends ZapMojo {
@Override
public void doExecute() throws MojoExecutionException, MojoFailureException { | Zap.startZap(buildZapInfo()); |
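The startZap goal delegates entirely to Zap.startZap, and the context above also quotes Zap.stopZap(). A hypothetical sketch of the matching stop goal, reusing the same ZapMojo base class (the goal name stopZap and the class itself are assumptions, not taken from the plugin's documentation):

package br.com.softplan.security.zap.maven;

import org.apache.maven.plugin.MojoExecutionException;
import org.apache.maven.plugin.MojoFailureException;
import org.apache.maven.plugins.annotations.Mojo;
import br.com.softplan.security.zap.commons.boot.Zap;

/**
 * Hypothetical counterpart to startZap: stops the ZAP instance
 * started in an earlier phase (e.g. in post-integration-test).
 */
@Mojo(name = "stopZap")
public class StopZapMojo extends ZapMojo {

    @Override
    public void doExecute() throws MojoExecutionException, MojoFailureException {
        Zap.stopZap(); // Zap.stopZap() is shown in the quoted Zap class above
    }
}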
pdsoftplan/zap-maven-plugin | zap-client-api/src/main/java/br/com/softplan/security/zap/api/authentication/AuthenticationScript.java | // Path: zap-utils/src/main/java/br/com/softplan/security/zap/commons/authentication/AuthenticationScripts.java
// public class AuthenticationScripts {
//
// public static final String RELATIVE_PATH = "/scripts/";
// public static final String EXTENSION = ".js";
//
// }
//
// Path: zap-utils/src/main/java/br/com/softplan/security/zap/commons/boot/ZapDockerBoot.java
// public class ZapDockerBoot extends AbstractZapBoot {
//
// private static final Logger LOGGER = LoggerFactory.getLogger(ZapDockerBoot.class);
//
// private static final String DEFAULT_DOCKER_COMMAND = "docker run --rm";
// private static final String ZAP_IMAGE_OPTION = " -i owasp/zap2docker-stable zap.sh ";
//
// public static final String CAS_AUTH_SCRIPT_DEFAULT_DOCKER_PATH = "/zap/scripts/";
// private static final String CAS_AUTH_SCRIPT_FILE_NAME = "cas-auth.js";
//
// private static Process zap;
//
// @Override
// public void startZap(ZapInfo zapInfo) {
// int port = zapInfo.getPort();
//
// if (isZapRunning(port)) {
// LOGGER.info("ZAP is already up and running! No attempts will be made to start ZAP.");
// return;
// }
//
// try {
// copyCasAuthScriptFileToMappedFolder();
// startZap(zapInfo.getPath(), buildStartCommand(zapInfo));
// waitForZapInitialization(port, zapInfo.getInitializationTimeoutInMillis());
// } catch (IOException e) {
// LOGGER.error("Error starting ZAP.", e);
// }
// }
//
// private static void copyCasAuthScriptFileToMappedFolder() {
// new File(CAS_AUTH_SCRIPT_DEFAULT_DOCKER_PATH).mkdirs();
//
// File scriptFile = new File(CAS_AUTH_SCRIPT_DEFAULT_DOCKER_PATH, CAS_AUTH_SCRIPT_FILE_NAME);
//
// InputStream casAuthScriptInputStream = ZapDockerBoot.class.getResourceAsStream(AuthenticationScripts.RELATIVE_PATH + CAS_AUTH_SCRIPT_FILE_NAME);
// try (FileOutputStream fileOutputStream = new FileOutputStream(scriptFile)) {
// IOUtils.copy(casAuthScriptInputStream, fileOutputStream);
// } catch (IOException e) {
// LOGGER.error("Error while trying to create the script file for CAS authentication in " + CAS_AUTH_SCRIPT_DEFAULT_DOCKER_PATH + ". "
// + "The analysis will continue but CAS authentication will work only if the script file can be accessed by ZAP's Docker image "
// + "(a default volume is created in " + CAS_AUTH_SCRIPT_DEFAULT_DOCKER_PATH + ").", e);
// }
// }
//
// @Override
// public void stopZap() {
// if (zap != null) {
// LOGGER.info("Stopping ZAP.");
// zap.destroy();
// }
// }
//
// private static String buildStartCommand(ZapInfo zapInfo) {
// StringBuilder startCommand = new StringBuilder(DEFAULT_DOCKER_COMMAND);
// appendVolumeOption(startCommand);
// appendPortOption(zapInfo, startCommand);
// startCommand.append(ZAP_IMAGE_OPTION);
//
// String options = zapInfo.getOptions();
// startCommand.append(options != null ? options : DEFAULT_ZAP_OPTIONS);
// startCommand.append(" -port ").append(zapInfo.getPort());
//
// return startCommand.toString();
// }
//
// private static void appendVolumeOption(StringBuilder startCommand) {
// startCommand.append(" -v ");
// startCommand.append(CAS_AUTH_SCRIPT_DEFAULT_DOCKER_PATH);
// startCommand.append(":");
// startCommand.append(CAS_AUTH_SCRIPT_DEFAULT_DOCKER_PATH);
// startCommand.append(":ro");
// }
//
// private static void appendPortOption(ZapInfo zapInfo, StringBuilder startCommand) {
// startCommand.append(" -p ");
// startCommand.append(zapInfo.getPort());
// startCommand.append(":");
// startCommand.append(zapInfo.getPort());
// }
//
// private static void startZap(String path, String startCommand) throws IOException {
// ProcessBuilder processBuilder = new ProcessBuilder(startCommand.split(" +"));
// // processBuilder.directory(new File(path));
//
// Files.createDirectories(Paths.get(DEFAULT_ZAP_LOG_PATH));
// processBuilder.redirectOutput(new File(DEFAULT_ZAP_LOG_PATH, DEFAULT_ZAP_LOG_FILE_NAME));
//
// LOGGER.info("Starting ZAP with command: {}", startCommand);
// zap = processBuilder.start();
// }
//
// }
| import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import java.net.URISyntaxException;
import org.apache.commons.io.IOUtils;
import br.com.softplan.security.zap.commons.authentication.AuthenticationScripts;
import br.com.softplan.security.zap.commons.boot.ZapDockerBoot; | package br.com.softplan.security.zap.api.authentication;
public class AuthenticationScript {
private String name;
private String description;
private String fileName;
private String relativePath;
private String path;
private File scriptTempFile;
public AuthenticationScript(String name, String description) {
this.name = name;
this.description = description; | // Path: zap-utils/src/main/java/br/com/softplan/security/zap/commons/authentication/AuthenticationScripts.java
// public class AuthenticationScripts {
//
// public static final String RELATIVE_PATH = "/scripts/";
// public static final String EXTENSION = ".js";
//
// }
//
// Path: zap-utils/src/main/java/br/com/softplan/security/zap/commons/boot/ZapDockerBoot.java
// public class ZapDockerBoot extends AbstractZapBoot {
//
// private static final Logger LOGGER = LoggerFactory.getLogger(ZapDockerBoot.class);
//
// private static final String DEFAULT_DOCKER_COMMAND = "docker run --rm";
// private static final String ZAP_IMAGE_OPTION = " -i owasp/zap2docker-stable zap.sh ";
//
// public static final String CAS_AUTH_SCRIPT_DEFAULT_DOCKER_PATH = "/zap/scripts/";
// private static final String CAS_AUTH_SCRIPT_FILE_NAME = "cas-auth.js";
//
// private static Process zap;
//
// @Override
// public void startZap(ZapInfo zapInfo) {
// int port = zapInfo.getPort();
//
// if (isZapRunning(port)) {
// LOGGER.info("ZAP is already up and running! No attempts will be made to start ZAP.");
// return;
// }
//
// try {
// copyCasAuthScriptFileToMappedFolder();
// startZap(zapInfo.getPath(), buildStartCommand(zapInfo));
// waitForZapInitialization(port, zapInfo.getInitializationTimeoutInMillis());
// } catch (IOException e) {
// LOGGER.error("Error starting ZAP.", e);
// }
// }
//
// private static void copyCasAuthScriptFileToMappedFolder() {
// new File(CAS_AUTH_SCRIPT_DEFAULT_DOCKER_PATH).mkdirs();
//
// File scriptFile = new File(CAS_AUTH_SCRIPT_DEFAULT_DOCKER_PATH, CAS_AUTH_SCRIPT_FILE_NAME);
//
// InputStream casAuthScriptInputStream = ZapDockerBoot.class.getResourceAsStream(AuthenticationScripts.RELATIVE_PATH + CAS_AUTH_SCRIPT_FILE_NAME);
// try (FileOutputStream fileOutputStream = new FileOutputStream(scriptFile)) {
// IOUtils.copy(casAuthScriptInputStream, fileOutputStream);
// } catch (IOException e) {
// LOGGER.error("Error while trying to create the script file for CAS authentication in " + CAS_AUTH_SCRIPT_DEFAULT_DOCKER_PATH + ". "
// + "The analysis will continue but CAS authentication will work only if the script file can be accessed by ZAP's Docker image "
// + "(a default volume is created in " + CAS_AUTH_SCRIPT_DEFAULT_DOCKER_PATH + ").", e);
// }
// }
//
// @Override
// public void stopZap() {
// if (zap != null) {
// LOGGER.info("Stopping ZAP.");
// zap.destroy();
// }
// }
//
// private static String buildStartCommand(ZapInfo zapInfo) {
// StringBuilder startCommand = new StringBuilder(DEFAULT_DOCKER_COMMAND);
// appendVolumeOption(startCommand);
// appendPortOption(zapInfo, startCommand);
// startCommand.append(ZAP_IMAGE_OPTION);
//
// String options = zapInfo.getOptions();
// startCommand.append(options != null ? options : DEFAULT_ZAP_OPTIONS);
// startCommand.append(" -port ").append(zapInfo.getPort());
//
// return startCommand.toString();
// }
//
// private static void appendVolumeOption(StringBuilder startCommand) {
// startCommand.append(" -v ");
// startCommand.append(CAS_AUTH_SCRIPT_DEFAULT_DOCKER_PATH);
// startCommand.append(":");
// startCommand.append(CAS_AUTH_SCRIPT_DEFAULT_DOCKER_PATH);
// startCommand.append(":ro");
// }
//
// private static void appendPortOption(ZapInfo zapInfo, StringBuilder startCommand) {
// startCommand.append(" -p ");
// startCommand.append(zapInfo.getPort());
// startCommand.append(":");
// startCommand.append(zapInfo.getPort());
// }
//
// private static void startZap(String path, String startCommand) throws IOException {
// ProcessBuilder processBuilder = new ProcessBuilder(startCommand.split(" +"));
// // processBuilder.directory(new File(path));
//
// Files.createDirectories(Paths.get(DEFAULT_ZAP_LOG_PATH));
// processBuilder.redirectOutput(new File(DEFAULT_ZAP_LOG_PATH, DEFAULT_ZAP_LOG_FILE_NAME));
//
// LOGGER.info("Starting ZAP with command: {}", startCommand);
// zap = processBuilder.start();
// }
//
// }
// Path: zap-client-api/src/main/java/br/com/softplan/security/zap/api/authentication/AuthenticationScript.java
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import java.net.URISyntaxException;
import org.apache.commons.io.IOUtils;
import br.com.softplan.security.zap.commons.authentication.AuthenticationScripts;
import br.com.softplan.security.zap.commons.boot.ZapDockerBoot;
package br.com.softplan.security.zap.api.authentication;
public class AuthenticationScript {
private String name;
private String description;
private String fileName;
private String relativePath;
private String path;
private File scriptTempFile;
public AuthenticationScript(String name, String description) {
this.name = name;
this.description = description; | this.fileName = name + AuthenticationScripts.EXTENSION; |
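The completed constructor derives the script's classpath location from its name plus the shared RELATIVE_PATH and EXTENSION constants. A self-contained sketch of reading such a classpath resource (ResourceScript is a hypothetical name; readAllBytes assumes Java 9+):

import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

public class ResourceScript {
    // Resolves a resource the same way: relative path = folder + name + extension.
    public static String read(String relativePath) throws IOException {
        try (InputStream in = ResourceScript.class.getResourceAsStream(relativePath)) {
            if (in == null) {
                throw new IOException("Resource not found on classpath: " + relativePath);
            }
            return new String(in.readAllBytes(), StandardCharsets.UTF_8);
        }
    }

    public static void main(String[] args) throws IOException {
        System.out.println(read("/scripts/cas-auth.js")); // assumes the script is bundled with the jar
    }
}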
pdsoftplan/zap-maven-plugin | zap-client-api/src/main/java/br/com/softplan/security/zap/api/authentication/AuthenticationScript.java | // Path: zap-utils/src/main/java/br/com/softplan/security/zap/commons/authentication/AuthenticationScripts.java
// public class AuthenticationScripts {
//
// public static final String RELATIVE_PATH = "/scripts/";
// public static final String EXTENSION = ".js";
//
// }
//
// Path: zap-utils/src/main/java/br/com/softplan/security/zap/commons/boot/ZapDockerBoot.java
// public class ZapDockerBoot extends AbstractZapBoot {
//
// private static final Logger LOGGER = LoggerFactory.getLogger(ZapDockerBoot.class);
//
// private static final String DEFAULT_DOCKER_COMMAND = "docker run --rm";
// private static final String ZAP_IMAGE_OPTION = " -i owasp/zap2docker-stable zap.sh ";
//
// public static final String CAS_AUTH_SCRIPT_DEFAULT_DOCKER_PATH = "/zap/scripts/";
// private static final String CAS_AUTH_SCRIPT_FILE_NAME = "cas-auth.js";
//
// private static Process zap;
//
// @Override
// public void startZap(ZapInfo zapInfo) {
// int port = zapInfo.getPort();
//
// if (isZapRunning(port)) {
// LOGGER.info("ZAP is already up and running! No attempts will be made to start ZAP.");
// return;
// }
//
// try {
// copyCasAuthScriptFileToMappedFolder();
// startZap(zapInfo.getPath(), buildStartCommand(zapInfo));
// waitForZapInitialization(port, zapInfo.getInitializationTimeoutInMillis());
// } catch (IOException e) {
// LOGGER.error("Error starting ZAP.", e);
// }
// }
//
// private static void copyCasAuthScriptFileToMappedFolder() {
// new File(CAS_AUTH_SCRIPT_DEFAULT_DOCKER_PATH).mkdirs();
//
// File scriptFile = new File(CAS_AUTH_SCRIPT_DEFAULT_DOCKER_PATH, CAS_AUTH_SCRIPT_FILE_NAME);
//
// InputStream casAuthScriptInputStream = ZapDockerBoot.class.getResourceAsStream(AuthenticationScripts.RELATIVE_PATH + CAS_AUTH_SCRIPT_FILE_NAME);
// try (FileOutputStream fileOutputStream = new FileOutputStream(scriptFile)) {
// IOUtils.copy(casAuthScriptInputStream, fileOutputStream);
// } catch (IOException e) {
// LOGGER.error("Error while trying to create the script file for CAS authentication in " + CAS_AUTH_SCRIPT_DEFAULT_DOCKER_PATH + ". "
// + "The analysis will continue but CAS authentication will work only if the script file can be accessed by ZAP's Docker image "
// + "(a default volume is created in " + CAS_AUTH_SCRIPT_DEFAULT_DOCKER_PATH + ").", e);
// }
// }
//
// @Override
// public void stopZap() {
// if (zap != null) {
// LOGGER.info("Stopping ZAP.");
// zap.destroy();
// }
// }
//
// private static String buildStartCommand(ZapInfo zapInfo) {
// StringBuilder startCommand = new StringBuilder(DEFAULT_DOCKER_COMMAND);
// appendVolumeOption(startCommand);
// appendPortOption(zapInfo, startCommand);
// startCommand.append(ZAP_IMAGE_OPTION);
//
// String options = zapInfo.getOptions();
// startCommand.append(options != null ? options : DEFAULT_ZAP_OPTIONS);
// startCommand.append(" -port ").append(zapInfo.getPort());
//
// return startCommand.toString();
// }
//
// private static void appendVolumeOption(StringBuilder startCommand) {
// startCommand.append(" -v ");
// startCommand.append(CAS_AUTH_SCRIPT_DEFAULT_DOCKER_PATH);
// startCommand.append(":");
// startCommand.append(CAS_AUTH_SCRIPT_DEFAULT_DOCKER_PATH);
// startCommand.append(":ro");
// }
//
// private static void appendPortOption(ZapInfo zapInfo, StringBuilder startCommand) {
// startCommand.append(" -p ");
// startCommand.append(zapInfo.getPort());
// startCommand.append(":");
// startCommand.append(zapInfo.getPort());
// }
//
// private static void startZap(String path, String startCommand) throws IOException {
// ProcessBuilder processBuilder = new ProcessBuilder(startCommand.split(" +"));
// // processBuilder.directory(new File(path));
//
// Files.createDirectories(Paths.get(DEFAULT_ZAP_LOG_PATH));
// processBuilder.redirectOutput(new File(DEFAULT_ZAP_LOG_PATH, DEFAULT_ZAP_LOG_FILE_NAME));
//
// LOGGER.info("Starting ZAP with command: {}", startCommand);
// zap = processBuilder.start();
// }
//
// }
| import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import java.net.URISyntaxException;
import org.apache.commons.io.IOUtils;
import br.com.softplan.security.zap.commons.authentication.AuthenticationScripts;
import br.com.softplan.security.zap.commons.boot.ZapDockerBoot; | package br.com.softplan.security.zap.api.authentication;
public class AuthenticationScript {
private String name;
private String description;
private String fileName;
private String relativePath;
private String path;
private File scriptTempFile;
public AuthenticationScript(String name, String description) {
this.name = name;
this.description = description;
this.fileName = name + AuthenticationScripts.EXTENSION;
this.relativePath = AuthenticationScripts.RELATIVE_PATH + fileName;
this.path = AuthenticationScript.class.getResource(relativePath).getPath();
}
public String getName() {
return name;
}
public String getDescription() {
return description;
}
public String getFileName() {
return fileName;
}
public String getRelativePath() {
return relativePath;
}
public String getPath(boolean isZapRunningOnDocker) throws IOException, URISyntaxException {
if (isZapRunningOnDocker) { | // Path: zap-utils/src/main/java/br/com/softplan/security/zap/commons/authentication/AuthenticationScripts.java
// public class AuthenticationScripts {
//
// public static final String RELATIVE_PATH = "/scripts/";
// public static final String EXTENSION = ".js";
//
// }
//
// Path: zap-utils/src/main/java/br/com/softplan/security/zap/commons/boot/ZapDockerBoot.java
// public class ZapDockerBoot extends AbstractZapBoot {
//
// private static final Logger LOGGER = LoggerFactory.getLogger(ZapDockerBoot.class);
//
// private static final String DEFAULT_DOCKER_COMMAND = "docker run --rm";
// private static final String ZAP_IMAGE_OPTION = " -i owasp/zap2docker-stable zap.sh ";
//
// public static final String CAS_AUTH_SCRIPT_DEFAULT_DOCKER_PATH = "/zap/scripts/";
// private static final String CAS_AUTH_SCRIPT_FILE_NAME = "cas-auth.js";
//
// private static Process zap;
//
// @Override
// public void startZap(ZapInfo zapInfo) {
// int port = zapInfo.getPort();
//
// if (isZapRunning(port)) {
// LOGGER.info("ZAP is already up and running! No attempts will be made to start ZAP.");
// return;
// }
//
// try {
// copyCasAuthScriptFileToMappedFolder();
// startZap(zapInfo.getPath(), buildStartCommand(zapInfo));
// waitForZapInitialization(port, zapInfo.getInitializationTimeoutInMillis());
// } catch (IOException e) {
// LOGGER.error("Error starting ZAP.", e);
// }
// }
//
// private static void copyCasAuthScriptFileToMappedFolder() {
// new File(CAS_AUTH_SCRIPT_DEFAULT_DOCKER_PATH).mkdirs();
//
// File scriptFile = new File(CAS_AUTH_SCRIPT_DEFAULT_DOCKER_PATH, CAS_AUTH_SCRIPT_FILE_NAME);
//
// InputStream casAuthScriptInputStream = ZapDockerBoot.class.getResourceAsStream(AuthenticationScripts.RELATIVE_PATH + CAS_AUTH_SCRIPT_FILE_NAME);
// try (FileOutputStream fileOutputStream = new FileOutputStream(scriptFile)) {
// IOUtils.copy(casAuthScriptInputStream, fileOutputStream);
// } catch (IOException e) {
// LOGGER.error("Error while trying to create the script file for CAS authentication in " + CAS_AUTH_SCRIPT_DEFAULT_DOCKER_PATH + ". "
// + "The analysis will continue but CAS authentication will work only if the script file can be accessed by ZAP's Docker image "
// + "(a default volume is created in " + CAS_AUTH_SCRIPT_DEFAULT_DOCKER_PATH + ").", e);
// }
// }
//
// @Override
// public void stopZap() {
// if (zap != null) {
// LOGGER.info("Stopping ZAP.");
// zap.destroy();
// }
// }
//
// private static String buildStartCommand(ZapInfo zapInfo) {
// StringBuilder startCommand = new StringBuilder(DEFAULT_DOCKER_COMMAND);
// appendVolumeOption(startCommand);
// appendPortOption(zapInfo, startCommand);
// startCommand.append(ZAP_IMAGE_OPTION);
//
// String options = zapInfo.getOptions();
// startCommand.append(options != null ? options : DEFAULT_ZAP_OPTIONS);
// startCommand.append(" -port ").append(zapInfo.getPort());
//
// return startCommand.toString();
// }
//
// private static void appendVolumeOption(StringBuilder startCommand) {
// startCommand.append(" -v ");
// startCommand.append(CAS_AUTH_SCRIPT_DEFAULT_DOCKER_PATH);
// startCommand.append(":");
// startCommand.append(CAS_AUTH_SCRIPT_DEFAULT_DOCKER_PATH);
// startCommand.append(":ro");
// }
//
// private static void appendPortOption(ZapInfo zapInfo, StringBuilder startCommand) {
// startCommand.append(" -p ");
// startCommand.append(zapInfo.getPort());
// startCommand.append(":");
// startCommand.append(zapInfo.getPort());
// }
//
// private static void startZap(String path, String startCommand) throws IOException {
// ProcessBuilder processBuilder = new ProcessBuilder(startCommand.split(" +"));
// // processBuilder.directory(new File(path));
//
// Files.createDirectories(Paths.get(DEFAULT_ZAP_LOG_PATH));
// processBuilder.redirectOutput(new File(DEFAULT_ZAP_LOG_PATH, DEFAULT_ZAP_LOG_FILE_NAME));
//
// LOGGER.info("Starting ZAP with command: {}", startCommand);
// zap = processBuilder.start();
// }
//
// }
// Path: zap-client-api/src/main/java/br/com/softplan/security/zap/api/authentication/AuthenticationScript.java
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import java.net.URISyntaxException;
import org.apache.commons.io.IOUtils;
import br.com.softplan.security.zap.commons.authentication.AuthenticationScripts;
import br.com.softplan.security.zap.commons.boot.ZapDockerBoot;
package br.com.softplan.security.zap.api.authentication;
public class AuthenticationScript {
private String name;
private String description;
private String fileName;
private String relativePath;
private String path;
private File scriptTempFile;
public AuthenticationScript(String name, String description) {
this.name = name;
this.description = description;
this.fileName = name + AuthenticationScripts.EXTENSION;
this.relativePath = AuthenticationScripts.RELATIVE_PATH + fileName;
this.path = AuthenticationScript.class.getResource(relativePath).getPath();
}
public String getName() {
return name;
}
public String getDescription() {
return description;
}
public String getFileName() {
return fileName;
}
public String getRelativePath() {
return relativePath;
}
public String getPath(boolean isZapRunningOnDocker) throws IOException, URISyntaxException {
if (isZapRunningOnDocker) { | return ZapDockerBoot.CAS_AUTH_SCRIPT_DEFAULT_DOCKER_PATH + fileName; |
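The completion shows why getPath takes a flag: when ZAP runs inside Docker, the script must be addressed by the container-side path that ZapDockerBoot maps as a volume, not by the host classpath location. A hypothetical, distilled version of that branch:

public class ScriptPathResolver {
    static final String DOCKER_SCRIPTS_PATH = "/zap/scripts/"; // mirrors ZapDockerBoot's mapped volume

    static String resolve(String fileName, String hostPath, boolean zapRunsOnDocker) {
        if (zapRunsOnDocker) {
            return DOCKER_SCRIPTS_PATH + fileName; // path as seen from inside the container
        }
        return hostPath; // path on the local filesystem / classpath
    }

    public static void main(String[] args) {
        System.out.println(resolve("cas-auth.js", "/tmp/scripts/cas-auth.js", true));
        System.out.println(resolve("cas-auth.js", "/tmp/scripts/cas-auth.js", false));
    }
}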
pdsoftplan/zap-maven-plugin | zap-utils/src/main/java/br/com/softplan/security/zap/commons/boot/AbstractZapBoot.java | // Path: zap-utils/src/main/java/br/com/softplan/security/zap/commons/exception/ZapInitializationTimeoutException.java
// public class ZapInitializationTimeoutException extends RuntimeException {
//
// private static final long serialVersionUID = -5283245793671447701L;
//
// public ZapInitializationTimeoutException(String message) {
// super(message);
// }
//
// public ZapInitializationTimeoutException(Throwable e) {
// super(e);
// }
//
// public ZapInitializationTimeoutException(String message, Throwable e) {
// super(message, e);
// }
//
// }
| import java.io.File;
import java.io.IOException;
import java.net.ConnectException;
import java.net.HttpURLConnection;
import java.net.URL;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import br.com.softplan.security.zap.commons.exception.ZapInitializationTimeoutException; | static int getResponseFromZap(String host, int port) {
if (host == null) {
return -1;
}
String url = "http://" + host + ":" + port;
int responseCode = -1;
try {
HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
conn.setRequestMethod(HEAD);
responseCode = conn.getResponseCode();
} catch (ConnectException e) {
LOGGER.debug("ZAP could not be reached at {}:{}.", host, port);
} catch (IOException e) {
LOGGER.error("Error trying to get a response from ZAP.", e);
}
return responseCode;
}
static void waitForZapInitialization(int port, long timeoutInMillis) {
waitForZapInitialization("localhost", port, timeoutInMillis);
}
static void waitForZapInitialization(String host, int port, long timeoutInMillis) {
long startUpTime = System.currentTimeMillis();
do {
if (System.currentTimeMillis() - startUpTime > timeoutInMillis) {
String message = "ZAP did not start before the timeout (" + timeoutInMillis + " ms).";
LOGGER.error(message); | // Path: zap-utils/src/main/java/br/com/softplan/security/zap/commons/exception/ZapInitializationTimeoutException.java
// public class ZapInitializationTimeoutException extends RuntimeException {
//
// private static final long serialVersionUID = -5283245793671447701L;
//
// public ZapInitializationTimeoutException(String message) {
// super(message);
// }
//
// public ZapInitializationTimeoutException(Throwable e) {
// super(e);
// }
//
// public ZapInitializationTimeoutException(String message, Throwable e) {
// super(message, e);
// }
//
// }
// Path: zap-utils/src/main/java/br/com/softplan/security/zap/commons/boot/AbstractZapBoot.java
import java.io.File;
import java.io.IOException;
import java.net.ConnectException;
import java.net.HttpURLConnection;
import java.net.URL;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import br.com.softplan.security.zap.commons.exception.ZapInitializationTimeoutException;
static int getResponseFromZap(String host, int port) {
if (host == null) {
return -1;
}
String url = "http://" + host + ":" + port;
int responseCode = -1;
try {
HttpURLConnection conn = (HttpURLConnection) new URL(url).openConnection();
conn.setRequestMethod(HEAD);
responseCode = conn.getResponseCode();
} catch (ConnectException e) {
LOGGER.debug("ZAP could not be reached at {}:{}.", host, port);
} catch (IOException e) {
LOGGER.error("Error trying to get a response from ZAP.", e);
}
return responseCode;
}
static void waitForZapInitialization(int port, long timeoutInMillis) {
waitForZapInitialization("localhost", port, timeoutInMillis);
}
static void waitForZapInitialization(String host, int port, long timeoutInMillis) {
long startUpTime = System.currentTimeMillis();
do {
if (System.currentTimeMillis() - startUpTime > timeoutInMillis) {
String message = "ZAP did not start before the timeout (" + timeoutInMillis + " ms).";
LOGGER.error(message); | throw new ZapInitializationTimeoutException(message); |
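waitForZapInitialization is a poll-until-ready loop that throws once the deadline passes. A self-contained, hypothetical generalization of the same timeout pattern (StartupWaiter and Probe are illustrative names; the sleep interval is an assumption, since the loop body between probes is not shown above):

public class StartupWaiter {
    interface Probe { boolean isUp(); }

    static void waitUntilUp(Probe probe, long timeoutInMillis) throws InterruptedException {
        long start = System.currentTimeMillis();
        while (!probe.isUp()) {
            if (System.currentTimeMillis() - start > timeoutInMillis) {
                throw new IllegalStateException(
                        "Service did not start before the timeout (" + timeoutInMillis + " ms).");
            }
            Thread.sleep(500); // back off between probes instead of busy-waiting
        }
    }

    public static void main(String[] args) throws InterruptedException {
        long readyAt = System.currentTimeMillis() + 2_000;
        waitUntilUp(() -> System.currentTimeMillis() >= readyAt, 10_000);
        System.out.println("up");
    }
}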
RentTheRunway/conduit | conduit/src/test/java/io/rtr/conduit/amqp/impl/AMQPQueueConsumerTest.java | // Path: conduit/src/main/java/io/rtr/conduit/amqp/AMQPConsumerCallback.java
// public interface AMQPConsumerCallback {
// ActionResponse handle(AMQPMessageBundle messageBundle);
// void notifyOfActionFailure(Exception e);
// void notifyOfShutdown(String consumerTag, ShutdownSignalException sig);
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/AMQPMessageBundle.java
// public class AMQPMessageBundle implements TransportMessageBundle {
// private String consumerTag;
// private Envelope envelope;
// private AMQP.BasicProperties basicProperties;
// private byte[] body;
//
// private static AMQP.BasicProperties initialProperties() {
// return initialProperties(null);
// }
//
// private static AMQP.BasicProperties initialProperties(Map<String, Object> additionalHeaders) {
// Map<String, Object> headers = new HashMap<String, Object>();
//
// if (additionalHeaders != null) {
// headers.putAll(additionalHeaders);
// }
//
// headers.put("conduit-retry-count", 0);
//
// return new AMQP.BasicProperties()
// .builder()
// .deliveryMode(2 /*persistent*/)
// .priority(0)
// .headers(headers)
// .contentType("text/plain")
// .build();
// }
//
// public AMQPMessageBundle(String consumerTag, Envelope envelope, AMQP.BasicProperties basicProperties, byte[] body) {
// this.consumerTag = consumerTag;
// this.envelope = envelope;
// this.basicProperties = basicProperties;
// this.body = body;
// }
//
// public AMQPMessageBundle(String message) {
// this(null, null, initialProperties(), message.getBytes());
// }
//
// public AMQPMessageBundle(String message, Map<String, Object> headers) {
// this(null, null, initialProperties(headers), message.getBytes());
// }
//
// public String getConsumerTag() {
// return consumerTag;
// }
//
// public Envelope getEnvelope() {
// return envelope;
// }
//
// public AMQP.BasicProperties getBasicProperties() {
// return basicProperties;
// }
//
// public byte[] getBody() {
// return body;
// }
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/ActionResponse.java
// public class ActionResponse {
//
// private Action action;
// private String reason;
// public static final String REASON_KEY = "reason";
//
// private ActionResponse(Action action, String reason) {
// this.action = action;
// this.reason = reason;
// }
//
// public static ActionResponse acknowledge() {
// return new ActionResponse(Action.Acknowledge, null);
// }
//
// public static ActionResponse retry() {
// return new ActionResponse(Action.RejectAndRequeue, null);
// }
//
// public static ActionResponse retry(String reason, Object... args) {
// return new ActionResponse(Action.RejectAndRequeue, String.format(reason, args));
// }
//
// public static ActionResponse discard() {
// return new ActionResponse(Action.RejectAndDiscard, null);
// }
//
// public static ActionResponse discard(String reason, Object... args) {
// return new ActionResponse(Action.RejectAndDiscard, String.format(reason, args));
// }
//
// public String getReason() {
// return reason;
// }
//
// public Action getAction() {
// return action;
// }
//
// @Override
// public boolean equals(Object o) {
// if (this == o) return true;
// if (o == null || getClass() != o.getClass()) return false;
//
// ActionResponse that = (ActionResponse) o;
//
// if (action != that.action) return false;
// if (reason != null ? !reason.equals(that.reason) : that.reason != null) return false;
//
// return true;
// }
//
// @Override
// public int hashCode() {
// int result = action != null ? action.hashCode() : 0;
// result = 31 * result + (reason != null ? reason.hashCode() : 0);
// return result;
// }
//
// @Override
// public String toString() {
// return "ActionResponse{" +
// "action=" + action +
// ", reason='" + reason + '\'' +
// '}';
// }
//
// public enum Action {
// Acknowledge, //! The transport will ack the message explicitly.
// RejectAndRequeue, //! The message wasn't meant to be processed.
// // For example, if the message delivered is of
// // a higher version than what we are able to
// // deal with.
// RejectAndDiscard //! A malformed message, place it on a poison queue.
// }
//
//
//
// }
| import com.rabbitmq.client.AMQP;
import com.rabbitmq.client.Channel;
import com.rabbitmq.client.Envelope;
import com.rabbitmq.client.ShutdownSignalException;
import io.rtr.conduit.amqp.AMQPConsumerCallback;
import io.rtr.conduit.amqp.AMQPMessageBundle;
import io.rtr.conduit.amqp.ActionResponse;
import org.junit.jupiter.api.Test;
import org.mockito.ArgumentCaptor;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify; | package io.rtr.conduit.amqp.impl;
public class AMQPQueueConsumerTest {
@Test
public void testHandleDeliveryAcknowledge() { | // Path: conduit/src/main/java/io/rtr/conduit/amqp/AMQPConsumerCallback.java
// public interface AMQPConsumerCallback {
// ActionResponse handle(AMQPMessageBundle messageBundle);
// void notifyOfActionFailure(Exception e);
// void notifyOfShutdown(String consumerTag, ShutdownSignalException sig);
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/AMQPMessageBundle.java
// public class AMQPMessageBundle implements TransportMessageBundle {
// private String consumerTag;
// private Envelope envelope;
// private AMQP.BasicProperties basicProperties;
// private byte[] body;
//
// private static AMQP.BasicProperties initialProperties() {
// return initialProperties(null);
// }
//
// private static AMQP.BasicProperties initialProperties(Map<String, Object> additionalHeaders) {
// Map<String, Object> headers = new HashMap<String, Object>();
//
// if (additionalHeaders != null) {
// headers.putAll(additionalHeaders);
// }
//
// headers.put("conduit-retry-count", 0);
//
// return new AMQP.BasicProperties()
// .builder()
// .deliveryMode(2 /*persistent*/)
// .priority(0)
// .headers(headers)
// .contentType("text/plain")
// .build();
// }
//
// public AMQPMessageBundle(String consumerTag, Envelope envelope, AMQP.BasicProperties basicProperties, byte[] body) {
// this.consumerTag = consumerTag;
// this.envelope = envelope;
// this.basicProperties = basicProperties;
// this.body = body;
// }
//
// public AMQPMessageBundle(String message) {
// this(null, null, initialProperties(), message.getBytes());
// }
//
// public AMQPMessageBundle(String message, Map<String, Object> headers) {
// this(null, null, initialProperties(headers), message.getBytes());
// }
//
// public String getConsumerTag() {
// return consumerTag;
// }
//
// public Envelope getEnvelope() {
// return envelope;
// }
//
// public AMQP.BasicProperties getBasicProperties() {
// return basicProperties;
// }
//
// public byte[] getBody() {
// return body;
// }
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/ActionResponse.java
// public class ActionResponse {
//
// private Action action;
// private String reason;
// public static final String REASON_KEY = "reason";
//
// private ActionResponse(Action action, String reason) {
// this.action = action;
// this.reason = reason;
// }
//
// public static ActionResponse acknowledge() {
// return new ActionResponse(Action.Acknowledge, null);
// }
//
// public static ActionResponse retry() {
// return new ActionResponse(Action.RejectAndRequeue, null);
// }
//
// public static ActionResponse retry(String reason, Object... args) {
// return new ActionResponse(Action.RejectAndRequeue, String.format(reason, args));
// }
//
// public static ActionResponse discard() {
// return new ActionResponse(Action.RejectAndDiscard, null);
// }
//
// public static ActionResponse discard(String reason, Object... args) {
// return new ActionResponse(Action.RejectAndDiscard, String.format(reason, args));
// }
//
// public String getReason() {
// return reason;
// }
//
// public Action getAction() {
// return action;
// }
//
// @Override
// public boolean equals(Object o) {
// if (this == o) return true;
// if (o == null || getClass() != o.getClass()) return false;
//
// ActionResponse that = (ActionResponse) o;
//
// if (action != that.action) return false;
// if (reason != null ? !reason.equals(that.reason) : that.reason != null) return false;
//
// return true;
// }
//
// @Override
// public int hashCode() {
// int result = action != null ? action.hashCode() : 0;
// result = 31 * result + (reason != null ? reason.hashCode() : 0);
// return result;
// }
//
// @Override
// public String toString() {
// return "ActionResponse{" +
// "action=" + action +
// ", reason='" + reason + '\'' +
// '}';
// }
//
// public enum Action {
// Acknowledge, //! The transport will ack the message explicitly.
// RejectAndRequeue, //! The message wasn't meant to be processed.
// // For example, if the message delivered is of
// // a higher version than what we are able to
// // deal with.
// RejectAndDiscard //! A malformed message, place it on a poison queue.
// }
//
//
//
// }
// Path: conduit/src/test/java/io/rtr/conduit/amqp/impl/AMQPQueueConsumerTest.java
import com.rabbitmq.client.AMQP;
import com.rabbitmq.client.Channel;
import com.rabbitmq.client.Envelope;
import com.rabbitmq.client.ShutdownSignalException;
import io.rtr.conduit.amqp.AMQPConsumerCallback;
import io.rtr.conduit.amqp.AMQPMessageBundle;
import io.rtr.conduit.amqp.ActionResponse;
import org.junit.jupiter.api.Test;
import org.mockito.ArgumentCaptor;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
package io.rtr.conduit.amqp.impl;
public class AMQPQueueConsumerTest {
@Test
public void testHandleDeliveryAcknowledge() { | final List<AMQPMessageBundle> messages = new ArrayList<>(); |
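The test begins by declaring a list that the callback will fill as messages arrive, so assertions can run afterwards. A tiny, dependency-free illustration of that record-into-a-list stub pattern (all names here are hypothetical):

import java.util.ArrayList;
import java.util.List;

public class RecordingCallbackDemo {
    interface Callback { void handle(String message); }

    static class RecordingCallback implements Callback {
        final List<String> received = new ArrayList<>();
        @Override public void handle(String message) { received.add(message); }
    }

    public static void main(String[] args) {
        RecordingCallback callback = new RecordingCallback();
        callback.handle("hello");
        callback.handle("world");
        // In a JUnit test this would be assertEquals(2, callback.received.size());
        if (callback.received.size() != 2) throw new AssertionError();
        System.out.println(callback.received);
    }
}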
RentTheRunway/conduit | conduit/src/test/java/io/rtr/conduit/amqp/impl/AMQPQueueConsumerTest.java | // Path: conduit/src/main/java/io/rtr/conduit/amqp/AMQPConsumerCallback.java
// public interface AMQPConsumerCallback {
// ActionResponse handle(AMQPMessageBundle messageBundle);
// void notifyOfActionFailure(Exception e);
// void notifyOfShutdown(String consumerTag, ShutdownSignalException sig);
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/AMQPMessageBundle.java
// public class AMQPMessageBundle implements TransportMessageBundle {
// private String consumerTag;
// private Envelope envelope;
// private AMQP.BasicProperties basicProperties;
// private byte[] body;
//
// private static AMQP.BasicProperties initialProperties() {
// return initialProperties(null);
// }
//
// private static AMQP.BasicProperties initialProperties(Map<String, Object> additionalHeaders) {
// Map<String, Object> headers = new HashMap<String, Object>();
//
// if (additionalHeaders != null) {
// headers.putAll(additionalHeaders);
// }
//
// headers.put("conduit-retry-count", 0);
//
// return new AMQP.BasicProperties()
// .builder()
// .deliveryMode(2 /*persistent*/)
// .priority(0)
// .headers(headers)
// .contentType("text/plain")
// .build();
// }
//
// public AMQPMessageBundle(String consumerTag, Envelope envelope, AMQP.BasicProperties basicProperties, byte[] body) {
// this.consumerTag = consumerTag;
// this.envelope = envelope;
// this.basicProperties = basicProperties;
// this.body = body;
// }
//
// public AMQPMessageBundle(String message) {
// this(null, null, initialProperties(), message.getBytes());
// }
//
// public AMQPMessageBundle(String message, Map<String, Object> headers) {
// this(null, null, initialProperties(headers), message.getBytes());
// }
//
// public String getConsumerTag() {
// return consumerTag;
// }
//
// public Envelope getEnvelope() {
// return envelope;
// }
//
// public AMQP.BasicProperties getBasicProperties() {
// return basicProperties;
// }
//
// public byte[] getBody() {
// return body;
// }
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/ActionResponse.java
// public class ActionResponse {
//
// private Action action;
// private String reason;
// public static final String REASON_KEY = "reason";
//
// private ActionResponse(Action action, String reason) {
// this.action = action;
// this.reason = reason;
// }
//
// public static ActionResponse acknowledge() {
// return new ActionResponse(Action.Acknowledge, null);
// }
//
// public static ActionResponse retry() {
// return new ActionResponse(Action.RejectAndRequeue, null);
// }
//
// public static ActionResponse retry(String reason, Object... args) {
// return new ActionResponse(Action.RejectAndRequeue, String.format(reason, args));
// }
//
// public static ActionResponse discard() {
// return new ActionResponse(Action.RejectAndDiscard, null);
// }
//
// public static ActionResponse discard(String reason, Object... args) {
// return new ActionResponse(Action.RejectAndDiscard, String.format(reason, args));
// }
//
// public String getReason() {
// return reason;
// }
//
// public Action getAction() {
// return action;
// }
//
// @Override
// public boolean equals(Object o) {
// if (this == o) return true;
// if (o == null || getClass() != o.getClass()) return false;
//
// ActionResponse that = (ActionResponse) o;
//
// if (action != that.action) return false;
// if (reason != null ? !reason.equals(that.reason) : that.reason != null) return false;
//
// return true;
// }
//
// @Override
// public int hashCode() {
// int result = action != null ? action.hashCode() : 0;
// result = 31 * result + (reason != null ? reason.hashCode() : 0);
// return result;
// }
//
// @Override
// public String toString() {
// return "ActionResponse{" +
// "action=" + action +
// ", reason='" + reason + '\'' +
// '}';
// }
//
// public enum Action {
// Acknowledge, //! The transport will ack the message explicitly.
// RejectAndRequeue, //! The message wasn't meant to be processed.
// // For example, if the message delivered is of
// // a higher version than what we are able to
// // deal with.
// RejectAndDiscard //! A malformed message, place it on a poison queue.
// }
//
//
//
// }
| import com.rabbitmq.client.AMQP;
import com.rabbitmq.client.Channel;
import com.rabbitmq.client.Envelope;
import com.rabbitmq.client.ShutdownSignalException;
import io.rtr.conduit.amqp.AMQPConsumerCallback;
import io.rtr.conduit.amqp.AMQPMessageBundle;
import io.rtr.conduit.amqp.ActionResponse;
import org.junit.jupiter.api.Test;
import org.mockito.ArgumentCaptor;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify; | package io.rtr.conduit.amqp.impl;
public class AMQPQueueConsumerTest {
@Test
public void testHandleDeliveryAcknowledge() {
final List<AMQPMessageBundle> messages = new ArrayList<>();
| // Path: conduit/src/main/java/io/rtr/conduit/amqp/AMQPConsumerCallback.java
// public interface AMQPConsumerCallback {
// ActionResponse handle(AMQPMessageBundle messageBundle);
// void notifyOfActionFailure(Exception e);
// void notifyOfShutdown(String consumerTag, ShutdownSignalException sig);
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/AMQPMessageBundle.java
// public class AMQPMessageBundle implements TransportMessageBundle {
// private String consumerTag;
// private Envelope envelope;
// private AMQP.BasicProperties basicProperties;
// private byte[] body;
//
// private static AMQP.BasicProperties initialProperties() {
// return initialProperties(null);
// }
//
// private static AMQP.BasicProperties initialProperties(Map<String, Object> additionalHeaders) {
// Map<String, Object> headers = new HashMap<String, Object>();
//
// if (additionalHeaders != null) {
// headers.putAll(additionalHeaders);
// }
//
// headers.put("conduit-retry-count", 0);
//
// return new AMQP.BasicProperties()
// .builder()
// .deliveryMode(2 /*persistent*/)
// .priority(0)
// .headers(headers)
// .contentType("text/plain")
// .build();
// }
//
// public AMQPMessageBundle(String consumerTag, Envelope envelope, AMQP.BasicProperties basicProperties, byte[] body) {
// this.consumerTag = consumerTag;
// this.envelope = envelope;
// this.basicProperties = basicProperties;
// this.body = body;
// }
//
// public AMQPMessageBundle(String message) {
// this(null, null, initialProperties(), message.getBytes());
// }
//
// public AMQPMessageBundle(String message, Map<String, Object> headers) {
// this(null, null, initialProperties(headers), message.getBytes());
// }
//
// public String getConsumerTag() {
// return consumerTag;
// }
//
// public Envelope getEnvelope() {
// return envelope;
// }
//
// public AMQP.BasicProperties getBasicProperties() {
// return basicProperties;
// }
//
// public byte[] getBody() {
// return body;
// }
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/ActionResponse.java
// public class ActionResponse {
//
// private Action action;
// private String reason;
// public static final String REASON_KEY = "reason";
//
// private ActionResponse(Action action, String reason) {
// this.action = action;
// this.reason = reason;
// }
//
// public static ActionResponse acknowledge() {
// return new ActionResponse(Action.Acknowledge, null);
// }
//
// public static ActionResponse retry() {
// return new ActionResponse(Action.RejectAndRequeue, null);
// }
//
// public static ActionResponse retry(String reason, Object... args) {
// return new ActionResponse(Action.RejectAndRequeue, String.format(reason, args));
// }
//
// public static ActionResponse discard() {
// return new ActionResponse(Action.RejectAndDiscard, null);
// }
//
// public static ActionResponse discard(String reason, Object... args) {
// return new ActionResponse(Action.RejectAndDiscard, String.format(reason, args));
// }
//
// public String getReason() {
// return reason;
// }
//
// public Action getAction() {
// return action;
// }
//
// @Override
// public boolean equals(Object o) {
// if (this == o) return true;
// if (o == null || getClass() != o.getClass()) return false;
//
// ActionResponse that = (ActionResponse) o;
//
// if (action != that.action) return false;
// if (reason != null ? !reason.equals(that.reason) : that.reason != null) return false;
//
// return true;
// }
//
// @Override
// public int hashCode() {
// int result = action != null ? action.hashCode() : 0;
// result = 31 * result + (reason != null ? reason.hashCode() : 0);
// return result;
// }
//
// @Override
// public String toString() {
// return "ActionResponse{" +
// "action=" + action +
// ", reason='" + reason + '\'' +
// '}';
// }
//
// public enum Action {
// Acknowledge, //! The transport will ack the message explicitly.
// RejectAndRequeue, //! The message wasn't meant to be processed.
// // For example, if the message delivered is of
// // a higher version than what we are able to
// // deal with.
// RejectAndDiscard //! A malformed message, place it on a poison queue.
// }
//
//
//
// }
// Path: conduit/src/test/java/io/rtr/conduit/amqp/impl/AMQPQueueConsumerTest.java
import com.rabbitmq.client.AMQP;
import com.rabbitmq.client.Channel;
import com.rabbitmq.client.Envelope;
import com.rabbitmq.client.ShutdownSignalException;
import io.rtr.conduit.amqp.AMQPConsumerCallback;
import io.rtr.conduit.amqp.AMQPMessageBundle;
import io.rtr.conduit.amqp.ActionResponse;
import org.junit.jupiter.api.Test;
import org.mockito.ArgumentCaptor;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
package io.rtr.conduit.amqp.impl;
public class AMQPQueueConsumerTest {
@Test
public void testHandleDeliveryAcknowledge() {
final List<AMQPMessageBundle> messages = new ArrayList<>();
| AMQPConsumerCallback callback = new AMQPConsumerCallback() { |
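A plausible completion of the anonymous callback above, using only the AMQPConsumerCallback methods and the ActionResponse factory quoted in the context (the acknowledge-and-record behavior is an assumption about this particular test, not taken from the repository):

AMQPConsumerCallback callback = new AMQPConsumerCallback() {
    @Override
    public ActionResponse handle(AMQPMessageBundle messageBundle) {
        messages.add(messageBundle);         // record the message for later assertions
        return ActionResponse.acknowledge(); // ack so the transport acks explicitly
    }

    @Override
    public void notifyOfActionFailure(Exception e) {
        // no-op in this sketch
    }

    @Override
    public void notifyOfShutdown(String consumerTag, ShutdownSignalException sig) {
        // no-op in this sketch
    }
};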
RentTheRunway/conduit | conduit/src/test/java/io/rtr/conduit/amqp/impl/AMQPQueueConsumerTest.java | // Path: conduit/src/main/java/io/rtr/conduit/amqp/AMQPConsumerCallback.java
// public interface AMQPConsumerCallback {
// ActionResponse handle(AMQPMessageBundle messageBundle);
// void notifyOfActionFailure(Exception e);
// void notifyOfShutdown(String consumerTag, ShutdownSignalException sig);
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/AMQPMessageBundle.java
// public class AMQPMessageBundle implements TransportMessageBundle {
// private String consumerTag;
// private Envelope envelope;
// private AMQP.BasicProperties basicProperties;
// private byte[] body;
//
// private static AMQP.BasicProperties initialProperties() {
// return initialProperties(null);
// }
//
// private static AMQP.BasicProperties initialProperties(Map<String, Object> additionalHeaders) {
// Map<String, Object> headers = new HashMap<String, Object>();
//
// if (additionalHeaders != null) {
// headers.putAll(additionalHeaders);
// }
//
// headers.put("conduit-retry-count", 0);
//
// return new AMQP.BasicProperties()
// .builder()
// .deliveryMode(2 /*persistent*/)
// .priority(0)
// .headers(headers)
// .contentType("text/plain")
// .build();
// }
//
// public AMQPMessageBundle(String consumerTag, Envelope envelope, AMQP.BasicProperties basicProperties, byte[] body) {
// this.consumerTag = consumerTag;
// this.envelope = envelope;
// this.basicProperties = basicProperties;
// this.body = body;
// }
//
// public AMQPMessageBundle(String message) {
// this(null, null, initialProperties(), message.getBytes());
// }
//
// public AMQPMessageBundle(String message, Map<String, Object> headers) {
// this(null, null, initialProperties(headers), message.getBytes());
// }
//
// public String getConsumerTag() {
// return consumerTag;
// }
//
// public Envelope getEnvelope() {
// return envelope;
// }
//
// public AMQP.BasicProperties getBasicProperties() {
// return basicProperties;
// }
//
// public byte[] getBody() {
// return body;
// }
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/ActionResponse.java
// public class ActionResponse {
//
// private Action action;
// private String reason;
// public static final String REASON_KEY = "reason";
//
// private ActionResponse(Action action, String reason) {
// this.action = action;
// this.reason = reason;
// }
//
// public static ActionResponse acknowledge() {
// return new ActionResponse(Action.Acknowledge, null);
// }
//
// public static ActionResponse retry() {
// return new ActionResponse(Action.RejectAndRequeue, null);
// }
//
// public static ActionResponse retry(String reason, Object... args) {
// return new ActionResponse(Action.RejectAndRequeue, String.format(reason, args));
// }
//
// public static ActionResponse discard() {
// return new ActionResponse(Action.RejectAndDiscard, null);
// }
//
// public static ActionResponse discard(String reason, Object... args) {
// return new ActionResponse(Action.RejectAndDiscard, String.format(reason, args));
// }
//
// public String getReason() {
// return reason;
// }
//
// public Action getAction() {
// return action;
// }
//
// @Override
// public boolean equals(Object o) {
// if (this == o) return true;
// if (o == null || getClass() != o.getClass()) return false;
//
// ActionResponse that = (ActionResponse) o;
//
// if (action != that.action) return false;
// if (reason != null ? !reason.equals(that.reason) : that.reason != null) return false;
//
// return true;
// }
//
// @Override
// public int hashCode() {
// int result = action != null ? action.hashCode() : 0;
// result = 31 * result + (reason != null ? reason.hashCode() : 0);
// return result;
// }
//
// @Override
// public String toString() {
// return "ActionResponse{" +
// "action=" + action +
// ", reason='" + reason + '\'' +
// '}';
// }
//
// public enum Action {
// Acknowledge, //! The transport will ack the message explicitly.
// RejectAndRequeue, //! The message wasn't meant to be processed.
// // For example, if the message delivered is of
// // a higher version than what we are able to
// // deal with.
// RejectAndDiscard //! A malformed message, place it on a poison queue.
// }
//
//
//
// }
| import com.rabbitmq.client.AMQP;
import com.rabbitmq.client.Channel;
import com.rabbitmq.client.Envelope;
import com.rabbitmq.client.ShutdownSignalException;
import io.rtr.conduit.amqp.AMQPConsumerCallback;
import io.rtr.conduit.amqp.AMQPMessageBundle;
import io.rtr.conduit.amqp.ActionResponse;
import org.junit.jupiter.api.Test;
import org.mockito.ArgumentCaptor;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify; | package io.rtr.conduit.amqp.impl;
public class AMQPQueueConsumerTest {
@Test
public void testHandleDeliveryAcknowledge() {
final List<AMQPMessageBundle> messages = new ArrayList<>();
AMQPConsumerCallback callback = new AMQPConsumerCallback() {
@Override | // Path: conduit/src/main/java/io/rtr/conduit/amqp/AMQPConsumerCallback.java
// public interface AMQPConsumerCallback {
// ActionResponse handle(AMQPMessageBundle messageBundle);
// void notifyOfActionFailure(Exception e);
// void notifyOfShutdown(String consumerTag, ShutdownSignalException sig);
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/AMQPMessageBundle.java
// public class AMQPMessageBundle implements TransportMessageBundle {
// private String consumerTag;
// private Envelope envelope;
// private AMQP.BasicProperties basicProperties;
// private byte[] body;
//
// private static AMQP.BasicProperties initialProperties() {
// return initialProperties(null);
// }
//
// private static AMQP.BasicProperties initialProperties(Map<String, Object> additionalHeaders) {
// Map<String, Object> headers = new HashMap<String, Object>();
//
// if (additionalHeaders != null) {
// headers.putAll(additionalHeaders);
// }
//
// headers.put("conduit-retry-count", 0);
//
// return new AMQP.BasicProperties()
// .builder()
// .deliveryMode(2 /*persistent*/)
// .priority(0)
// .headers(headers)
// .contentType("text/plain")
// .build();
// }
//
// public AMQPMessageBundle(String consumerTag, Envelope envelope, AMQP.BasicProperties basicProperties, byte[] body) {
// this.consumerTag = consumerTag;
// this.envelope = envelope;
// this.basicProperties = basicProperties;
// this.body = body;
// }
//
// public AMQPMessageBundle(String message) {
// this(null, null, initialProperties(), message.getBytes());
// }
//
// public AMQPMessageBundle(String message, Map<String, Object> headers) {
// this(null, null, initialProperties(headers), message.getBytes());
// }
//
// public String getConsumerTag() {
// return consumerTag;
// }
//
// public Envelope getEnvelope() {
// return envelope;
// }
//
// public AMQP.BasicProperties getBasicProperties() {
// return basicProperties;
// }
//
// public byte[] getBody() {
// return body;
// }
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/ActionResponse.java
// public class ActionResponse {
//
// private Action action;
// private String reason;
// public static final String REASON_KEY = "reason";
//
// private ActionResponse(Action action, String reason) {
// this.action = action;
// this.reason = reason;
// }
//
// public static ActionResponse acknowledge() {
// return new ActionResponse(Action.Acknowledge, null);
// }
//
// public static ActionResponse retry() {
// return new ActionResponse(Action.RejectAndRequeue, null);
// }
//
// public static ActionResponse retry(String reason, Object... args) {
// return new ActionResponse(Action.RejectAndRequeue, String.format(reason, args));
// }
//
// public static ActionResponse discard() {
// return new ActionResponse(Action.RejectAndDiscard, null);
// }
//
// public static ActionResponse discard(String reason, Object... args) {
// return new ActionResponse(Action.RejectAndDiscard, String.format(reason, args));
// }
//
// public String getReason() {
// return reason;
// }
//
// public Action getAction() {
// return action;
// }
//
// @Override
// public boolean equals(Object o) {
// if (this == o) return true;
// if (o == null || getClass() != o.getClass()) return false;
//
// ActionResponse that = (ActionResponse) o;
//
// if (action != that.action) return false;
// if (reason != null ? !reason.equals(that.reason) : that.reason != null) return false;
//
// return true;
// }
//
// @Override
// public int hashCode() {
// int result = action != null ? action.hashCode() : 0;
// result = 31 * result + (reason != null ? reason.hashCode() : 0);
// return result;
// }
//
// @Override
// public String toString() {
// return "ActionResponse{" +
// "action=" + action +
// ", reason='" + reason + '\'' +
// '}';
// }
//
// public enum Action {
// Acknowledge, //! The transport will ack the message explicitly.
// RejectAndRequeue, //! The message wasn't meant to be processed.
// // For example, if the message delivered is of
// // a higher version than what we are able to
// // deal with.
// RejectAndDiscard //! A malformed message, place it on a poison queue.
// }
//
//
//
// }
// Path: conduit/src/test/java/io/rtr/conduit/amqp/impl/AMQPQueueConsumerTest.java
import com.rabbitmq.client.AMQP;
import com.rabbitmq.client.Channel;
import com.rabbitmq.client.Envelope;
import com.rabbitmq.client.ShutdownSignalException;
import io.rtr.conduit.amqp.AMQPConsumerCallback;
import io.rtr.conduit.amqp.AMQPMessageBundle;
import io.rtr.conduit.amqp.ActionResponse;
import org.junit.jupiter.api.Test;
import org.mockito.ArgumentCaptor;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
package io.rtr.conduit.amqp.impl;
public class AMQPQueueConsumerTest {
@Test
public void testHandleDeliveryAcknowledge() {
final List<AMQPMessageBundle> messages = new ArrayList<>();
AMQPConsumerCallback callback = new AMQPConsumerCallback() {
@Override | public ActionResponse handle(AMQPMessageBundle messageBundle) { |
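The next_line above begins the handle(...) override; the verification half of such an acknowledge test typically asserts that the delivery was acked on the channel. Below is a sketch, assuming the consumer acknowledges through the standard RabbitMQ Channel.basicAck(long, boolean); the deliverThroughConsumer(...) helper is hypothetical and stands in for AMQPQueueConsumer's delivery entry point, which this dump does not show.

import com.rabbitmq.client.Channel;
import com.rabbitmq.client.Envelope;
import java.io.IOException;
import static org.mockito.Mockito.eq;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;

class AckVerificationSketch {
    void verifyAcknowledged() throws IOException {
        Channel channel = mock(Channel.class);
        // Envelope(deliveryTag, redeliver, exchange, routingKey) is the real
        // RabbitMQ client constructor.
        Envelope envelope = new Envelope(1L, false, "exchange", "routingKey");

        deliverThroughConsumer(channel, envelope); // hypothetical delivery hook

        // When the callback returns ActionResponse.acknowledge(), the consumer
        // is expected to ack exactly this delivery tag, without multi-ack.
        verify(channel).basicAck(eq(envelope.getDeliveryTag()), eq(false));
    }

    void deliverThroughConsumer(Channel channel, Envelope envelope) {
        // placeholder: AMQPQueueConsumer's entry point is not shown in this dump
    }
}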
RentTheRunway/conduit | conduit/src/test/java/io/rtr/conduit/util/LoggingAmqpCallbackHandler.java | // Path: conduit/src/main/java/io/rtr/conduit/amqp/AMQPConsumerCallback.java
// public interface AMQPConsumerCallback {
// ActionResponse handle(AMQPMessageBundle messageBundle);
// void notifyOfActionFailure(Exception e);
// void notifyOfShutdown(String consumerTag, ShutdownSignalException sig);
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/AMQPMessageBundle.java
// public class AMQPMessageBundle implements TransportMessageBundle {
// private String consumerTag;
// private Envelope envelope;
// private AMQP.BasicProperties basicProperties;
// private byte[] body;
//
// private static AMQP.BasicProperties initialProperties() {
// return initialProperties(null);
// }
//
// private static AMQP.BasicProperties initialProperties(Map<String, Object> additionalHeaders) {
// Map<String, Object> headers = new HashMap<String, Object>();
//
// if (additionalHeaders != null) {
// headers.putAll(additionalHeaders);
// }
//
// headers.put("conduit-retry-count", 0);
//
// return new AMQP.BasicProperties()
// .builder()
// .deliveryMode(2 /*persistent*/)
// .priority(0)
// .headers(headers)
// .contentType("text/plain")
// .build();
// }
//
// public AMQPMessageBundle(String consumerTag, Envelope envelope, AMQP.BasicProperties basicProperties, byte[] body) {
// this.consumerTag = consumerTag;
// this.envelope = envelope;
// this.basicProperties = basicProperties;
// this.body = body;
// }
//
// public AMQPMessageBundle(String message) {
// this(null, null, initialProperties(), message.getBytes());
// }
//
// public AMQPMessageBundle(String message, Map<String, Object> headers) {
// this(null, null, initialProperties(headers), message.getBytes());
// }
//
// public String getConsumerTag() {
// return consumerTag;
// }
//
// public Envelope getEnvelope() {
// return envelope;
// }
//
// public AMQP.BasicProperties getBasicProperties() {
// return basicProperties;
// }
//
// public byte[] getBody() {
// return body;
// }
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/ActionResponse.java
// public class ActionResponse {
//
// private Action action;
// private String reason;
// public static final String REASON_KEY = "reason";
//
// private ActionResponse(Action action, String reason) {
// this.action = action;
// this.reason = reason;
// }
//
// public static ActionResponse acknowledge() {
// return new ActionResponse(Action.Acknowledge, null);
// }
//
// public static ActionResponse retry() {
// return new ActionResponse(Action.RejectAndRequeue, null);
// }
//
// public static ActionResponse retry(String reason, Object... args) {
// return new ActionResponse(Action.RejectAndRequeue, String.format(reason, args));
// }
//
// public static ActionResponse discard() {
// return new ActionResponse(Action.RejectAndDiscard, null);
// }
//
// public static ActionResponse discard(String reason, Object... args) {
// return new ActionResponse(Action.RejectAndDiscard, String.format(reason, args));
// }
//
// public String getReason() {
// return reason;
// }
//
// public Action getAction() {
// return action;
// }
//
// @Override
// public boolean equals(Object o) {
// if (this == o) return true;
// if (o == null || getClass() != o.getClass()) return false;
//
// ActionResponse that = (ActionResponse) o;
//
// if (action != that.action) return false;
// if (reason != null ? !reason.equals(that.reason) : that.reason != null) return false;
//
// return true;
// }
//
// @Override
// public int hashCode() {
// int result = action != null ? action.hashCode() : 0;
// result = 31 * result + (reason != null ? reason.hashCode() : 0);
// return result;
// }
//
// @Override
// public String toString() {
// return "ActionResponse{" +
// "action=" + action +
// ", reason='" + reason + '\'' +
// '}';
// }
//
// public enum Action {
// Acknowledge, //! The transport will ack the message explicitly.
// RejectAndRequeue, //! The message wasn't meant to be processed.
// // For example, if the message delivered is of
// // a higher version than what we are able to
// // deal with.
// RejectAndDiscard //! A malformed message, place it on a poison queue.
// }
//
//
//
// }
| import com.rabbitmq.client.ShutdownSignalException;
import io.rtr.conduit.amqp.AMQPConsumerCallback;
import io.rtr.conduit.amqp.AMQPMessageBundle;
import io.rtr.conduit.amqp.ActionResponse;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.nio.charset.StandardCharsets; | package io.rtr.conduit.util;
public class LoggingAmqpCallbackHandler implements AMQPConsumerCallback {
private static final Logger LOGGER = LoggerFactory.getLogger(LoggingAmqpCallbackHandler.class);
@Override | // Path: conduit/src/main/java/io/rtr/conduit/amqp/AMQPConsumerCallback.java
// public interface AMQPConsumerCallback {
// ActionResponse handle(AMQPMessageBundle messageBundle);
// void notifyOfActionFailure(Exception e);
// void notifyOfShutdown(String consumerTag, ShutdownSignalException sig);
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/AMQPMessageBundle.java
// public class AMQPMessageBundle implements TransportMessageBundle {
// private String consumerTag;
// private Envelope envelope;
// private AMQP.BasicProperties basicProperties;
// private byte[] body;
//
// private static AMQP.BasicProperties initialProperties() {
// return initialProperties(null);
// }
//
// private static AMQP.BasicProperties initialProperties(Map<String, Object> additionalHeaders) {
// Map<String, Object> headers = new HashMap<String, Object>();
//
// if (additionalHeaders != null) {
// headers.putAll(additionalHeaders);
// }
//
// headers.put("conduit-retry-count", 0);
//
// return new AMQP.BasicProperties()
// .builder()
// .deliveryMode(2 /*persistent*/)
// .priority(0)
// .headers(headers)
// .contentType("text/plain")
// .build();
// }
//
// public AMQPMessageBundle(String consumerTag, Envelope envelope, AMQP.BasicProperties basicProperties, byte[] body) {
// this.consumerTag = consumerTag;
// this.envelope = envelope;
// this.basicProperties = basicProperties;
// this.body = body;
// }
//
// public AMQPMessageBundle(String message) {
// this(null, null, initialProperties(), message.getBytes());
// }
//
// public AMQPMessageBundle(String message, Map<String, Object> headers) {
// this(null, null, initialProperties(headers), message.getBytes());
// }
//
// public String getConsumerTag() {
// return consumerTag;
// }
//
// public Envelope getEnvelope() {
// return envelope;
// }
//
// public AMQP.BasicProperties getBasicProperties() {
// return basicProperties;
// }
//
// public byte[] getBody() {
// return body;
// }
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/ActionResponse.java
// public class ActionResponse {
//
// private Action action;
// private String reason;
// public static final String REASON_KEY = "reason";
//
// private ActionResponse(Action action, String reason) {
// this.action = action;
// this.reason = reason;
// }
//
// public static ActionResponse acknowledge() {
// return new ActionResponse(Action.Acknowledge, null);
// }
//
// public static ActionResponse retry() {
// return new ActionResponse(Action.RejectAndRequeue, null);
// }
//
// public static ActionResponse retry(String reason, Object... args) {
// return new ActionResponse(Action.RejectAndRequeue, String.format(reason, args));
// }
//
// public static ActionResponse discard() {
// return new ActionResponse(Action.RejectAndDiscard, null);
// }
//
// public static ActionResponse discard(String reason, Object... args) {
// return new ActionResponse(Action.RejectAndDiscard, String.format(reason, args));
// }
//
// public String getReason() {
// return reason;
// }
//
// public Action getAction() {
// return action;
// }
//
// @Override
// public boolean equals(Object o) {
// if (this == o) return true;
// if (o == null || getClass() != o.getClass()) return false;
//
// ActionResponse that = (ActionResponse) o;
//
// if (action != that.action) return false;
// if (reason != null ? !reason.equals(that.reason) : that.reason != null) return false;
//
// return true;
// }
//
// @Override
// public int hashCode() {
// int result = action != null ? action.hashCode() : 0;
// result = 31 * result + (reason != null ? reason.hashCode() : 0);
// return result;
// }
//
// @Override
// public String toString() {
// return "ActionResponse{" +
// "action=" + action +
// ", reason='" + reason + '\'' +
// '}';
// }
//
// public enum Action {
// Acknowledge, //! The transport will ack the message explicitly.
// RejectAndRequeue, //! The message wasn't meant to be processed.
// // For example, if the message delivered is of
// // a higher version than what we are able to
// // deal with.
// RejectAndDiscard //! A malformed message, place it on a poison queue.
// }
//
//
//
// }
// Path: conduit/src/test/java/io/rtr/conduit/util/LoggingAmqpCallbackHandler.java
import com.rabbitmq.client.ShutdownSignalException;
import io.rtr.conduit.amqp.AMQPConsumerCallback;
import io.rtr.conduit.amqp.AMQPMessageBundle;
import io.rtr.conduit.amqp.ActionResponse;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.nio.charset.StandardCharsets;
package io.rtr.conduit.util;
public class LoggingAmqpCallbackHandler implements AMQPConsumerCallback {
private static final Logger LOGGER = LoggerFactory.getLogger(LoggingAmqpCallbackHandler.class);
@Override | public ActionResponse handle(AMQPMessageBundle messageBundle) { |
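Given the imports this row declares (slf4j and StandardCharsets) and the ActionResponse factories in its context, a plausible completion of the logging handler is sketched below; the repository's actual log wording is not shown here and may differ.

import com.rabbitmq.client.ShutdownSignalException;
import io.rtr.conduit.amqp.AMQPConsumerCallback;
import io.rtr.conduit.amqp.AMQPMessageBundle;
import io.rtr.conduit.amqp.ActionResponse;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.nio.charset.StandardCharsets;

public class LoggingCallbackSketch implements AMQPConsumerCallback {
    private static final Logger LOGGER = LoggerFactory.getLogger(LoggingCallbackSketch.class);

    @Override
    public ActionResponse handle(AMQPMessageBundle messageBundle) {
        // The bundle stores the raw body as byte[]; decode it for logging.
        LOGGER.info("Received: {}", new String(messageBundle.getBody(), StandardCharsets.UTF_8));
        return ActionResponse.acknowledge();
    }

    @Override
    public void notifyOfActionFailure(Exception e) {
        LOGGER.error("Action failure", e);
    }

    @Override
    public void notifyOfShutdown(String consumerTag, ShutdownSignalException sig) {
        LOGGER.warn("Consumer {} shut down", consumerTag, sig);
    }
}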
RentTheRunway/conduit | conduit/src/main/java/io/rtr/conduit/amqp/consumer/Consumer.java | // Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/Transport.java
// public abstract class Transport {
// //! Public interface.
//
// //! Establishes a connection to either an intermediary or the other
// // end point. In the case of AMQP, this method is used to connect
// // to the broker.
// public final void connect(TransportConnectionProperties properties) throws IOException {
// connectImpl(properties);
// }
//
// public boolean isConnected() {
// return isConnectedImpl();
// }
//
// //! Closes the connection.
// public final void close() throws IOException {
// closeImpl();
// }
//
// //! Starts the asynchronous delivery mechanism.
// public final void listen(TransportListenProperties properties) throws IOException {
// listenImpl(properties);
// }
//
// //! Stops listening for incoming messages.
// public final void stop() throws IOException {
// stopImpl();
// }
//
// // Is the listener thread pool still doing work? (stop is not synchronous)
// public final boolean isStopped(int maxWaitMilliseconds) throws InterruptedException {
// return isStoppedImpl(maxWaitMilliseconds);
// }
//
// //! Publish a message to the other endpoint.
// public final boolean publish(TransportMessageBundle messageBundle, TransportPublishProperties properties)
// throws IOException, TimeoutException, InterruptedException {
// return publishImpl(messageBundle, properties);
// }
//
// public final <E> boolean transactionalPublish(Collection<E> messageBundles, TransportPublishProperties properties)
// throws IOException, TimeoutException, InterruptedException {
// return transactionalPublishImpl(messageBundles, properties);
// }
//
// //! Implementation
//
// protected abstract boolean isConnectedImpl();
// protected abstract void connectImpl(TransportConnectionProperties properties) throws IOException;
// protected abstract void closeImpl() throws IOException;
//
// protected void listenImpl(TransportListenProperties properties) throws IOException {}
// protected void stopImpl() throws IOException {}
// protected abstract boolean isStoppedImpl(int waitMillSeconds) throws InterruptedException;
//
// protected boolean publishImpl(TransportMessageBundle messageBundle, TransportPublishProperties properties)
// throws IOException, TimeoutException, InterruptedException { return false; }
// protected <E> boolean transactionalPublishImpl(Collection<E> messageBundles, TransportPublishProperties properties)
// throws IOException, TimeoutException, InterruptedException { return false; }
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/TransportListenContext.java
// public interface TransportListenContext {
// Transport getTransport();
// TransportConnectionProperties getConnectionProperties();
// TransportListenProperties getListenProperties();
// }
| import io.rtr.conduit.amqp.transport.Transport;
import io.rtr.conduit.amqp.transport.TransportListenContext;
import java.io.IOException; | package io.rtr.conduit.amqp.consumer;
/**
 * The consumer operates in terms of a listen context: an encapsulation of a
* concrete transport and its properties.
*/
public class Consumer implements AutoCloseable {
private TransportListenContext transportContext;
//! Public interface.
Consumer(TransportListenContext transportContext) {
this.transportContext = transportContext;
}
public void connect() throws IOException {
getTransport().connect(transportContext.getConnectionProperties());
}
public boolean isConnected() {
return getTransport().isConnected();
}
@Override
public void close() throws IOException {
getTransport().close();
}
public void listen() throws IOException {
getTransport().listen(transportContext.getListenProperties());
}
public void stop() throws IOException {
getTransport().stop();
}
public boolean isStopped(int maxWaitMilliseconds) throws InterruptedException {
return getTransport().isStopped(maxWaitMilliseconds);
}
| // Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/Transport.java
// public abstract class Transport {
// //! Public interface.
//
// //! Establishes a connection to either an intermediary or the other
// // end point. In the case of AMQP, this method is used to connect
// // to the broker.
// public final void connect(TransportConnectionProperties properties) throws IOException {
// connectImpl(properties);
// }
//
// public boolean isConnected() {
// return isConnectedImpl();
// }
//
// //! Closes the connection.
// public final void close() throws IOException {
// closeImpl();
// }
//
// //! Starts the asynchronous delivery mechanism.
// public final void listen(TransportListenProperties properties) throws IOException {
// listenImpl(properties);
// }
//
// //! Stops listening for incoming messages.
// public final void stop() throws IOException {
// stopImpl();
// }
//
// // Is the listener thread pool still doing work? (stop is not synchronous)
// public final boolean isStopped(int maxWaitMilliseconds) throws InterruptedException {
// return isStoppedImpl(maxWaitMilliseconds);
// }
//
// //! Publish a message to the other endpoint.
// public final boolean publish(TransportMessageBundle messageBundle, TransportPublishProperties properties)
// throws IOException, TimeoutException, InterruptedException {
// return publishImpl(messageBundle, properties);
// }
//
// public final <E> boolean transactionalPublish(Collection<E> messageBundles, TransportPublishProperties properties)
// throws IOException, TimeoutException, InterruptedException {
// return transactionalPublishImpl(messageBundles, properties);
// }
//
// //! Implementation
//
// protected abstract boolean isConnectedImpl();
// protected abstract void connectImpl(TransportConnectionProperties properties) throws IOException;
// protected abstract void closeImpl() throws IOException;
//
// protected void listenImpl(TransportListenProperties properties) throws IOException {}
// protected void stopImpl() throws IOException {}
// protected abstract boolean isStoppedImpl(int waitMillSeconds) throws InterruptedException;
//
// protected boolean publishImpl(TransportMessageBundle messageBundle, TransportPublishProperties properties)
// throws IOException, TimeoutException, InterruptedException { return false; }
// protected <E> boolean transactionalPublishImpl(Collection<E> messageBundles, TransportPublishProperties properties)
// throws IOException, TimeoutException, InterruptedException { return false; }
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/TransportListenContext.java
// public interface TransportListenContext {
// Transport getTransport();
// TransportConnectionProperties getConnectionProperties();
// TransportListenProperties getListenProperties();
// }
// Path: conduit/src/main/java/io/rtr/conduit/amqp/consumer/Consumer.java
import io.rtr.conduit.amqp.transport.Transport;
import io.rtr.conduit.amqp.transport.TransportListenContext;
import java.io.IOException;
package io.rtr.conduit.amqp.consumer;
/**
 * The consumer operates in terms of a listen context: an encapsulation of a
* concrete transport and its properties.
*/
public class Consumer implements AutoCloseable {
private TransportListenContext transportContext;
//! Public interface.
Consumer(TransportListenContext transportContext) {
this.transportContext = transportContext;
}
public void connect() throws IOException {
getTransport().connect(transportContext.getConnectionProperties());
}
public boolean isConnected() {
return getTransport().isConnected();
}
@Override
public void close() throws IOException {
getTransport().close();
}
public void listen() throws IOException {
getTransport().listen(transportContext.getListenProperties());
}
public void stop() throws IOException {
getTransport().stop();
}
public boolean isStopped(int maxWaitMilliseconds) throws InterruptedException {
return getTransport().isStopped(maxWaitMilliseconds);
}
| private Transport getTransport() { |
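The accessor begun in the next_line above presumably just returns transportContext.getTransport(), since TransportListenContext (in this row's context) exposes exactly that getter. For orientation, a sketch of driving the full Consumer lifecycle follows; it uses only the methods shown in this row, assumes a TransportListenContext named listenContext obtained from one of the builders, and sits in the io.rtr.conduit.amqp.consumer package because Consumer's constructor is package-private.

package io.rtr.conduit.amqp.consumer;

import io.rtr.conduit.amqp.transport.TransportListenContext;
import java.io.IOException;

class ConsumerLifecycleSketch {
    void run(TransportListenContext listenContext) throws IOException, InterruptedException {
        // try-with-resources works because Consumer implements AutoCloseable
        try (Consumer consumer = new Consumer(listenContext)) {
            consumer.connect();                // Transport.connect(connectionProperties)
            consumer.listen();                 // start asynchronous delivery
            consumer.stop();                   // request shutdown; stop is not synchronous
            while (!consumer.isStopped(100)) { // poll until the listener pool drains
                // in-flight deliveries are still being processed
            }
        }
    }
}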
RentTheRunway/conduit | conduit/src/main/java/io/rtr/conduit/amqp/consumer/ConsumerBuilder.java | // Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/Transport.java
// public abstract class Transport {
// //! Public interface.
//
// //! Establishes a connection to either an intermediary or the other
// // end point. In the case of AMQP, this method is used to connect
// // to the broker.
// public final void connect(TransportConnectionProperties properties) throws IOException {
// connectImpl(properties);
// }
//
// public boolean isConnected() {
// return isConnectedImpl();
// }
//
// //! Closes the connection.
// public final void close() throws IOException {
// closeImpl();
// }
//
// //! Starts the asynchronous delivery mechanism.
// public final void listen(TransportListenProperties properties) throws IOException {
// listenImpl(properties);
// }
//
// //! Stops listening for incoming messages.
// public final void stop() throws IOException {
// stopImpl();
// }
//
// // Is the listener thread pool still doing work? (stop is not synchronous)
// public final boolean isStopped(int maxWaitMilliseconds) throws InterruptedException {
// return isStoppedImpl(maxWaitMilliseconds);
// }
//
// //! Publish a message to the other endpoint.
// public final boolean publish(TransportMessageBundle messageBundle, TransportPublishProperties properties)
// throws IOException, TimeoutException, InterruptedException {
// return publishImpl(messageBundle, properties);
// }
//
// public final <E> boolean transactionalPublish(Collection<E> messageBundles, TransportPublishProperties properties)
// throws IOException, TimeoutException, InterruptedException {
// return transactionalPublishImpl(messageBundles, properties);
// }
//
// //! Implementation
//
// protected abstract boolean isConnectedImpl();
// protected abstract void connectImpl(TransportConnectionProperties properties) throws IOException;
// protected abstract void closeImpl() throws IOException;
//
// protected void listenImpl(TransportListenProperties properties) throws IOException {}
// protected void stopImpl() throws IOException {}
// protected abstract boolean isStoppedImpl(int waitMillSeconds) throws InterruptedException;
//
// protected boolean publishImpl(TransportMessageBundle messageBundle, TransportPublishProperties properties)
// throws IOException, TimeoutException, InterruptedException { return false; }
// protected <E> boolean transactionalPublishImpl(Collection<E> messageBundles, TransportPublishProperties properties)
// throws IOException, TimeoutException, InterruptedException { return false; }
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/TransportConnectionProperties.java
// public interface TransportConnectionProperties {
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/TransportListenContext.java
// public interface TransportListenContext {
// Transport getTransport();
// TransportConnectionProperties getConnectionProperties();
// TransportListenProperties getListenProperties();
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/TransportListenProperties.java
// public interface TransportListenProperties {
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/validation/Validatable.java
// public abstract class Validatable {
//
// protected abstract void validate();
//
// protected void assertNotNull(Object value, String argument) {
// if (value == null) {
// throw new IllegalArgumentException(String.format("Argument %s cannot be null", argument));
// }
// }
// }
| import io.rtr.conduit.amqp.transport.Transport;
import io.rtr.conduit.amqp.transport.TransportConnectionProperties;
import io.rtr.conduit.amqp.transport.TransportListenContext;
import io.rtr.conduit.amqp.transport.TransportListenProperties;
import io.rtr.conduit.amqp.validation.Validatable; | package io.rtr.conduit.amqp.consumer;
public abstract class ConsumerBuilder<T extends Transport
, C extends TransportConnectionProperties | // Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/Transport.java
// public abstract class Transport {
// //! Public interface.
//
// //! Establishes a connection to either an intermediary or the other
// // end point. In the case of AMQP, this method is used to connect
// // to the broker.
// public final void connect(TransportConnectionProperties properties) throws IOException {
// connectImpl(properties);
// }
//
// public boolean isConnected() {
// return isConnectedImpl();
// }
//
// //! Closes the connection.
// public final void close() throws IOException {
// closeImpl();
// }
//
// //! Starts the asynchronous delivery mechanism.
// public final void listen(TransportListenProperties properties) throws IOException {
// listenImpl(properties);
// }
//
// //! Stops listening for incoming messages.
// public final void stop() throws IOException {
// stopImpl();
// }
//
// // Is the listener thread pool still doing work? (stop is not synchronous)
// public final boolean isStopped(int maxWaitMilliseconds) throws InterruptedException {
// return isStoppedImpl(maxWaitMilliseconds);
// }
//
// //! Publish a message to the other endpoint.
// public final boolean publish(TransportMessageBundle messageBundle, TransportPublishProperties properties)
// throws IOException, TimeoutException, InterruptedException {
// return publishImpl(messageBundle, properties);
// }
//
// public final <E> boolean transactionalPublish(Collection<E> messageBundles, TransportPublishProperties properties)
// throws IOException, TimeoutException, InterruptedException {
// return transactionalPublishImpl(messageBundles, properties);
// }
//
// //! Implementation
//
// protected abstract boolean isConnectedImpl();
// protected abstract void connectImpl(TransportConnectionProperties properties) throws IOException;
// protected abstract void closeImpl() throws IOException;
//
// protected void listenImpl(TransportListenProperties properties) throws IOException {}
// protected void stopImpl() throws IOException {}
// protected abstract boolean isStoppedImpl(int waitMillSeconds) throws InterruptedException;
//
// protected boolean publishImpl(TransportMessageBundle messageBundle, TransportPublishProperties properties)
// throws IOException, TimeoutException, InterruptedException { return false; }
// protected <E> boolean transactionalPublishImpl(Collection<E> messageBundles, TransportPublishProperties properties)
// throws IOException, TimeoutException, InterruptedException { return false; }
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/TransportConnectionProperties.java
// public interface TransportConnectionProperties {
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/TransportListenContext.java
// public interface TransportListenContext {
// Transport getTransport();
// TransportConnectionProperties getConnectionProperties();
// TransportListenProperties getListenProperties();
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/TransportListenProperties.java
// public interface TransportListenProperties {
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/validation/Validatable.java
// public abstract class Validatable {
//
// protected abstract void validate();
//
// protected void assertNotNull(Object value, String argument) {
// if (value == null) {
// throw new IllegalArgumentException(String.format("Argument %s cannot be null", argument));
// }
// }
// }
// Path: conduit/src/main/java/io/rtr/conduit/amqp/consumer/ConsumerBuilder.java
import io.rtr.conduit.amqp.transport.Transport;
import io.rtr.conduit.amqp.transport.TransportConnectionProperties;
import io.rtr.conduit.amqp.transport.TransportListenContext;
import io.rtr.conduit.amqp.transport.TransportListenProperties;
import io.rtr.conduit.amqp.validation.Validatable;
package io.rtr.conduit.amqp.consumer;
public abstract class ConsumerBuilder<T extends Transport
, C extends TransportConnectionProperties | , L extends TransportListenProperties |
RentTheRunway/conduit | conduit/src/main/java/io/rtr/conduit/amqp/consumer/ConsumerBuilder.java | // Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/Transport.java
// public abstract class Transport {
// //! Public interface.
//
// //! Establishes a connection to either an intermediary or the other
// // end point. In the case of AMQP, this method is used to connect
// // to the broker.
// public final void connect(TransportConnectionProperties properties) throws IOException {
// connectImpl(properties);
// }
//
// public boolean isConnected() {
// return isConnectedImpl();
// }
//
// //! Closes the connection.
// public final void close() throws IOException {
// closeImpl();
// }
//
// //! Starts the asynchronous delivery mechanism.
// public final void listen(TransportListenProperties properties) throws IOException {
// listenImpl(properties);
// }
//
// //! Stops listening for incoming messages.
// public final void stop() throws IOException {
// stopImpl();
// }
//
// // Is the listener thread pool still doing work? (stop is not synchronous)
// public final boolean isStopped(int maxWaitMilliseconds) throws InterruptedException {
// return isStoppedImpl(maxWaitMilliseconds);
// }
//
// //! Publish a message to the other endpoint.
// public final boolean publish(TransportMessageBundle messageBundle, TransportPublishProperties properties)
// throws IOException, TimeoutException, InterruptedException {
// return publishImpl(messageBundle, properties);
// }
//
// public final <E> boolean transactionalPublish(Collection<E> messageBundles, TransportPublishProperties properties)
// throws IOException, TimeoutException, InterruptedException {
// return transactionalPublishImpl(messageBundles, properties);
// }
//
// //! Implementation
//
// protected abstract boolean isConnectedImpl();
// protected abstract void connectImpl(TransportConnectionProperties properties) throws IOException;
// protected abstract void closeImpl() throws IOException;
//
// protected void listenImpl(TransportListenProperties properties) throws IOException {}
// protected void stopImpl() throws IOException {}
// protected abstract boolean isStoppedImpl(int waitMillSeconds) throws InterruptedException;
//
// protected boolean publishImpl(TransportMessageBundle messageBundle, TransportPublishProperties properties)
// throws IOException, TimeoutException, InterruptedException { return false; }
// protected <E> boolean transactionalPublishImpl(Collection<E> messageBundles, TransportPublishProperties properties)
// throws IOException, TimeoutException, InterruptedException { return false; }
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/TransportConnectionProperties.java
// public interface TransportConnectionProperties {
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/TransportListenContext.java
// public interface TransportListenContext {
// Transport getTransport();
// TransportConnectionProperties getConnectionProperties();
// TransportListenProperties getListenProperties();
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/TransportListenProperties.java
// public interface TransportListenProperties {
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/validation/Validatable.java
// public abstract class Validatable {
//
// protected abstract void validate();
//
// protected void assertNotNull(Object value, String argument) {
// if (value == null) {
// throw new IllegalArgumentException(String.format("Argument %s cannot be null", argument));
// }
// }
// }
| import io.rtr.conduit.amqp.transport.Transport;
import io.rtr.conduit.amqp.transport.TransportConnectionProperties;
import io.rtr.conduit.amqp.transport.TransportListenContext;
import io.rtr.conduit.amqp.transport.TransportListenProperties;
import io.rtr.conduit.amqp.validation.Validatable; | package io.rtr.conduit.amqp.consumer;
public abstract class ConsumerBuilder<T extends Transport
, C extends TransportConnectionProperties
, L extends TransportListenProperties | // Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/Transport.java
// public abstract class Transport {
// //! Public interface.
//
// //! Establishes a connection to either an intermediary or the other
// // end point. In the case of AMQP, this method is used to connect
// // to the broker.
// public final void connect(TransportConnectionProperties properties) throws IOException {
// connectImpl(properties);
// }
//
// public boolean isConnected() {
// return isConnectedImpl();
// }
//
// //! Closes the connection.
// public final void close() throws IOException {
// closeImpl();
// }
//
// //! Starts the asynchronous delivery mechanism.
// public final void listen(TransportListenProperties properties) throws IOException {
// listenImpl(properties);
// }
//
// //! Stops listening for incoming messages.
// public final void stop() throws IOException {
// stopImpl();
// }
//
// // Is the listener thread pool still doing work? (stop is not synchronous)
// public final boolean isStopped(int maxWaitMilliseconds) throws InterruptedException {
// return isStoppedImpl(maxWaitMilliseconds);
// }
//
// //! Publish a message to the other endpoint.
// public final boolean publish(TransportMessageBundle messageBundle, TransportPublishProperties properties)
// throws IOException, TimeoutException, InterruptedException {
// return publishImpl(messageBundle, properties);
// }
//
// public final <E> boolean transactionalPublish(Collection<E> messageBundles, TransportPublishProperties properties)
// throws IOException, TimeoutException, InterruptedException {
// return transactionalPublishImpl(messageBundles, properties);
// }
//
// //! Implementation
//
// protected abstract boolean isConnectedImpl();
// protected abstract void connectImpl(TransportConnectionProperties properties) throws IOException;
// protected abstract void closeImpl() throws IOException;
//
// protected void listenImpl(TransportListenProperties properties) throws IOException {}
// protected void stopImpl() throws IOException {}
// protected abstract boolean isStoppedImpl(int waitMillSeconds) throws InterruptedException;
//
// protected boolean publishImpl(TransportMessageBundle messageBundle, TransportPublishProperties properties)
// throws IOException, TimeoutException, InterruptedException { return false; }
// protected <E> boolean transactionalPublishImpl(Collection<E> messageBundles, TransportPublishProperties properties)
// throws IOException, TimeoutException, InterruptedException { return false; }
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/TransportConnectionProperties.java
// public interface TransportConnectionProperties {
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/TransportListenContext.java
// public interface TransportListenContext {
// Transport getTransport();
// TransportConnectionProperties getConnectionProperties();
// TransportListenProperties getListenProperties();
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/TransportListenProperties.java
// public interface TransportListenProperties {
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/validation/Validatable.java
// public abstract class Validatable {
//
// protected abstract void validate();
//
// protected void assertNotNull(Object value, String argument) {
// if (value == null) {
// throw new IllegalArgumentException(String.format("Argument %s cannot be null", argument));
// }
// }
// }
// Path: conduit/src/main/java/io/rtr/conduit/amqp/consumer/ConsumerBuilder.java
import io.rtr.conduit.amqp.transport.Transport;
import io.rtr.conduit.amqp.transport.TransportConnectionProperties;
import io.rtr.conduit.amqp.transport.TransportListenContext;
import io.rtr.conduit.amqp.transport.TransportListenProperties;
import io.rtr.conduit.amqp.validation.Validatable;
package io.rtr.conduit.amqp.consumer;
public abstract class ConsumerBuilder<T extends Transport
, C extends TransportConnectionProperties
, L extends TransportListenProperties | , LC extends TransportListenContext> |
RentTheRunway/conduit | conduit/src/main/java/io/rtr/conduit/amqp/consumer/ConsumerBuilder.java | // Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/Transport.java
// public abstract class Transport {
// //! Public interface.
//
// //! Establishes a connection to either an intermediary or the other
// // end point. In the case of AMQP, this method is used to connect
// // to the broker.
// public final void connect(TransportConnectionProperties properties) throws IOException {
// connectImpl(properties);
// }
//
// public boolean isConnected() {
// return isConnectedImpl();
// }
//
// //! Closes the connection.
// public final void close() throws IOException {
// closeImpl();
// }
//
// //! Starts the asynchronous delivery mechanism.
// public final void listen(TransportListenProperties properties) throws IOException {
// listenImpl(properties);
// }
//
// //! Stops listening for incoming messages.
// public final void stop() throws IOException {
// stopImpl();
// }
//
// // Is the listener thread pool still doing work? (stop is not synchronous)
// public final boolean isStopped(int maxWaitMilliseconds) throws InterruptedException {
// return isStoppedImpl(maxWaitMilliseconds);
// }
//
// //! Publish a message to the other endpoint.
// public final boolean publish(TransportMessageBundle messageBundle, TransportPublishProperties properties)
// throws IOException, TimeoutException, InterruptedException {
// return publishImpl(messageBundle, properties);
// }
//
// public final <E> boolean transactionalPublish(Collection<E> messageBundles, TransportPublishProperties properties)
// throws IOException, TimeoutException, InterruptedException {
// return transactionalPublishImpl(messageBundles, properties);
// }
//
// //! Implementation
//
// protected abstract boolean isConnectedImpl();
// protected abstract void connectImpl(TransportConnectionProperties properties) throws IOException;
// protected abstract void closeImpl() throws IOException;
//
// protected void listenImpl(TransportListenProperties properties) throws IOException {}
// protected void stopImpl() throws IOException {}
// protected abstract boolean isStoppedImpl(int waitMillSeconds) throws InterruptedException;
//
// protected boolean publishImpl(TransportMessageBundle messageBundle, TransportPublishProperties properties)
// throws IOException, TimeoutException, InterruptedException { return false; }
// protected <E> boolean transactionalPublishImpl(Collection<E> messageBundles, TransportPublishProperties properties)
// throws IOException, TimeoutException, InterruptedException { return false; }
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/TransportConnectionProperties.java
// public interface TransportConnectionProperties {
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/TransportListenContext.java
// public interface TransportListenContext {
// Transport getTransport();
// TransportConnectionProperties getConnectionProperties();
// TransportListenProperties getListenProperties();
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/TransportListenProperties.java
// public interface TransportListenProperties {
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/validation/Validatable.java
// public abstract class Validatable {
//
// protected abstract void validate();
//
// protected void assertNotNull(Object value, String argument) {
// if (value == null) {
// throw new IllegalArgumentException(String.format("Argument %s cannot be null", argument));
// }
// }
// }
| import io.rtr.conduit.amqp.transport.Transport;
import io.rtr.conduit.amqp.transport.TransportConnectionProperties;
import io.rtr.conduit.amqp.transport.TransportListenContext;
import io.rtr.conduit.amqp.transport.TransportListenProperties;
import io.rtr.conduit.amqp.validation.Validatable; | package io.rtr.conduit.amqp.consumer;
public abstract class ConsumerBuilder<T extends Transport
, C extends TransportConnectionProperties
, L extends TransportListenProperties
, LC extends TransportListenContext> | // Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/Transport.java
// public abstract class Transport {
// //! Public interface.
//
// //! Establishes a connection to either an intermediary or the other
// // end point. In the case of AMQP, this method is used to connect
// // to the broker.
// public final void connect(TransportConnectionProperties properties) throws IOException {
// connectImpl(properties);
// }
//
// public boolean isConnected() {
// return isConnectedImpl();
// }
//
// //! Closes the connection.
// public final void close() throws IOException {
// closeImpl();
// }
//
// //! Starts the asynchronous delivery mechanism.
// public final void listen(TransportListenProperties properties) throws IOException {
// listenImpl(properties);
// }
//
// //! Stops listening for incoming messages.
// public final void stop() throws IOException {
// stopImpl();
// }
//
// // Is the listener thread pool still doing work? (stop is not synchronous)
// public final boolean isStopped(int maxWaitMilliseconds) throws InterruptedException {
// return isStoppedImpl(maxWaitMilliseconds);
// }
//
// //! Publish a message to the other endpoint.
// public final boolean publish(TransportMessageBundle messageBundle, TransportPublishProperties properties)
// throws IOException, TimeoutException, InterruptedException {
// return publishImpl(messageBundle, properties);
// }
//
// public final <E> boolean transactionalPublish(Collection<E> messageBundles, TransportPublishProperties properties)
// throws IOException, TimeoutException, InterruptedException {
// return transactionalPublishImpl(messageBundles, properties);
// }
//
// //! Implementation
//
// protected abstract boolean isConnectedImpl();
// protected abstract void connectImpl(TransportConnectionProperties properties) throws IOException;
// protected abstract void closeImpl() throws IOException;
//
// protected void listenImpl(TransportListenProperties properties) throws IOException {}
// protected void stopImpl() throws IOException {}
// protected abstract boolean isStoppedImpl(int waitMillSeconds) throws InterruptedException;
//
// protected boolean publishImpl(TransportMessageBundle messageBundle, TransportPublishProperties properties)
// throws IOException, TimeoutException, InterruptedException { return false; }
// protected <E> boolean transactionalPublishImpl(Collection<E> messageBundles, TransportPublishProperties properties)
// throws IOException, TimeoutException, InterruptedException { return false; }
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/TransportConnectionProperties.java
// public interface TransportConnectionProperties {
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/TransportListenContext.java
// public interface TransportListenContext {
// Transport getTransport();
// TransportConnectionProperties getConnectionProperties();
// TransportListenProperties getListenProperties();
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/TransportListenProperties.java
// public interface TransportListenProperties {
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/validation/Validatable.java
// public abstract class Validatable {
//
// protected abstract void validate();
//
// protected void assertNotNull(Object value, String argument) {
// if (value == null) {
// throw new IllegalArgumentException(String.format("Argument %s cannot be null", argument));
// }
// }
// }
// Path: conduit/src/main/java/io/rtr/conduit/amqp/consumer/ConsumerBuilder.java
import io.rtr.conduit.amqp.transport.Transport;
import io.rtr.conduit.amqp.transport.TransportConnectionProperties;
import io.rtr.conduit.amqp.transport.TransportListenContext;
import io.rtr.conduit.amqp.transport.TransportListenProperties;
import io.rtr.conduit.amqp.validation.Validatable;
package io.rtr.conduit.amqp.consumer;
public abstract class ConsumerBuilder<T extends Transport
, C extends TransportConnectionProperties
, L extends TransportListenProperties
, LC extends TransportListenContext> | extends Validatable { |
RentTheRunway/conduit | conduit/src/main/java/io/rtr/conduit/amqp/impl/AMQPQueueConsumer.java | // Path: conduit/src/main/java/io/rtr/conduit/amqp/AMQPConsumerCallback.java
// public interface AMQPConsumerCallback {
// ActionResponse handle(AMQPMessageBundle messageBundle);
// void notifyOfActionFailure(Exception e);
// void notifyOfShutdown(String consumerTag, ShutdownSignalException sig);
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/AMQPMessageBundle.java
// public class AMQPMessageBundle implements TransportMessageBundle {
// private String consumerTag;
// private Envelope envelope;
// private AMQP.BasicProperties basicProperties;
// private byte[] body;
//
// private static AMQP.BasicProperties initialProperties() {
// return initialProperties(null);
// }
//
// private static AMQP.BasicProperties initialProperties(Map<String, Object> additionalHeaders) {
// Map<String, Object> headers = new HashMap<String, Object>();
//
// if (additionalHeaders != null) {
// headers.putAll(additionalHeaders);
// }
//
// headers.put("conduit-retry-count", 0);
//
// return new AMQP.BasicProperties()
// .builder()
// .deliveryMode(2 /*persistent*/)
// .priority(0)
// .headers(headers)
// .contentType("text/plain")
// .build();
// }
//
// public AMQPMessageBundle(String consumerTag, Envelope envelope, AMQP.BasicProperties basicProperties, byte[] body) {
// this.consumerTag = consumerTag;
// this.envelope = envelope;
// this.basicProperties = basicProperties;
// this.body = body;
// }
//
// public AMQPMessageBundle(String message) {
// this(null, null, initialProperties(), message.getBytes());
// }
//
// public AMQPMessageBundle(String message, Map<String, Object> headers) {
// this(null, null, initialProperties(headers), message.getBytes());
// }
//
// public String getConsumerTag() {
// return consumerTag;
// }
//
// public Envelope getEnvelope() {
// return envelope;
// }
//
// public AMQP.BasicProperties getBasicProperties() {
// return basicProperties;
// }
//
// public byte[] getBody() {
// return body;
// }
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/ActionResponse.java
// public class ActionResponse {
//
// private Action action;
// private String reason;
// public static final String REASON_KEY = "reason";
//
// private ActionResponse(Action action, String reason) {
// this.action = action;
// this.reason = reason;
// }
//
// public static ActionResponse acknowledge() {
// return new ActionResponse(Action.Acknowledge, null);
// }
//
// public static ActionResponse retry() {
// return new ActionResponse(Action.RejectAndRequeue, null);
// }
//
// public static ActionResponse retry(String reason, Object... args) {
// return new ActionResponse(Action.RejectAndRequeue, String.format(reason, args));
// }
//
// public static ActionResponse discard() {
// return new ActionResponse(Action.RejectAndDiscard, null);
// }
//
// public static ActionResponse discard(String reason, Object... args) {
// return new ActionResponse(Action.RejectAndDiscard, String.format(reason, args));
// }
//
// public String getReason() {
// return reason;
// }
//
// public Action getAction() {
// return action;
// }
//
// @Override
// public boolean equals(Object o) {
// if (this == o) return true;
// if (o == null || getClass() != o.getClass()) return false;
//
// ActionResponse that = (ActionResponse) o;
//
// if (action != that.action) return false;
// if (reason != null ? !reason.equals(that.reason) : that.reason != null) return false;
//
// return true;
// }
//
// @Override
// public int hashCode() {
// int result = action != null ? action.hashCode() : 0;
// result = 31 * result + (reason != null ? reason.hashCode() : 0);
// return result;
// }
//
// @Override
// public String toString() {
// return "ActionResponse{" +
// "action=" + action +
// ", reason='" + reason + '\'' +
// '}';
// }
//
// public enum Action {
// Acknowledge, //! The transport will ack the message explicitly.
// RejectAndRequeue, //! The message wasn't meant to be processed.
// // For example, if the message delivered is of
// // a higher version than what we are able to
// // deal with.
// RejectAndDiscard //! A malformed message, place it on a poison queue.
// }
//
//
//
// }
| import com.rabbitmq.client.AMQP;
import com.rabbitmq.client.Channel;
import com.rabbitmq.client.DefaultConsumer;
import com.rabbitmq.client.Envelope;
import com.rabbitmq.client.ShutdownSignalException;
import io.rtr.conduit.amqp.AMQPConsumerCallback;
import io.rtr.conduit.amqp.AMQPMessageBundle;
import io.rtr.conduit.amqp.ActionResponse;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map; | package io.rtr.conduit.amqp.impl;
public class AMQPQueueConsumer extends DefaultConsumer {
private static final Logger log = LoggerFactory.getLogger(AMQPQueueConsumer.class);
private static final String HEADER_RETRY_COUNT = "conduit-retry-count"; | // Path: conduit/src/main/java/io/rtr/conduit/amqp/AMQPConsumerCallback.java
// public interface AMQPConsumerCallback {
// ActionResponse handle(AMQPMessageBundle messageBundle);
// void notifyOfActionFailure(Exception e);
// void notifyOfShutdown(String consumerTag, ShutdownSignalException sig);
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/AMQPMessageBundle.java
// public class AMQPMessageBundle implements TransportMessageBundle {
// private String consumerTag;
// private Envelope envelope;
// private AMQP.BasicProperties basicProperties;
// private byte[] body;
//
// private static AMQP.BasicProperties initialProperties() {
// return initialProperties(null);
// }
//
// private static AMQP.BasicProperties initialProperties(Map<String, Object> additionalHeaders) {
// Map<String, Object> headers = new HashMap<String, Object>();
//
// if (additionalHeaders != null) {
// headers.putAll(additionalHeaders);
// }
//
// headers.put("conduit-retry-count", 0);
//
// return new AMQP.BasicProperties()
// .builder()
// .deliveryMode(2 /*persistent*/)
// .priority(0)
// .headers(headers)
// .contentType("text/plain")
// .build();
// }
//
// public AMQPMessageBundle(String consumerTag, Envelope envelope, AMQP.BasicProperties basicProperties, byte[] body) {
// this.consumerTag = consumerTag;
// this.envelope = envelope;
// this.basicProperties = basicProperties;
// this.body = body;
// }
//
// public AMQPMessageBundle(String message) {
// this(null, null, initialProperties(), message.getBytes());
// }
//
// public AMQPMessageBundle(String message, Map<String, Object> headers) {
// this(null, null, initialProperties(headers), message.getBytes());
// }
//
// public String getConsumerTag() {
// return consumerTag;
// }
//
// public Envelope getEnvelope() {
// return envelope;
// }
//
// public AMQP.BasicProperties getBasicProperties() {
// return basicProperties;
// }
//
// public byte[] getBody() {
// return body;
// }
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/ActionResponse.java
// public class ActionResponse {
//
// private Action action;
// private String reason;
// public static final String REASON_KEY = "reason";
//
// private ActionResponse(Action action, String reason) {
// this.action = action;
// this.reason = reason;
// }
//
// public static ActionResponse acknowledge() {
// return new ActionResponse(Action.Acknowledge, null);
// }
//
// public static ActionResponse retry() {
// return new ActionResponse(Action.RejectAndRequeue, null);
// }
//
// public static ActionResponse retry(String reason, Object... args) {
// return new ActionResponse(Action.RejectAndRequeue, String.format(reason, args));
// }
//
// public static ActionResponse discard() {
// return new ActionResponse(Action.RejectAndDiscard, null);
// }
//
// public static ActionResponse discard(String reason, Object... args) {
// return new ActionResponse(Action.RejectAndDiscard, String.format(reason, args));
// }
//
// public String getReason() {
// return reason;
// }
//
// public Action getAction() {
// return action;
// }
//
// @Override
// public boolean equals(Object o) {
// if (this == o) return true;
// if (o == null || getClass() != o.getClass()) return false;
//
// ActionResponse that = (ActionResponse) o;
//
// if (action != that.action) return false;
// if (reason != null ? !reason.equals(that.reason) : that.reason != null) return false;
//
// return true;
// }
//
// @Override
// public int hashCode() {
// int result = action != null ? action.hashCode() : 0;
// result = 31 * result + (reason != null ? reason.hashCode() : 0);
// return result;
// }
//
// @Override
// public String toString() {
// return "ActionResponse{" +
// "action=" + action +
// ", reason='" + reason + '\'' +
// '}';
// }
//
// public enum Action {
// Acknowledge, //! The transport will ack the message explicitly.
// RejectAndRequeue, //! The message wasn't meant to be processed.
// // For example, if the message delivered is of
// // a higher version than what we are able to
// // deal with.
// RejectAndDiscard //! A malformed message, place it on a poison queue.
// }
//
//
//
// }
// Path: conduit/src/main/java/io/rtr/conduit/amqp/impl/AMQPQueueConsumer.java
import com.rabbitmq.client.AMQP;
import com.rabbitmq.client.Channel;
import com.rabbitmq.client.DefaultConsumer;
import com.rabbitmq.client.Envelope;
import com.rabbitmq.client.ShutdownSignalException;
import io.rtr.conduit.amqp.AMQPConsumerCallback;
import io.rtr.conduit.amqp.AMQPMessageBundle;
import io.rtr.conduit.amqp.ActionResponse;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
package io.rtr.conduit.amqp.impl;
public class AMQPQueueConsumer extends DefaultConsumer {
private static final Logger log = LoggerFactory.getLogger(AMQPQueueConsumer.class);
private static final String HEADER_RETRY_COUNT = "conduit-retry-count"; | private AMQPConsumerCallback callback; |
RentTheRunway/conduit | conduit/src/main/java/io/rtr/conduit/amqp/publisher/Publisher.java | // Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/TransportMessageBundle.java
// public interface TransportMessageBundle {
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/TransportPublishContext.java
// public interface TransportPublishContext {
// Transport getTransport();
// TransportConnectionProperties getConnectionProperties();
// TransportPublishProperties getPublishProperties();
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/TransportPublishProperties.java
// public interface TransportPublishProperties {
// }
| import io.rtr.conduit.amqp.transport.TransportMessageBundle;
import io.rtr.conduit.amqp.transport.TransportPublishContext;
import io.rtr.conduit.amqp.transport.TransportPublishProperties;
import java.io.IOException;
import java.util.Collection;
import java.util.concurrent.TimeoutException; | package io.rtr.conduit.amqp.publisher;
/**
* The publisher operates in terms of a publish context; an encapsulation of a
* concrete transport and its properties.
* Example:
*
* AMQPPublishContext context = new AMQPPublishContext(
* username, password, virtualHost, exchange, routingKey, host, port
* );
*
* Publisher publisher = new Publisher(context);
*/
public class Publisher implements AutoCloseable {
private TransportPublishContext transportContext;
//! Public interface.
Publisher(TransportPublishContext transportContext) {
this.transportContext = transportContext;
}
//! Connects to the context-specified host with context-specified credentials.
public void connect() throws IOException {
transportContext.getTransport().connect(transportContext.getConnectionProperties());
}
public boolean isConnected() {
return transportContext.getTransport().isConnected();
}
@Override
public void close() throws IOException {
transportContext.getTransport().close();
}
/**
* Publish the message using the publish properties defined in the transport context
* @param messageBundle Message to send
*/ | // Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/TransportMessageBundle.java
// public interface TransportMessageBundle {
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/TransportPublishContext.java
// public interface TransportPublishContext {
// Transport getTransport();
// TransportConnectionProperties getConnectionProperties();
// TransportPublishProperties getPublishProperties();
// }
//
// Path: conduit/src/main/java/io/rtr/conduit/amqp/transport/TransportPublishProperties.java
// public interface TransportPublishProperties {
// }
// Path: conduit/src/main/java/io/rtr/conduit/amqp/publisher/Publisher.java
import io.rtr.conduit.amqp.transport.TransportMessageBundle;
import io.rtr.conduit.amqp.transport.TransportPublishContext;
import io.rtr.conduit.amqp.transport.TransportPublishProperties;
import java.io.IOException;
import java.util.Collection;
import java.util.concurrent.TimeoutException;
package io.rtr.conduit.amqp.publisher;
/**
* The publisher operates in terms of a publish context; an encapsulation of a
* concrete transport and its properties.
* Example:
*
* AMQPPublishContext context = new AMQPPublishContext(
* username, password, virtualHost, exchange, routingKey, host, port
* );
*
* Publisher publisher = new Publisher(context);
*/
public class Publisher implements AutoCloseable {
private TransportPublishContext transportContext;
//! Public interface.
Publisher(TransportPublishContext transportContext) {
this.transportContext = transportContext;
}
//! Connects to the context-specified host with context-specified credentials.
public void connect() throws IOException {
transportContext.getTransport().connect(transportContext.getConnectionProperties());
}
public boolean isConnected() {
return transportContext.getTransport().isConnected();
}
@Override
public void close() throws IOException {
transportContext.getTransport().close();
}
/**
* Publish the message using the publish properties defined in the transport context
* @param messageBundle Message to send
*/ | public boolean publish(TransportMessageBundle messageBundle) |