code (stringlengths, 5-1M) | repo_name (stringlengths, 5-109) | path (stringlengths, 6-208) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 5-1M)
---|---|---|---|---|---|
import org.specs2.mutable._
import org.specs2.runner._
import org.junit.runner._
import play.api.test._
import play.api.test.Helpers._
/**
* You can mock out a whole application including requests, plugins etc.
* For more information, consult the wiki.
*/
@RunWith(classOf[JUnitRunner])
class ApplicationSpec extends Specification {
"Application" should {
"send 404 on a bad request" in new WithApplication {
route(FakeRequest(GET, "/boum")) must beNone
}
"render the index page" in new WithApplication {
val home = route(FakeRequest(GET, "/")).get
status(home) must equalTo(OK)
contentType(home) must beSome.which(_ == "text/html")
contentAsString(home) must contain("Your new application is ready!")
}
}
}
| Leonti/userpath | test/ApplicationSpec.scala | Scala | mit | 767 |
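The spec above exercises Play's test helpers (`route`, `FakeRequest`, `status`, `contentType`). A further test in the same style might look like the sketch below; it is illustrative only, and the `/api/status` route with a JSON response is hypothetical, not part of the Leonti/userpath application.

```scala
"return JSON from a hypothetical API route" in new WithApplication {
  val resp = route(FakeRequest(GET, "/api/status")).get
  status(resp) must equalTo(OK)
  contentType(resp) must beSome.which(_ == "application/json")
}
```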
package odfi.server.api
import org.odfi.indesign.core.harvest.fs.HarvestedFile
import odfi.server.ODFIHarvester
import org.odfi.indesign.core.main.IndesignPlatorm
import java.io.File
import odfi.server.ODFIInstallation
import com.idyria.osi.tea.thread.ThreadLanguage
import odfi.server.ODFIManagerModule
object TestCommandRun extends App with ThreadLanguage {
//-- Find ODFI and TCL
IndesignPlatorm.prepareDefault
IndesignPlatorm use ODFIManagerModule
ODFIHarvester.deliverDirect(HarvestedFile(new File("""E:\odfi""")))
IndesignPlatorm.start
var odfi = ODFIHarvester.getResource[ODFIInstallation].get.getODFIInstance("main")
//-- Run Command
var th = createThread {
//var odfi = ODFIHarvester.getResource[ODFIInstallation].get.getODFIInstance("main")
odfi.interpreter.evalString("""puts "Hello World" """)
odfi.getCommand("odfi/info") match {
case None =>
println("Command not found")
case Some(c) =>
println("Running Command...")
c.startRedirectIO match {
case Some(r) =>
println(s"Found Result....")
r.valueMaps.foreach {
case (k, v) =>
println(s"Key: $k, Value: $v")
}
case None =>
}
}
}
th.start()
th.join()
println("=============================================================")
th = createThread {
odfi.getCommand("odfi/info") match {
case None =>
println("Command not found")
case Some(c) =>
println("Running Command...")
c.startRedirectIO match {
case Some(r) =>
println(s"Found Result....")
r.valueMaps.foreach {
case (k, v) =>
println(s"Key: $k, Value: $v")
}
case None =>
}
}
}
th.start()
th.join()
} | richnou/odfi-manager | server/src/test/scala/odfi/server/api/TestCommandRun.scala | Scala | lgpl-3.0 | 1,854 |
package todomvc
import preact.Preact.VNode
import preact.macros.PreactComponent
import todomvc.Model.Filter
object FilterButton {
case class Props(filter: Filter, currentFilter: Filter)
}
import todomvc.FilterButton._
@PreactComponent[Props, Unit]
class FilterButton {
import preact.dsl.symbol._
def render(): VNode = {
'li(
'a("href" -> s"#/${props.filter.path}",
if (props.currentFilter == props.filter) {
"class" -> "selected"
} else {
Entry.EmptyAttribute
},
props.filter.label
)
)
}
}
| LMnet/scala-js-preact | examples/todomvc/src/main/scala/todomvc/FilterButton.scala | Scala | mit | 576 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.parquet
import java.time.LocalDateTime
import java.util.Locale
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.reflect.ClassTag
import scala.reflect.runtime.universe.TypeTag
import com.google.common.primitives.UnsignedLong
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.mapreduce.{JobContext, TaskAttemptContext}
import org.apache.parquet.column.{Encoding, ParquetProperties}
import org.apache.parquet.column.ParquetProperties.WriterVersion.PARQUET_1_0
import org.apache.parquet.example.data.Group
import org.apache.parquet.example.data.simple.{SimpleGroup, SimpleGroupFactory}
import org.apache.parquet.hadoop._
import org.apache.parquet.hadoop.example.ExampleParquetWriter
import org.apache.parquet.hadoop.metadata.CompressionCodecName
import org.apache.parquet.hadoop.metadata.CompressionCodecName.GZIP
import org.apache.parquet.schema.{MessageType, MessageTypeParser}
import org.apache.spark.{SPARK_VERSION_SHORT, SparkException}
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.{InternalRow, ScalaReflection}
import org.apache.spark.sql.catalyst.expressions.{GenericInternalRow, UnsafeRow}
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.execution.datasources.SQLHadoopMapReduceCommitProtocol
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
/**
* A test suite that tests basic Parquet I/O.
*/
class ParquetIOSuite extends QueryTest with ParquetTest with SharedSparkSession {
import testImplicits._
/**
* Writes `data` to a Parquet file, reads it back and check file contents.
*/
protected def checkParquetFile[T <: Product : ClassTag: TypeTag](data: Seq[T]): Unit = {
withParquetDataFrame(data)(r => checkAnswer(r, data.map(Row.fromTuple)))
}
test("basic data types (without binary)") {
val data = (1 to 4).map { i =>
(i % 2 == 0, i, i.toLong, i.toFloat, i.toDouble)
}
checkParquetFile(data)
}
test("raw binary") {
val data = (1 to 4).map(i => Tuple1(Array.fill(3)(i.toByte)))
withParquetDataFrame(data) { df =>
assertResult(data.map(_._1.mkString(",")).sorted) {
df.collect().map(_.getAs[Array[Byte]](0).mkString(",")).sorted
}
}
}
test("SPARK-11694 Parquet logical types are not being tested properly") {
val parquetSchema = MessageTypeParser.parseMessageType(
"""message root {
| required int32 a(INT_8);
| required int32 b(INT_16);
| required int32 c(DATE);
| required int32 d(DECIMAL(1,0));
| required int64 e(DECIMAL(10,0));
| required binary f(UTF8);
| required binary g(ENUM);
| required binary h(DECIMAL(32,0));
| required fixed_len_byte_array(32) i(DECIMAL(32,0));
| required int64 j(TIMESTAMP_MILLIS);
| required int64 k(TIMESTAMP_MICROS);
|}
""".stripMargin)
val expectedSparkTypes = Seq(ByteType, ShortType, DateType, DecimalType(1, 0),
DecimalType(10, 0), StringType, StringType, DecimalType(32, 0), DecimalType(32, 0),
TimestampType, TimestampType)
withTempPath { location =>
val path = new Path(location.getCanonicalPath)
val conf = spark.sessionState.newHadoopConf()
writeMetadata(parquetSchema, path, conf)
readParquetFile(path.toString)(df => {
val sparkTypes = df.schema.map(_.dataType)
assert(sparkTypes === expectedSparkTypes)
})
}
}
test("string") {
val data = (1 to 4).map(i => Tuple1(i.toString))
// Property spark.sql.parquet.binaryAsString shouldn't affect Parquet files written by Spark SQL
// as we store Spark SQL schema in the extra metadata.
withSQLConf(SQLConf.PARQUET_BINARY_AS_STRING.key -> "false")(checkParquetFile(data))
withSQLConf(SQLConf.PARQUET_BINARY_AS_STRING.key -> "true")(checkParquetFile(data))
}
test("SPARK-36182: TimestampNTZ") {
val data = Seq("2021-01-01T00:00:00", "1970-07-15T01:02:03.456789")
.map(ts => Tuple1(LocalDateTime.parse(ts)))
withAllParquetReaders {
checkParquetFile(data)
}
}
test("Read TimestampNTZ and TimestampLTZ for various logical TIMESTAMP types") {
val schema = MessageTypeParser.parseMessageType(
"""message root {
| required int64 timestamp_ltz_millis_depr(TIMESTAMP_MILLIS);
| required int64 timestamp_ltz_micros_depr(TIMESTAMP_MICROS);
| required int64 timestamp_ltz_millis(TIMESTAMP(MILLIS,true));
| required int64 timestamp_ltz_micros(TIMESTAMP(MICROS,true));
| required int64 timestamp_ntz_millis(TIMESTAMP(MILLIS,false));
| required int64 timestamp_ntz_micros(TIMESTAMP(MICROS,false));
|}
""".stripMargin)
for (dictEnabled <- Seq(true, false)) {
withTempDir { dir =>
val tablePath = new Path(s"${dir.getCanonicalPath}/timestamps.parquet")
val numRecords = 100
val writer = createParquetWriter(schema, tablePath, dictionaryEnabled = dictEnabled)
(0 until numRecords).foreach { i =>
val record = new SimpleGroup(schema)
for (group <- Seq(0, 2, 4)) {
record.add(group, 1000L) // millis
record.add(group + 1, 1000000L) // micros
}
writer.write(record)
}
writer.close()
withAllParquetReaders {
val df = spark.read.parquet(tablePath.toString)
assertResult(df.schema) {
StructType(
StructField("timestamp_ltz_millis_depr", TimestampType, nullable = true) ::
StructField("timestamp_ltz_micros_depr", TimestampType, nullable = true) ::
StructField("timestamp_ltz_millis", TimestampType, nullable = true) ::
StructField("timestamp_ltz_micros", TimestampType, nullable = true) ::
StructField("timestamp_ntz_millis", TimestampNTZType, nullable = true) ::
StructField("timestamp_ntz_micros", TimestampNTZType, nullable = true) ::
Nil
)
}
val exp = (0 until numRecords).map { _ =>
val ltz_value = new java.sql.Timestamp(1000L)
val ntz_value = LocalDateTime.of(1970, 1, 1, 0, 0, 1)
(ltz_value, ltz_value, ltz_value, ltz_value, ntz_value, ntz_value)
}.toDF()
checkAnswer(df, exp)
}
}
}
}
testStandardAndLegacyModes("fixed-length decimals") {
def makeDecimalRDD(decimal: DecimalType): DataFrame = {
spark
.range(1000)
// Parquet doesn't allow column names with spaces, have to add an alias here.
// Minus 500 here so that negative decimals are also tested.
.select((('id - 500) / 100.0) cast decimal as 'dec)
.coalesce(1)
}
val combinations = Seq((5, 2), (1, 0), (1, 1), (18, 10), (18, 17), (19, 0), (38, 37))
for ((precision, scale) <- combinations) {
withTempPath { dir =>
val data = makeDecimalRDD(DecimalType(precision, scale))
data.write.parquet(dir.getCanonicalPath)
readParquetFile(dir.getCanonicalPath) { df => {
checkAnswer(df, data.collect().toSeq)
}}
}
}
}
test("date type") {
def makeDateRDD(): DataFrame =
sparkContext
.parallelize(0 to 1000)
.map(i => Tuple1(DateTimeUtils.toJavaDate(i)))
.toDF()
.select($"_1")
withTempPath { dir =>
val data = makeDateRDD()
data.write.parquet(dir.getCanonicalPath)
readParquetFile(dir.getCanonicalPath) { df =>
checkAnswer(df, data.collect().toSeq)
}
}
}
testStandardAndLegacyModes("map") {
val data = (1 to 4).map(i => Tuple1(Map(i -> s"val_$i")))
checkParquetFile(data)
}
testStandardAndLegacyModes("array") {
val data = (1 to 4).map(i => Tuple1(Seq(i, i + 1)))
checkParquetFile(data)
}
testStandardAndLegacyModes("array and double") {
val data = (1 to 4).map(i => (i.toDouble, Seq(i.toDouble, (i + 1).toDouble)))
checkParquetFile(data)
}
testStandardAndLegacyModes("struct") {
val data = (1 to 4).map(i => Tuple1((i, s"val_$i")))
withParquetDataFrame(data) { df =>
// Structs are converted to `Row`s
checkAnswer(df, data.map { case Tuple1(struct) =>
Row(Row(struct.productIterator.toSeq: _*))
})
}
}
testStandardAndLegacyModes("array of struct") {
val data = (1 to 4).map { i =>
Tuple1(
Seq(
Tuple1(s"1st_val_$i"),
Tuple1(s"2nd_val_$i")
)
)
}
withParquetDataFrame(data) { df =>
// Structs are converted to `Row`s
checkAnswer(df, data.map { case Tuple1(array) =>
Row(array.map(struct => Row(struct.productIterator.toSeq: _*)))
})
}
}
testStandardAndLegacyModes("array of nested struct") {
val data = (1 to 4).map { i =>
Tuple1(
Seq(
Tuple1(
Tuple1(s"1st_val_$i")),
Tuple1(
Tuple1(s"2nd_val_$i"))
)
)
}
withParquetDataFrame(data) { df =>
// Structs are converted to `Row`s
checkAnswer(df, data.map { case Tuple1(array) =>
Row(array.map { case Tuple1(Tuple1(str)) => Row(Row(str))})
})
}
}
testStandardAndLegacyModes("nested struct with array of array as field") {
val data = (1 to 4).map(i => Tuple1((i, Seq(Seq(s"val_$i")))))
withParquetDataFrame(data) { df =>
// Structs are converted to `Row`s
checkAnswer(df, data.map { case Tuple1(struct) =>
Row(Row(struct.productIterator.toSeq: _*))
})
}
}
testStandardAndLegacyModes("nested map with struct as key type") {
val data = (1 to 4).map { i =>
Tuple1(
Map(
(i, s"kA_$i") -> s"vA_$i",
(i, s"kB_$i") -> s"vB_$i"
)
)
}
withParquetDataFrame(data) { df =>
// Structs are converted to `Row`s
checkAnswer(df, data.map { case Tuple1(m) =>
Row(m.map { case (k, v) => Row(k.productIterator.toSeq: _*) -> v })
})
}
}
testStandardAndLegacyModes("nested map with struct as value type") {
val data = (1 to 4).map { i =>
Tuple1(
Map(
s"kA_$i" -> ((i, s"vA_$i")),
s"kB_$i" -> ((i, s"vB_$i"))
)
)
}
withParquetDataFrame(data) { df =>
// Structs are converted to `Row`s
checkAnswer(df, data.map { case Tuple1(m) =>
Row(m.mapValues(struct => Row(struct.productIterator.toSeq: _*)))
})
}
}
test("nulls") {
val allNulls = (
null.asInstanceOf[java.lang.Boolean],
null.asInstanceOf[Integer],
null.asInstanceOf[java.lang.Long],
null.asInstanceOf[java.lang.Float],
null.asInstanceOf[java.lang.Double])
withParquetDataFrame(allNulls :: Nil) { df =>
val rows = df.collect()
assert(rows.length === 1)
assert(rows.head === Row(Seq.fill(5)(null): _*))
}
}
test("nones") {
val allNones = (
None.asInstanceOf[Option[Int]],
None.asInstanceOf[Option[Long]],
None.asInstanceOf[Option[String]])
withParquetDataFrame(allNones :: Nil) { df =>
val rows = df.collect()
assert(rows.length === 1)
assert(rows.head === Row(Seq.fill(3)(null): _*))
}
}
test("SPARK-34817: Support for unsigned Parquet logical types") {
val parquetSchema = MessageTypeParser.parseMessageType(
"""message root {
| required INT32 a(UINT_8);
| required INT32 b(UINT_16);
| required INT32 c(UINT_32);
| required INT64 d(UINT_64);
|}
""".stripMargin)
val expectedSparkTypes = Seq(ShortType, IntegerType, LongType, DecimalType.LongDecimal)
withTempPath { location =>
val path = new Path(location.getCanonicalPath)
val conf = spark.sessionState.newHadoopConf()
writeMetadata(parquetSchema, path, conf)
val sparkTypes = spark.read.parquet(path.toString).schema.map(_.dataType)
assert(sparkTypes === expectedSparkTypes)
}
}
test("SPARK-11692 Support for Parquet logical types, JSON and BSON (embedded types)") {
val parquetSchema = MessageTypeParser.parseMessageType(
"""message root {
| required binary a(JSON);
| required binary b(BSON);
|}
""".stripMargin)
val expectedSparkTypes = Seq(StringType, BinaryType)
withTempPath { location =>
val path = new Path(location.getCanonicalPath)
val conf = spark.sessionState.newHadoopConf()
writeMetadata(parquetSchema, path, conf)
val sparkTypes = spark.read.parquet(path.toString).schema.map(_.dataType)
assert(sparkTypes === expectedSparkTypes)
}
}
test("compression codec") {
val hadoopConf = spark.sessionState.newHadoopConf()
def compressionCodecFor(path: String, codecName: String): String = {
val codecs = for {
footer <- readAllFootersWithoutSummaryFiles(new Path(path), hadoopConf)
block <- footer.getParquetMetadata.getBlocks.asScala
column <- block.getColumns.asScala
} yield column.getCodec.name()
assert(codecs.distinct === Seq(codecName))
codecs.head
}
val data = (0 until 10).map(i => (i, i.toString))
def checkCompressionCodec(codec: CompressionCodecName): Unit = {
withSQLConf(SQLConf.PARQUET_COMPRESSION.key -> codec.name()) {
withParquetFile(data) { path =>
assertResult(spark.conf.get(SQLConf.PARQUET_COMPRESSION).toUpperCase(Locale.ROOT)) {
compressionCodecFor(path, codec.name())
}
}
}
}
// Checks default compression codec
checkCompressionCodec(
CompressionCodecName.fromConf(spark.conf.get(SQLConf.PARQUET_COMPRESSION)))
checkCompressionCodec(CompressionCodecName.UNCOMPRESSED)
checkCompressionCodec(CompressionCodecName.GZIP)
checkCompressionCodec(CompressionCodecName.SNAPPY)
checkCompressionCodec(CompressionCodecName.ZSTD)
}
private def createParquetWriter(
schema: MessageType,
path: Path,
dictionaryEnabled: Boolean = false,
pageSize: Int = 1024,
dictionaryPageSize: Int = 1024): ParquetWriter[Group] = {
val hadoopConf = spark.sessionState.newHadoopConf()
ExampleParquetWriter
.builder(path)
.withDictionaryEncoding(dictionaryEnabled)
.withType(schema)
.withWriterVersion(PARQUET_1_0)
.withCompressionCodec(GZIP)
.withRowGroupSize(1024 * 1024)
.withPageSize(pageSize)
.withDictionaryPageSize(dictionaryPageSize)
.withConf(hadoopConf)
.build()
}
test("SPARK-34859: test multiple pages with different sizes and nulls") {
def makeRawParquetFile(
path: Path,
dictionaryEnabled: Boolean,
n: Int,
pageSize: Int): Seq[Option[Int]] = {
val schemaStr =
"""
|message root {
| optional boolean _1;
| optional int32 _2;
| optional int64 _3;
| optional float _4;
| optional double _5;
|}
""".stripMargin
val schema = MessageTypeParser.parseMessageType(schemaStr)
val writer = createParquetWriter(schema, path,
dictionaryEnabled = dictionaryEnabled, pageSize = pageSize, dictionaryPageSize = pageSize)
val rand = scala.util.Random
val expected = (0 until n).map { i =>
if (rand.nextBoolean()) {
None
} else {
Some(i)
}
}
expected.foreach { opt =>
val record = new SimpleGroup(schema)
opt match {
case Some(i) =>
record.add(0, i % 2 == 0)
record.add(1, i)
record.add(2, i.toLong)
record.add(3, i.toFloat)
record.add(4, i.toDouble)
case _ =>
}
writer.write(record)
}
writer.close()
expected
}
Seq(true, false).foreach { dictionaryEnabled =>
Seq(64, 128, 89).foreach { pageSize =>
withTempDir { dir =>
val path = new Path(dir.toURI.toString, "part-r-0.parquet")
val expected = makeRawParquetFile(path, dictionaryEnabled, 1000, pageSize)
readParquetFile(path.toString) { df =>
checkAnswer(df, expected.map {
case None =>
Row(null, null, null, null, null)
case Some(i) =>
Row(i % 2 == 0, i, i.toLong, i.toFloat, i.toDouble)
})
}
}
}
}
}
test("read raw Parquet file") {
def makeRawParquetFile(path: Path): Unit = {
val schemaStr =
"""
|message root {
| required boolean _1;
| required int32 _2;
| required int64 _3;
| required float _4;
| required double _5;
|}
""".stripMargin
val schema = MessageTypeParser.parseMessageType(schemaStr)
val writer = createParquetWriter(schema, path)
(0 until 10).foreach { i =>
val record = new SimpleGroup(schema)
record.add(0, i % 2 == 0)
record.add(1, i)
record.add(2, i.toLong)
record.add(3, i.toFloat)
record.add(4, i.toDouble)
writer.write(record)
}
writer.close()
}
withTempDir { dir =>
val path = new Path(dir.toURI.toString, "part-r-0.parquet")
makeRawParquetFile(path)
readParquetFile(path.toString) { df =>
checkAnswer(df, (0 until 10).map { i =>
Row(i % 2 == 0, i, i.toLong, i.toFloat, i.toDouble) })
}
}
}
test("SPARK-34817: Read UINT_8/UINT_16/UINT_32 from parquet") {
Seq(true, false).foreach { dictionaryEnabled =>
def makeRawParquetFile(path: Path): Unit = {
val schemaStr =
"""message root {
| required INT32 a(UINT_8);
| required INT32 b(UINT_16);
| required INT32 c(UINT_32);
|}
""".stripMargin
val schema = MessageTypeParser.parseMessageType(schemaStr)
val writer = createParquetWriter(schema, path, dictionaryEnabled)
val factory = new SimpleGroupFactory(schema)
(0 until 1000).foreach { i =>
val group = factory.newGroup()
.append("a", i % 100 + Byte.MaxValue)
.append("b", i % 100 + Short.MaxValue)
.append("c", i % 100 + Int.MaxValue)
writer.write(group)
}
writer.close()
}
withTempDir { dir =>
val path = new Path(dir.toURI.toString, "part-r-0.parquet")
makeRawParquetFile(path)
readParquetFile(path.toString) { df =>
checkAnswer(df, (0 until 1000).map { i =>
Row(i % 100 + Byte.MaxValue,
i % 100 + Short.MaxValue,
i % 100 + Int.MaxValue.toLong)
})
}
}
}
}
test("SPARK-34817: Read UINT_64 as Decimal from parquet") {
Seq(true, false).foreach { dictionaryEnabled =>
def makeRawParquetFile(path: Path): Unit = {
val schemaStr =
"""message root {
| required INT64 a(UINT_64);
|}
""".stripMargin
val schema = MessageTypeParser.parseMessageType(schemaStr)
val writer = createParquetWriter(schema, path, dictionaryEnabled)
val factory = new SimpleGroupFactory(schema)
(-500 until 500).foreach { i =>
val group = factory.newGroup()
.append("a", i % 100L)
writer.write(group)
}
writer.close()
}
withTempDir { dir =>
val path = new Path(dir.toURI.toString, "part-r-0.parquet")
makeRawParquetFile(path)
readParquetFile(path.toString) { df =>
checkAnswer(df, (-500 until 500).map { i =>
val bi = UnsignedLong.fromLongBits(i % 100L).bigIntegerValue()
Row(new java.math.BigDecimal(bi))
})
}
}
}
}
test("SPARK-35640: read binary as timestamp should throw schema incompatible error") {
val data = (1 to 4).map(i => Tuple1(i.toString))
val readSchema = StructType(Seq(StructField("_1", DataTypes.TimestampType)))
withParquetFile(data) { path =>
val errMsg = intercept[Exception](spark.read.schema(readSchema).parquet(path).collect())
.getMessage
assert(errMsg.contains("Parquet column cannot be converted in file"))
}
}
test("SPARK-35640: int as long should throw schema incompatible error") {
val data = (1 to 4).map(i => Tuple1(i))
val readSchema = StructType(Seq(StructField("_1", DataTypes.LongType)))
withParquetFile(data) { path =>
val errMsg = intercept[Exception](spark.read.schema(readSchema).parquet(path).collect())
.getMessage
assert(errMsg.contains("Parquet column cannot be converted in file"))
}
}
test("write metadata") {
val hadoopConf = spark.sessionState.newHadoopConf()
withTempPath { file =>
val path = new Path(file.toURI.toString)
val fs = FileSystem.getLocal(hadoopConf)
val schema = StructType.fromAttributes(ScalaReflection.attributesFor[(Int, String)])
writeMetadata(schema, path, hadoopConf)
assert(fs.exists(new Path(path, ParquetFileWriter.PARQUET_COMMON_METADATA_FILE)))
assert(fs.exists(new Path(path, ParquetFileWriter.PARQUET_METADATA_FILE)))
val expectedSchema = new SparkToParquetSchemaConverter().convert(schema)
val actualSchema = readFooter(path, hadoopConf).getFileMetaData.getSchema
actualSchema.checkContains(expectedSchema)
expectedSchema.checkContains(actualSchema)
}
}
test("save - overwrite") {
withParquetFile((1 to 10).map(i => (i, i.toString))) { file =>
val newData = (11 to 20).map(i => (i, i.toString))
newData.toDF().write.format("parquet").mode(SaveMode.Overwrite).save(file)
readParquetFile(file) { df =>
checkAnswer(df, newData.map(Row.fromTuple))
}
}
}
test("save - ignore") {
val data = (1 to 10).map(i => (i, i.toString))
withParquetFile(data) { file =>
val newData = (11 to 20).map(i => (i, i.toString))
newData.toDF().write.format("parquet").mode(SaveMode.Ignore).save(file)
readParquetFile(file) { df =>
checkAnswer(df, data.map(Row.fromTuple))
}
}
}
test("save - throw") {
val data = (1 to 10).map(i => (i, i.toString))
withParquetFile(data) { file =>
val newData = (11 to 20).map(i => (i, i.toString))
val errorMessage = intercept[Throwable] {
newData.toDF().write.format("parquet").mode(SaveMode.ErrorIfExists).save(file)
}.getMessage
assert(errorMessage.contains("already exists"))
}
}
test("save - append") {
val data = (1 to 10).map(i => (i, i.toString))
withParquetFile(data) { file =>
val newData = (11 to 20).map(i => (i, i.toString))
newData.toDF().write.format("parquet").mode(SaveMode.Append).save(file)
readParquetFile(file) { df =>
checkAnswer(df, (data ++ newData).map(Row.fromTuple))
}
}
}
test("SPARK-6315 regression test") {
// Spark 1.1 and prior versions write Spark schema as case class string into Parquet metadata.
// This has been deprecated by JSON format since 1.2. Notice that, 1.3 further refactored data
// types API, and made StructType.fields an array. This makes the result of StructType.toString
// different from prior versions: there's no "Seq" wrapping the fields part in the string now.
val sparkSchema =
"StructType(Seq(StructField(a,BooleanType,false),StructField(b,IntegerType,false)))"
// The Parquet schema is intentionally made different from the Spark schema. Because the new
// Parquet data source simply falls back to the Parquet schema once it fails to parse the Spark
// schema. By making these two different, we are able to assert the old style case class string
// is parsed successfully.
val parquetSchema = MessageTypeParser.parseMessageType(
"""message root {
| required int32 c;
|}
""".stripMargin)
withTempPath { location =>
val extraMetadata = Map(ParquetReadSupport.SPARK_METADATA_KEY -> sparkSchema.toString)
val path = new Path(location.getCanonicalPath)
val conf = spark.sessionState.newHadoopConf()
writeMetadata(parquetSchema, path, conf, extraMetadata)
readParquetFile(path.toString) { df =>
assertResult(df.schema) {
StructType(
StructField("a", BooleanType, nullable = true) ::
StructField("b", IntegerType, nullable = true) ::
Nil)
}
}
}
}
test("SPARK-8121: spark.sql.parquet.output.committer.class shouldn't be overridden") {
withSQLConf(SQLConf.FILE_COMMIT_PROTOCOL_CLASS.key ->
classOf[SQLHadoopMapReduceCommitProtocol].getCanonicalName) {
val extraOptions = Map(
SQLConf.OUTPUT_COMMITTER_CLASS.key -> classOf[ParquetOutputCommitter].getCanonicalName,
SQLConf.PARQUET_OUTPUT_COMMITTER_CLASS.key ->
classOf[JobCommitFailureParquetOutputCommitter].getCanonicalName
)
withTempPath { dir =>
val message = intercept[SparkException] {
spark.range(0, 1).write.options(extraOptions).parquet(dir.getCanonicalPath)
}.getCause.getMessage
assert(message === "Intentional exception for testing purposes")
}
}
}
test("SPARK-6330 regression test") {
// In 1.3.0, save to fs other than file: without configuring core-site.xml would get:
// IllegalArgumentException: Wrong FS: hdfs://..., expected: file:///
intercept[Throwable] {
spark.read.parquet("file:///nonexistent")
}
val errorMessage = intercept[Throwable] {
spark.read.parquet("hdfs://nonexistent")
}.toString
assert(errorMessage.contains("UnknownHostException"))
}
test("SPARK-7837 Do not close output writer twice when commitTask() fails") {
withSQLConf(SQLConf.FILE_COMMIT_PROTOCOL_CLASS.key ->
classOf[SQLHadoopMapReduceCommitProtocol].getCanonicalName) {
// Using an output committer that always fails when committing a task, so that both
// `commitTask()` and `abortTask()` are invoked.
val extraOptions = Map[String, String](
SQLConf.PARQUET_OUTPUT_COMMITTER_CLASS.key ->
classOf[TaskCommitFailureParquetOutputCommitter].getCanonicalName
)
// Before fixing SPARK-7837, the following code results in an NPE because both
// `commitTask()` and `abortTask()` try to close output writers.
withTempPath { dir =>
val m1 = intercept[SparkException] {
spark.range(1).coalesce(1).write.options(extraOptions).parquet(dir.getCanonicalPath)
}.getCause.getMessage
assert(m1.contains("Intentional exception for testing purposes"))
}
withTempPath { dir =>
val m2 = intercept[SparkException] {
val df = spark.range(1).select('id as 'a, 'id as 'b).coalesce(1)
df.write.partitionBy("a").options(extraOptions).parquet(dir.getCanonicalPath)
}.getCause.getMessage
assert(m2.contains("Intentional exception for testing purposes"))
}
}
}
test("SPARK-11044 Parquet writer version fixed as version1 ") {
withSQLConf(SQLConf.FILE_COMMIT_PROTOCOL_CLASS.key ->
classOf[SQLHadoopMapReduceCommitProtocol].getCanonicalName) {
// For dictionary encoding, Parquet changes the encoding types according to its writer
// version. So, this test checks one of the encoding types in order to ensure that
// the file is written with writer version2.
val extraOptions = Map[String, String](
// Write a Parquet file with writer version2.
ParquetOutputFormat.WRITER_VERSION -> ParquetProperties.WriterVersion.PARQUET_2_0.toString,
// By default, dictionary encoding is enabled from Parquet 1.2.0 but
// it is enabled just in case.
ParquetOutputFormat.ENABLE_DICTIONARY -> "true"
)
val hadoopConf = spark.sessionState.newHadoopConfWithOptions(extraOptions)
withSQLConf(ParquetOutputFormat.JOB_SUMMARY_LEVEL -> "ALL") {
withTempPath { dir =>
val path = s"${dir.getCanonicalPath}/part-r-0.parquet"
spark.range(1 << 16).selectExpr("(id % 4) AS i")
.coalesce(1).write.options(extraOptions).mode("overwrite").parquet(path)
val blockMetadata = readFooter(new Path(path), hadoopConf).getBlocks.asScala.head
val columnChunkMetadata = blockMetadata.getColumns.asScala.head
// If the file is written with version2, this should include
// Encoding.RLE_DICTIONARY type. For version1, it is Encoding.PLAIN_DICTIONARY
assert(columnChunkMetadata.getEncodings.contains(Encoding.RLE_DICTIONARY))
}
}
}
}
test("null and non-null strings") {
// Create a dataset where the first values are NULL and then some non-null values. The
// number of non-nulls needs to be bigger than the ParquetReader batch size.
val data: Dataset[String] = spark.range(200).map (i =>
if (i < 150) null
else "a"
)
val df = data.toDF("col")
assert(df.agg("col" -> "count").collect().head.getLong(0) == 50)
withTempPath { dir =>
val path = s"${dir.getCanonicalPath}/data"
df.write.parquet(path)
readParquetFile(path) { df2 =>
assert(df2.agg("col" -> "count").collect().head.getLong(0) == 50)
}
}
}
test("read dictionary encoded decimals written as INT32") {
withAllParquetReaders {
checkAnswer(
// Decimal column in this file is encoded using plain dictionary
readResourceParquetFile("test-data/dec-in-i32.parquet"),
spark.range(1 << 4).select('id % 10 cast DecimalType(5, 2) as 'i32_dec))
}
}
test("read dictionary encoded decimals written as INT64") {
withAllParquetReaders {
checkAnswer(
// Decimal column in this file is encoded using plain dictionary
readResourceParquetFile("test-data/dec-in-i64.parquet"),
spark.range(1 << 4).select('id % 10 cast DecimalType(10, 2) as 'i64_dec))
}
}
test("read dictionary encoded decimals written as FIXED_LEN_BYTE_ARRAY") {
withAllParquetReaders {
checkAnswer(
// Decimal column in this file is encoded using plain dictionary
readResourceParquetFile("test-data/dec-in-fixed-len.parquet"),
spark.range(1 << 4).select('id % 10 cast DecimalType(10, 2) as 'fixed_len_dec))
}
}
test("read dictionary and plain encoded timestamp_millis written as INT64") {
withAllParquetReaders {
checkAnswer(
// timestamp column in this file is encoded using combination of plain
// and dictionary encodings.
readResourceParquetFile("test-data/timemillis-in-i64.parquet"),
(1 to 3).map(i => Row(new java.sql.Timestamp(10))))
}
}
test("SPARK-12589 copy() on rows returned from reader works for strings") {
withTempPath { dir =>
val data = (1, "abc") ::(2, "helloabcde") :: Nil
data.toDF().write.parquet(dir.getCanonicalPath)
var hash1: Int = 0
var hash2: Int = 0
(false :: true :: Nil).foreach { v =>
withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> v.toString) {
val df = spark.read.parquet(dir.getCanonicalPath)
val rows = df.queryExecution.toRdd.map(_.copy()).collect()
val unsafeRows = rows.map(_.asInstanceOf[UnsafeRow])
if (!v) {
hash1 = unsafeRows(0).hashCode()
hash2 = unsafeRows(1).hashCode()
} else {
assert(hash1 == unsafeRows(0).hashCode())
assert(hash2 == unsafeRows(1).hashCode())
}
}
}
}
}
test("SPARK-36726: test incorrect Parquet row group file offset") {
readParquetFile(testFile("test-data/malformed-file-offset.parquet")) { df =>
assert(df.count() == 3650)
}
}
test("VectorizedParquetRecordReader - direct path read") {
val data = (0 to 10).map(i => (i, (i + 'a').toChar.toString))
withTempPath { dir =>
spark.createDataFrame(data).repartition(1).write.parquet(dir.getCanonicalPath)
val file = SpecificParquetRecordReaderBase.listDirectory(dir).get(0);
{
val conf = sqlContext.conf
val reader = new VectorizedParquetRecordReader(
conf.offHeapColumnVectorEnabled, conf.parquetVectorizedReaderBatchSize)
try {
reader.initialize(file, null)
val result = mutable.ArrayBuffer.empty[(Int, String)]
while (reader.nextKeyValue()) {
val row = reader.getCurrentValue.asInstanceOf[InternalRow]
val v = (row.getInt(0), row.getString(1))
result += v
}
assert(data.toSet == result.toSet)
} finally {
reader.close()
}
}
// Project just one column
{
val conf = sqlContext.conf
val reader = new VectorizedParquetRecordReader(
conf.offHeapColumnVectorEnabled, conf.parquetVectorizedReaderBatchSize)
try {
reader.initialize(file, ("_2" :: Nil).asJava)
val result = mutable.ArrayBuffer.empty[(String)]
while (reader.nextKeyValue()) {
val row = reader.getCurrentValue.asInstanceOf[InternalRow]
result += row.getString(0)
}
assert(data.map(_._2).toSet == result.toSet)
} finally {
reader.close()
}
}
// Project columns in opposite order
{
val conf = sqlContext.conf
val reader = new VectorizedParquetRecordReader(
conf.offHeapColumnVectorEnabled, conf.parquetVectorizedReaderBatchSize)
try {
reader.initialize(file, ("_2" :: "_1" :: Nil).asJava)
val result = mutable.ArrayBuffer.empty[(String, Int)]
while (reader.nextKeyValue()) {
val row = reader.getCurrentValue.asInstanceOf[InternalRow]
val v = (row.getString(0), row.getInt(1))
result += v
}
assert(data.map { x => (x._2, x._1) }.toSet == result.toSet)
} finally {
reader.close()
}
}
// Empty projection
{
val conf = sqlContext.conf
val reader = new VectorizedParquetRecordReader(
conf.offHeapColumnVectorEnabled, conf.parquetVectorizedReaderBatchSize)
try {
reader.initialize(file, List[String]().asJava)
var result = 0
while (reader.nextKeyValue()) {
result += 1
}
assert(result == data.length)
} finally {
reader.close()
}
}
}
}
test("VectorizedParquetRecordReader - partition column types") {
withTempPath { dir =>
Seq(1).toDF().repartition(1).write.parquet(dir.getCanonicalPath)
val dataTypes =
Seq(StringType, BooleanType, ByteType, BinaryType, ShortType, IntegerType, LongType,
FloatType, DoubleType, DecimalType(25, 5), DateType, TimestampType)
val constantValues =
Seq(
UTF8String.fromString("a string"),
true,
1.toByte,
"Spark SQL".getBytes,
2.toShort,
3,
Long.MaxValue,
0.25.toFloat,
0.75D,
Decimal("1234.23456"),
DateTimeUtils.fromJavaDate(java.sql.Date.valueOf("2015-01-01")),
DateTimeUtils.fromJavaTimestamp(java.sql.Timestamp.valueOf("2015-01-01 23:50:59.123")))
dataTypes.zip(constantValues).foreach { case (dt, v) =>
val schema = StructType(StructField("pcol", dt) :: Nil)
val conf = sqlContext.conf
val vectorizedReader = new VectorizedParquetRecordReader(
conf.offHeapColumnVectorEnabled, conf.parquetVectorizedReaderBatchSize)
val partitionValues = new GenericInternalRow(Array(v))
val file = SpecificParquetRecordReaderBase.listDirectory(dir).get(0)
try {
vectorizedReader.initialize(file, null)
vectorizedReader.initBatch(schema, partitionValues)
vectorizedReader.nextKeyValue()
val row = vectorizedReader.getCurrentValue.asInstanceOf[InternalRow]
// Use `GenericMutableRow` by explicitly copying rather than `ColumnarBatch`
// in order to use get(...) method which is not implemented in `ColumnarBatch`.
val actual = row.copy().get(1, dt)
val expected = v
if (dt.isInstanceOf[BinaryType]) {
assert(actual.asInstanceOf[Array[Byte]] sameElements expected.asInstanceOf[Array[Byte]])
} else {
assert(actual == expected)
}
} finally {
vectorizedReader.close()
}
}
}
}
test("SPARK-18433: Improve DataSource option keys to be more case-insensitive") {
withSQLConf(SQLConf.PARQUET_COMPRESSION.key -> "snappy") {
val option = new ParquetOptions(Map("Compression" -> "uncompressed"), spark.sessionState.conf)
assert(option.compressionCodecClassName == "UNCOMPRESSED")
}
}
test("SPARK-23173 Writing a file with data converted from JSON with and incorrect user schema") {
withTempPath { file =>
val jsonData =
"""{
| "a": 1,
| "c": "foo"
|}
|""".stripMargin
val jsonSchema = new StructType()
.add("a", LongType, nullable = false)
.add("b", StringType, nullable = false)
.add("c", StringType, nullable = false)
spark.range(1).select(from_json(lit(jsonData), jsonSchema) as "input")
.write.parquet(file.getAbsolutePath)
checkAnswer(spark.read.parquet(file.getAbsolutePath), Seq(Row(Row(1, null, "foo"))))
}
}
test("Write Spark version into Parquet metadata") {
withTempPath { dir =>
spark.range(1).repartition(1).write.parquet(dir.getAbsolutePath)
assert(getMetaData(dir)(SPARK_VERSION_METADATA_KEY) === SPARK_VERSION_SHORT)
}
}
Seq(true, false).foreach { vec =>
test(s"SPARK-34167: read LongDecimals with precision < 10, VectorizedReader $vec") {
// decimal32-written-as-64-bit.snappy.parquet was generated using a 3rd-party library. It has
// 10 rows of Decimal(9, 1) written as LongDecimal instead of an IntDecimal
readParquetFile(testFile("test-data/decimal32-written-as-64-bit.snappy.parquet"), vec) {
df =>
assert(10 == df.collect().length)
val first10Df = df.head(10)
assert(
Seq(792059492, 986842987, 540247998, null, 357991078,
494131059, 92536396, 426847157, -999999999, 204486094)
.zip(first10Df).forall(d =>
d._2.isNullAt(0) && d._1 == null ||
d._1 == d._2.getDecimal(0).unscaledValue().intValue()
))
}
// decimal32-written-as-64-bit-dict.snappy.parquet was generated using a 3rd-party library. It
// has 2048 rows of Decimal(3, 1) written as LongDecimal instead of an IntDecimal
readParquetFile(
testFile("test-data/decimal32-written-as-64-bit-dict.snappy.parquet"), vec) {
df =>
assert(2048 == df.collect().length)
val first10Df = df.head(10)
assert(Seq(751, 937, 511, null, 337, 467, 84, 403, -999, 190)
.zip(first10Df).forall(d =>
d._2.isNullAt(0) && d._1 == null ||
d._1 == d._2.getDecimal(0).unscaledValue().intValue()))
val last10Df = df.tail(10)
assert(Seq(866, 20, 492, 76, 824, 604, 343, 820, 864, 243)
.zip(last10Df).forall(d =>
d._1 == d._2.getDecimal(0).unscaledValue().intValue()))
}
}
}
}
class JobCommitFailureParquetOutputCommitter(outputPath: Path, context: TaskAttemptContext)
extends ParquetOutputCommitter(outputPath, context) {
override def commitJob(jobContext: JobContext): Unit = {
sys.error("Intentional exception for testing purposes")
}
}
class TaskCommitFailureParquetOutputCommitter(outputPath: Path, context: TaskAttemptContext)
extends ParquetOutputCommitter(outputPath, context) {
override def commitTask(context: TaskAttemptContext): Unit = {
sys.error("Intentional exception for testing purposes")
}
}
| holdenk/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala | Scala | apache-2.0 | 41,259 |
/*
* Copyright 2017 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.pattern.orchestration
import org.scalatest.funspec.AnyFunSpec
import org.scalatest.matchers.should.Matchers
import java.util.NoSuchElementException
import scala.language.postfixOps
import scala.util.{Failure, Success, Try}
class OFutureSpec extends AnyFunSpec with Matchers {
//The key test case follows: as `threadless` indicates, the callbacks (success/failure) should run
//on the same thread that completes the promise, unlike the common `scala.concurrent.Future`
it("should run success callbacks in the same thread") {
val threadIdentity = Thread.currentThread
val accuracy = 1024
val p = OPromise[String]()
val f = p.future
var ack = Seq[Boolean]()
1 to accuracy foreach(_ => f.onSuccess({
case v:String => ack = ack :+ (Thread.currentThread == threadIdentity)
case _ => throw new IllegalStateException
}))
p.success("value")
f.value should equal(Some(Success("value")))
ack should equal(Seq.fill[Boolean](accuracy)(true))
}
it("should run failure callbacks in the same thread"){
val threadIdentity = Thread.currentThread
val accuracy = 1024
val p = OPromise[String]()
val f = p.future
var ack = Seq[Boolean]()
1 to accuracy foreach(_ => f.onFailure({
case t:Throwable => ack = ack :+ (Thread.currentThread == threadIdentity)
case _ => throw new IllegalStateException
}))
val cause = new IllegalArgumentException
p.failure(cause)
f.value should equal(Some(Failure(cause)))
ack should equal(Seq.fill[Boolean](accuracy)(true))
}
//now we borrow some test cases from: https://github.com/scala/scala/blob/master/test/files/jvm/future-spec/FutureTests.scala
//for completeness and orchestration functions.
it("should compose with for-comprehensions") {
def async(x: Int) = OFuture.successful((x * 2).toString)
val future0 = OFuture.successful("five!".length)
val future1 = for {
a <- future0.mapTo[Int] // returns 5
b <- async(a) // returns "10"
c <- async(7) // returns "14"
} yield b + "-" + c
future1 should not be null
future1.isCompleted should equal(true)
future1.value should equal(Some(Success("10-14")))
val future2 = for {
a <- future0.mapTo[Int]
b <- OFuture.successful((a * 2).toString).mapTo[Int]
c <- OFuture.successful((7 * 2).toString)
} yield s"$b-$c"
future2 should not be null
future2.isCompleted should equal(true)
future2.value match {
case Some(Failure(ex)) => ex.getClass should equal(classOf[ClassCastException])
case _ => fail("value should be a class cast exception due to `b <- Future.successful((a * 2).toString).mapTo[Int]`")
}
}
it("should be able to recover from exceptions") {
val future1 = OFuture.successful(5)
val future2 = future1 map (_ / 0)
val future3 = future2 map (_.toString)
val future4 = future1 recover {
case e: ArithmeticException => 0
} map (_.toString)
val future5 = future2 recover {
case e: ArithmeticException => 0
} map (_.toString)
val future6 = future2 recover {
case e: MatchError => 0
} map (_.toString)
val future7 = future3 recover {
case e: ArithmeticException => "You got ERROR"
}
future1.value should equal(Some(Success(5)))
future2.value match {
case Some(Failure(ex)) => ex.getClass should equal(classOf[ArithmeticException])
case _ => fail("value should be an arithmetic exception due to `future1 map (_ / 0)`")
}
future3.value match {
case Some(Failure(ex)) => ex.getClass should equal(classOf[ArithmeticException])
case _ => fail("value should be an arithmetic exception due to `future2`")
}
future4.value should equal(Some(Success("5")))
future5.value should equal(Some(Success("0")))
future6.value match {
case Some(Failure(ex)) => ex.getClass should equal(classOf[ArithmeticException])
case _ => fail("value should be an arithmetic exception due to `future2`")
}
future7.value should equal(Some(Success("You got ERROR")))
}
it("should recoverWith from exceptions") {
val o = new IllegalStateException("original")
val r = new IllegalStateException("recovered")
val r0 = OFuture.failed[String](o) recoverWith {
case _ if false => OFuture.successful("yay")
}
r0.isCompleted should equal(true)
r0.value should equal(Some(Failure(o)))
val recovered = OFuture.failed[String](o) recoverWith {
case _ => OFuture.successful("yay!")
}
recovered.isCompleted should equal(true)
recovered.value should equal(Some(Success("yay!")))
val reFailed = OFuture.failed[String](o) recoverWith {
case _ => OFuture.failed[String](r)
}
reFailed.isCompleted should equal(true)
reFailed.value should equal(Some(Failure(r)))
}
it("andThen should work as expected") {
val q = new java.util.concurrent.LinkedBlockingQueue[Int]
for (i <- 1 to 1000) {
val chained = OFuture.successful({
q.add(1); 3
}) andThen {
case _ => q.add(2)
} andThen {
case Success(0) => q.add(Int.MaxValue)
} andThen {
case _ => q.add(3);
}
chained.value should equal(Some(Success(3)))
q.poll() should equal (1)
q.poll() should equal (2)
q.poll() should equal (3)
q.clear()
}
}
it("should get firstCompletedOf") {
def futures = Vector.fill[OFuture[Int]](10) {
OPromise[Int]().future
} :+ OFuture.successful[Int](5)
OFuture.firstCompletedOf(futures).value should equal (Some(Success(5)))
OFuture.firstCompletedOf(futures.iterator).value should equal (Some(Success(5)))
}
it("should find the future") {
val futures = for (i <- 1 to 10) yield OFuture.successful(i)
val result = OFuture.find[Int](futures)(_ == 3)
result.value should equal (Some(Success(Some(3))))
val notFound = OFuture.find[Int](futures.iterator)(_ == 11)
notFound.value should equal(Some(Success(None)))
}
it("should support zip function") {
val f = new IllegalStateException("test")
val zip0 = OFuture.failed[String](f) zip OFuture.successful("foo")
zip0.value should equal(Some(Failure(f)))
val zip1 = OFuture.successful("foo") zip OFuture.failed[String](f)
zip1.value should equal(Some(Failure(f)))
val zip2 = OFuture.failed[String](f) zip OFuture.failed[String](f)
zip2.value should equal(Some(Failure(f)))
val zip3 = OFuture.successful("foo") zip OFuture.successful("foo")
zip3.value should equal(Some(Success(("foo", "foo"))))
}
it("should support fold function") {
def async(add: Int) = OFuture.successful(add)
val futures = (0 to 9) map {
idx => async(idx)
}
val folded = OFuture.fold(futures)(0) { _ + _ }
folded.value should equal(Some(Success(45)))
}
it("should support fold by composing") {
def futures = (0 to 9) map {
idx => OFuture.successful(idx)
}
val folded = futures.foldLeft(OFuture.successful(0)) {
case (fr, fa) => for (r <- fr; a <- fa) yield r + a
}
folded.value should equal(Some(Success(45)))
}
it("should show exception in the folding process") {
def async(add: Int):OFuture[Int] =
if (add == 6)
OFuture.failed(new IllegalArgumentException("shouldFoldResultsWithException: expected"))
else
OFuture.successful(add)
def futures = (0 to 9) map {
idx => async(idx)
}
val folded = OFuture.fold(futures)(0)(_ + _)
folded.value match {
case Some(Failure(ex)) =>
ex.getMessage should equal("shouldFoldResultsWithException: expected")
case _ =>
fail("value should be exception with message above due to `if (add == 6)\\n Future.failed(new IllegalArgumentException(\\"shouldFoldResultsWithException: expected\\"))`")
}
}
it("should return zero when the folding list is empty") {
val zero = OFuture.fold(List[OFuture[Int]]())(0)(_ + _)
zero.value should equal(Some(Success(0)))
}
it("should support reduce function") {
val futures = (0 to 9) map OFuture.successful
val reduced = OFuture.reduce(futures)(_ + _)
reduced.value should equal(Some(Success(45)))
}
it("should show exception in the reducing process") {
def async(add: Int):OFuture[Int] =
if (add == 6)
OFuture.failed(new IllegalArgumentException("shouldReduceResultsWithException: expected"))
else
OFuture.successful(add)
def futures = (0 to 9) map {
idx => async(idx)
}
val folded = OFuture.reduce(futures)(_ + _)
folded.value match {
case Some(Failure(ex)) =>
ex.getMessage should equal("shouldReduceResultsWithException: expected")
case _ =>
fail("value should be exception with message above due to `if (add == 6)\\n Future.failed(new IllegalArgumentException(\\"shouldReduceResultsWithException: expected\\"))`")
}
}
it("should throw exception when reducing an empty list") {
val reduced = OFuture.reduce(List[OFuture[Int]]())(_ + _)
reduced.value match {
case Some(Failure(ex)) => ex.getClass should equal(classOf[NoSuchElementException])
case _ => fail("should have got failure due to empty list reducing")
}
}
it("should support functions: filter, collect, fallback") {
val p = OPromise[String]()
val f = p.future
p.success("abc")
val newFuture = f.filter(s => s.equals("abc"))
newFuture.value.get.get should be("abc")
val newFuture2 = f.withFilter(s => s.equals("abc"))
newFuture2.value.get.get should be("abc")
val newFuture3 = f.filter(s => s.equals("abcd"))
newFuture3.value.get.failed.get shouldBe a[NoSuchElementException]
newFuture3.value.get.failed.get.getMessage should be("Future.filter predicate is not satisfied")
val newFuture4 = f.collect{
case "abc" => "OK"
}
newFuture4.value.get.get should be("OK")
val newFuture5 = f.collect {
case "abcd" => "OK"
}
newFuture5.value.get.failed.get shouldBe a[NoSuchElementException]
newFuture5.value.get.failed.get.getMessage should be("Future.collect partial function is not defined at: abc")
val newFuture6 = f.fallbackTo(OFuture.successful("haha"))
newFuture6.value.get.get should be("abc")
val p2 = OPromise[String]()
val f2 = p2.future
p2.failure(new RuntimeException("BadMan"))
val newFuture7 = f2.filter(s => s.equals("abc"))
newFuture7.value.get.failed.get shouldBe a[RuntimeException]
newFuture7.value.get.failed.get.getMessage should be("BadMan")
val newFuture8 = f2.collect{
case "abcd" => "OK"
}
newFuture8.value.get.failed.get shouldBe a[RuntimeException]
newFuture8.value.get.failed.get.getMessage should be("BadMan")
val newFuture9 = f2.fallbackTo(OFuture.successful("haha"))
newFuture9.value.get.get should be("haha")
}
it("should support functions: failed ,apply ,foreach, transform") {
val p = OPromise[String]()
val f = p.future
val func : Try[Throwable] => String = {
case Success(v) => v.getMessage
case Failure(t) => t.getMessage
}
p.success("abc")
var result = ""
f.foreach {
a => result = a.toUpperCase
}
result should be("ABC")
val transFuture = f.transform(s => s + "def", t => new IllegalArgumentException(t))
transFuture.value.get should be (Success("abcdef"))
f.failed.value.map(func).get should be ("Future.failed not completed with a throwable.")
f() should be ("abc")
val p1 = OPromise[String]()
val f1 = p1.future
the[NoSuchElementException] thrownBy {
f1()
} should have message "Future not completed."
p1.failure(new RuntimeException("BadMan"))
result = "aaa"
f1.foreach {
a => result = a.toUpperCase
}
result should be("aaa")
val transFuture1 = f1.transform(s => s + "def", t => new IllegalArgumentException(t))
transFuture1.value.get.failed.get shouldBe a[IllegalArgumentException]
transFuture1.value.get.failed.get.getCause shouldBe a[RuntimeException]
a [RuntimeException] should be thrownBy f1()
f1.failed.value map func getOrElse "" should be ("BadMan")
}
it("should support traversal") {
object counter {
var count = -1
def incAndGet() = counter.synchronized {
count += 2
count
}
}
val oddFutures = List.fill(100)(OFuture.successful(counter.incAndGet())).iterator
val traversed = OFuture.sequence(oddFutures)
traversed.value match {
case Some(Success(list:Iterator[Int])) => list.sum should equal(10000)
case _ => fail("should have got a list of integers")
}
val list = (1 to 100).toList
val traversedList = OFuture.traverse(list)(x => OFuture.successful(x * 2 - 1))
traversedList.value match {
case Some(Success(list:List[Int])) => list.sum should equal(10000)
case _ => fail("should have got a list of integers")
}
val iterator = (1 to 100).toList.iterator
val traversedIterator = OFuture.traverse(iterator)(x => OFuture.successful(x * 2 - 1))
traversedIterator.value match {
case Some(Success(list:Iterator[Int])) => list.sum should equal(10000)
case _ => fail("should have got a list of integers")
}
}
}
| paypal/squbs | squbs-pattern/src/test/scala/org/squbs/pattern/orchestration/OFutureSpec.scala | Scala | apache-2.0 | 14,042 |
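The same-thread callback behaviour called out in the comment at the top of this spec can be seen outside the test framework with a few lines. This is an illustrative sketch, not an additional squbs test.

```scala
val p = OPromise[Int]()
val f = p.future
var observed = 0
// onSuccess runs synchronously on the thread that completes the promise
f.onSuccess { case v => observed = v }
p.success(42)
assert(observed == 42) // visible immediately; no ExecutionContext is involved
```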
package org.everpeace
package object scalamata{
// A Set is equivalent to its membership function.
implicit def set2func[E](set: Set[E]): E => Boolean = set.contains(_)
// An automaton on alphabet Σ is equivalent to Seq[Σ] => Boolean
implicit def automaton2func[Q, ฮฃ](a: Automata[Q, ฮฃ]): Seq[ฮฃ] => Boolean = a.accept(_)
// εNFA and DFA can be implicitly converted to NFA for compositions
implicit def ฮตNFA2NFA[Q,ฮฃ](a:ฮตNFA[Q, ฮฃ]):NFA[Q, ฮฃ] = a.asNFA
implicit def DFA2NFA[Q,ฮฃ](a:DFA[Q,ฮฃ]):NFA[Q, ฮฃ] = a.asNFA
} | everpeace/scalamata | core/src/main/scala/org/everpeace/package.scala | Scala | mit | 526 |
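The conversions in this package object let a plain `Set` stand in wherever a predicate is expected. A minimal sketch of the `set2func` case follows; the values are made up for illustration and are not from the scalamata sources.

```scala
import org.everpeace.scalamata._

val vowels: Set[Char] = Set('a', 'e', 'i', 'o', 'u')
// set2func turns the Set into its membership function Char => Boolean
val isVowel: Char => Boolean = vowels
assert("scala".filter(isVowel) == "aa")
```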
/* Title: Pure/System/command_line.scala
Author: Makarius
Support for Isabelle/Scala command line tools.
*/
package isabelle
object Command_Line
{
object Chunks
{
private def chunks(list: List[String]): List[List[String]] =
list.indexWhere(_ == "\n") match {
case -1 => List(list)
case i =>
val (chunk, rest) = list.splitAt(i)
chunk :: chunks(rest.tail)
}
def unapplySeq(list: List[String]): Option[List[List[String]]] = Some(chunks(list))
}
var debug = false
def tool(body: => Int): Nothing =
{
val rc =
try { body }
catch {
case exn: Throwable =>
if (debug) exn.printStackTrace
Output.error_message(Exn.message(exn))
Exn.return_code(exn, 2)
}
sys.exit(rc)
}
def tool0(body: => Unit): Nothing = tool { body; 0 }
}
| MerelyAPseudonym/isabelle | src/Pure/System/command_line.scala | Scala | bsd-3-clause | 873 |
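A hypothetical tool entry point using the `Command_Line.tool` wrapper above might look as follows. `Hello_Tool` and its argument handling are illustrative, not part of the Isabelle sources; the sketch assumes it lives in the `isabelle` package so that `Output` is in scope.

```scala
package isabelle

object Hello_Tool {
  def main(args: Array[String]): Unit =
    Command_Line.tool {
      args.toList match {
        case name :: Nil => println("Hello " + name); 0        // return code 0 on success
        case _ => Output.error_message("Usage: hello NAME"); 1 // non-zero return code on bad usage
      }
    }
}
```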
package com.prezi.haskell.gradle.io.packers
import java.io.File
/**
* Abstract zip unpacker interface
*/
trait Unpacker {
def unpack(zipFile: File, targetDir: File): Unit
}
| prezi/gradle-haskell-plugin | src/main/scala/com/prezi/haskell/gradle/io/packers/Unpacker.scala | Scala | apache-2.0 | 181 |
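A minimal implementation of this trait could be backed by `java.util.zip`. The sketch below is illustrative only (the plugin ships its own packers) and assumes it sits in the same package as the trait.

```scala
package com.prezi.haskell.gradle.io.packers

import java.io.{File, FileInputStream, FileOutputStream}
import java.util.zip.ZipInputStream

class SimpleZipUnpacker extends Unpacker {
  override def unpack(zipFile: File, targetDir: File): Unit = {
    val zis = new ZipInputStream(new FileInputStream(zipFile))
    try {
      // Walk every entry, recreating directories and writing files under targetDir.
      // (A production unpacker should also validate entry names against path traversal.)
      Iterator.continually(zis.getNextEntry).takeWhile(_ != null).foreach { entry =>
        val out = new File(targetDir, entry.getName)
        if (entry.isDirectory) out.mkdirs()
        else {
          out.getParentFile.mkdirs()
          val fos = new FileOutputStream(out)
          try {
            val buf = new Array[Byte](8192)
            Iterator.continually(zis.read(buf)).takeWhile(_ != -1).foreach(n => fos.write(buf, 0, n))
          } finally fos.close()
        }
      }
    } finally zis.close()
  }
}
```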
/*
* (c) Copyright 2016 Hewlett Packard Enterprise Development LP
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cogx.compiler.codegenerator.opencl.hyperkernels
import cogx.compiler.codegenerator.opencl.fragments.{TensorElementAddressing, AddressingMode, HyperKernel}
import cogx.platform.types.{VirtualFieldRegister, FieldType}
import cogx.cogmath.geometry.Shape
import cogx.compiler.parser.op.SubspaceOp
/** Extract a portion of an input field. Unlike the Subfield operation, this
* kernel expects a constant origin for the extracted region (not a dynamic
* guide vector). Also, the extracted region must be within the bounds of the
* input field, so no border policy is brought into play.
*
* @author Dick Carter
*
* @param in The input virtual field register driving this kernel.
* @param operation The SubspaceOp for this operation, with its ranges.
* @param resultType The FieldType of the result of this kernel.
* @param addressMode The addressing mode of this kernel.
*/
private[cogx]
class SubspaceHyperKernel private (in: Array[VirtualFieldRegister],
operation: SubspaceOp,
resultType: FieldType,
addressMode: AddressingMode)
extends HyperKernel(operation, in, resultType, addressMode) {
val code = new StringBuffer
val inDim = in(0).fieldType.dimensions
code append " column = _column + " + operation.indices(inDim - 1).start + ";\n"
if (inDim >= 2)
code append " row = _row + " + operation.indices(inDim - 2).start + ";\n"
if (inDim >= 3)
code append " layer = _layer + " + operation.indices(inDim - 3).start + ";\n"
if (addressMode == TensorElementAddressing)
code append " tensorElement = _tensorElement;\n"
code append " @out0 = readNonlocal(@in0);\n"
addCode(code.toString)
// debugCompile
}
/** Factory object for creating kernels of this type.
*/
private[cogx]
object SubspaceHyperKernel extends HyperHelper {
/**
* Create a Hyperkernel that extracts a portion of an input field.
*
* @param in The input virtual field register driving this kernel.
* @param operation The SubspaceOp for this operation, with its ranges.
* @param resultType The FieldType of the result of this kernel.
* @return Synthesized hyperkernel.
*/
def apply(in: Array[VirtualFieldRegister], operation: SubspaceOp, resultType: FieldType): HyperKernel = {
require(in.length == 1)
val inType = in(0).fieldType
val indices = operation.indices
require(inType.dimensions == indices.length,
"The number of Range parameters for the subspace operator must match " +
"the dimensionality of the space.")
for (dim <- 0 until indices.length) {
val range = indices(dim)
require(range.step == 1, "Subspace operator requires a stride of 1")
require(range.start >= 0 && range.end <= inType.fieldShape(dim),
"\nRange in subspace operator exceeds size of field.\n" +
" range: " + range.start + " until " + range.end + "\n" +
" field: " + inType.fieldShape + "\n")
}
val newFieldShape = new Shape(operation.indices.map(_.length).toArray)
val expectedResultType = new FieldType(newFieldShape, inType.tensorShape, inType.elementType)
require(expectedResultType == resultType)
val addressing = bestAddressMode(in, resultType)
new SubspaceHyperKernel(in, operation, resultType, addressing)
}
} | hpe-cct/cct-core | src/main/scala/cogx/compiler/codegenerator/opencl/hyperkernels/SubspaceHyperKernel.scala | Scala | apache-2.0 | 4,014 |
package com.transport.domain.protocol
import com.transport.domain.otp.OTPGraphService
import com.vividsolutions.jts.geom.Coordinate
import org.opentripplanner.routing.core.{RoutingRequest, State, TraverseMode, TraverseModeSet}
import org.opentripplanner.routing.location.StreetLocation
import org.opentripplanner.routing.services.GraphService
import org.opentripplanner.routing.spt.GraphPath
import org.scalatest.{FlatSpec, Matchers}
import org.scalatest.concurrent.ScalaFutures
import scala.concurrent.Await
import scala.concurrent.duration._
class TripPlanSpec extends FlatSpec with Matchers with ScalaFutures {
implicit val graphService: GraphService = OTPGraphService.apply
OTPGraphService.get.waitTillAllGraphsLoaded()
"TransportRequest" should "execute the TripPlan request" in {
val req = new RoutingRequest(new TraverseModeSet(TraverseMode.BICYCLE))
req.routerId = "sf"
val tripPlanRequest = TransportRequest(req, TripPlan(List(
new GraphPath(new State(new StreetLocation("v1", new Coordinate(37.771291, -122.412597), "v1"), req), true),
new GraphPath(new State(new StreetLocation("v2", new Coordinate(37.765125, -122.409432), "v2"), req), true)
)))
//val tripResult = Await.result(tripPlanRequest.execute, 10 seconds)
}
}
| ksarath/transport-using-opentripplanner | src/test/scala/com/transport/domain/protocol/TripPlanSpec.scala | Scala | apache-2.0 | 1,277 |
package nars.logic.language
import java.util._
import nars.io.Symbols
import nars.storage.Memory
import IntersectionInt._
//remove if not needed
import scala.collection.JavaConversions._
import CompoundTerm._
object IntersectionInt {
/**
* Try to make a new compound from two components. Called by the inference rules.
   * @param term1 The first component
   * @param term2 The second component
* @param memory Reference to the memory
* @return A compound generated or a term it reduced to
*/
def make(term1: Term, term2: Term, memory: Memory): Term = {
var set: TreeSet[Term] = null
if ((term1.isInstanceOf[SetExt]) && (term2.isInstanceOf[SetExt])) {
set = new TreeSet[Term](term1.asInstanceOf[CompoundTerm].cloneComponents())
set.addAll(term2.asInstanceOf[CompoundTerm].cloneComponents())
return SetExt.make(set, memory)
}
if ((term1.isInstanceOf[SetInt]) && (term2.isInstanceOf[SetInt])) {
set = new TreeSet[Term](term1.asInstanceOf[CompoundTerm].cloneComponents())
set.retainAll(term2.asInstanceOf[CompoundTerm].cloneComponents())
return SetInt.make(set, memory)
}
if (term1.isInstanceOf[IntersectionInt]) {
set = new TreeSet[Term](term1.asInstanceOf[CompoundTerm].cloneComponents())
if (term2.isInstanceOf[IntersectionInt]) {
set.addAll(term2.asInstanceOf[CompoundTerm].cloneComponents())
} else {
set.add(term2.clone().asInstanceOf[Term])
}
} else if (term2.isInstanceOf[IntersectionInt]) {
set = new TreeSet[Term](term2.asInstanceOf[CompoundTerm].cloneComponents())
set.add(term1.clone().asInstanceOf[Term])
} else {
set = new TreeSet[Term]()
set.add(term1.clone().asInstanceOf[Term])
set.add(term2.clone().asInstanceOf[Term])
}
make(set, memory)
}
/**
   * Try to make a new IntersectionInt. Called by StringParser.
* @return the Term generated from the arguments
* @param argList The list of components
* @param memory Reference to the memory
*/
def make(argList: ArrayList[Term], memory: Memory): Term = {
val set = new TreeSet[Term](argList)
make(set, memory)
}
/**
* Try to make a new compound from a set of components. Called by the public make methods.
   * @param set a set of Term as components
* @param memory Reference to the memory
* @return the Term generated from the arguments
*/
def make(set: TreeSet[Term], memory: Memory): Term = {
if (set.size == 1) {
return set.first()
}
val argument = new ArrayList[Term](set)
val name = makeCompoundName(Symbols.INTERSECTION_INT_OPERATOR, argument)
val t = memory.nameToListedTerm(name)
if ((t != null)) t else new IntersectionInt(argument)
}
}
/**
 * A compound term whose intension is the intersection of the intensions of its components
*/
class IntersectionInt private (arg: ArrayList[Term]) extends CompoundTerm(arg) {
/**
* Constructor with full values, called by clone
* @param n The name of the term
* @param cs Component list
* @param open Open variable list
* @param i Syntactic complexity of the compound
*/
private def this(n: String,
cs: ArrayList[Term],
con: Boolean,
i: Short) {
// super(n, cs, con, i)
this(cs)
setName(n)
this.isConstant_ = con
this.complexity = i
}
/**
* Clone an object
   * @return A new object, to be cast into an IntersectionInt
*/
override def clone(): AnyRef = {
new IntersectionInt(name, cloneList(components).asInstanceOf[ArrayList[Term]], isConstant_, complexity)
}
/**
* Get the operator of the term.
* @return the operator of the term
*/
def operator(): String = Symbols.INTERSECTION_INT_OPERATOR
/**
   * Check if the compound is commutative.
   * @return true for commutative
*/
override def isCommutative(): Boolean = true
}
| printedheart/opennars | nars_lab_x/nars_scala/src/main/scala/nars/language/IntersectionInt.scala | Scala | agpl-3.0 | 3,886 |
/*
* Copyright (C) 2012 Julien Letrouit
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package frac
import org.specs2.mutable._
class DefaultDefinitionRepositorySpec extends Specification {
val sut = ExampleRepository
val kochSrc = "title = Koch lineangle = 60seed = FF = F+F--F+F"
"definition repository" should {
"have definitions" in {
sut.examples.size must be_>(0)
}
"have the cross as the first definition" in {
sut.examples(0).sourceText.replace("\r", "").replace("\n", "") must beEqualTo(kochSrc)
}
}
} | jletroui/frac | src/test/scala/frac/DefaultDefinitionRepositorySpec.scala | Scala | apache-2.0 | 1,063 |
package net.ceedubs.ficus
import com.typesafe.config.Config
import net.ceedubs.ficus.readers.{AllValueReaderInstances, ValueReader}
trait FicusConfig {
def config: Config
def as[A](path: String)(implicit reader: ValueReader[A]): A = reader.read(config, path)
def getAs[A](path: String)(implicit reader: ValueReader[Option[A]]): Option[A] = reader.read(config, path)
def apply[A](key: ConfigKey[A])(implicit reader: ValueReader[A]): A = as[A](key.path)
}
final case class SimpleFicusConfig(config: Config) extends FicusConfig
@deprecated(
"For implicits, use Ficus._ instead of FicusConfig._. Separately use ArbitraryTypeReader._ for macro-based derived reader instances. See https://github.com/ceedubs/ficus/issues/5",
since = "1.0.1/1.1.1")
object FicusConfig extends AllValueReaderInstances {
implicit def toFicusConfig(config: Config): FicusConfig = SimpleFicusConfig(config)
}
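// Illustrative usage sketch (not part of the original ficus sources): reading values
// through the FicusConfig wrapper defined above. The HOCON keys and values below are
// invented for the example.
object FicusConfigUsageSketch {
  import com.typesafe.config.ConfigFactory
  import net.ceedubs.ficus.Ficus._
  val config = ConfigFactory.parseString("""app { port = 8080, name = "demo" }""")
  val port: Int = config.as[Int]("app.port")                    // required value
  val name: Option[String] = config.getAs[String]("app.name")   // optional value
}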
| ceedubs/ficus | src/main/scala/net/ceedubs/ficus/FicusConfig.scala | Scala | mit | 902 |
package mau.mauannotation
import scala.concurrent.ExecutionContext.Implicits.global
import mau._
import mau.test._
class CompoundIndexAnnotationTest extends MauRedisSpec("CompoundIndexAnnotationTest", true) {
describe("@compoundIndex class annotation") {
it("should allow to find by compoundIndex") {
val personMauRepo = Person.mauRepo
val person = Person(None, "Hans", 27)
val savedPerson = await(personMauRepo.save(person))
val id = savedPerson.id.get
val retrievedPeople = await(personMauRepo.findByNameAge("Hans", 27))
retrievedPeople should be(Seq(savedPerson))
val retrievedPerson = retrievedPeople(0)
retrievedPerson.name should be(person.name)
}
it("should allow to delete by compoundIndex") {
val personMauRepo = Person.mauRepo
val person = Person(None, "Hans", 27)
val savedPerson = await(personMauRepo.save(person))
val id = savedPerson.id.get
val deleteResult = await(personMauRepo.deleteByNameAge("Hans", 27))
deleteResult should be(1)
val retrievedPerson = await(personMauRepo.get(id))
retrievedPerson should be(None)
}
it("should allow to count by compoundIndex") {
val personMauRepo = Person.mauRepo
val person = Person(None, "Hans", 27)
val savedPerson = await(personMauRepo.save(person))
val id = savedPerson.id.get
val countResult = await(personMauRepo.countByNameAge("Hans", 27))
countResult should be(1)
}
}
@mauModel("Mau:Test:CompoundIndexAnnotationTest", false)
@sprayJson
@compoundIndex("NameAge", List("name", "age"))
case class Person(
id: Option[Id],
name: String,
age: Int)
}
| ExNexu/mau | mau-annotation/src/test/scala/mau/annotation/CompoundIndexAnnotationTest.scala | Scala | apache-2.0 | 1,692 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.algebird.matrix
import scala.collection.mutable.{ ArrayBuffer, Map => MMap }
import com.twitter.algebird.{ AdaptiveVector, Monoid }
/**
* A Matrix structure that is designed to hide moving between sparse and dense representations
* Initial support here is focused on a dense row count with a sparse set of columns
*/
abstract class AdaptiveMatrix[V: Monoid] extends Serializable {
def rows: Int
def cols: Int
def size = rows * cols
def getValue(position: (Int, Int)): V
def updateInto(buffer: ArrayBuffer[V]): Unit
def updated(position: (Int, Int), value: V): AdaptiveMatrix[V]
}
object AdaptiveMatrix {
def zero[V: Monoid](rows: Int, cols: Int) = fill(rows, cols)(implicitly[Monoid[V]].zero)
def fill[V: Monoid](rows: Int, cols: Int)(fill: V): AdaptiveMatrix[V] = {
SparseColumnMatrix(Vector.fill(rows)(AdaptiveVector.fill[V](cols)(fill)))
}
def empty[V: Monoid](): AdaptiveMatrix[V] = {
SparseColumnMatrix(IndexedSeq[AdaptiveVector[V]]())
}
// The adaptive monoid to swap between sparse modes.
implicit def monoid[V: Monoid]: Monoid[AdaptiveMatrix[V]] = new Monoid[AdaptiveMatrix[V]] {
private[this] final val innerZero = implicitly[Monoid[V]].zero
override def zero: AdaptiveMatrix[V] = SparseColumnMatrix[V](IndexedSeq[AdaptiveVector[V]]())
override def plus(a: AdaptiveMatrix[V], b: AdaptiveMatrix[V]) = sumOption(List(a, b)).get
private def denseInsert(rows: Int, cols: Int, buff: ArrayBuffer[V], remainder: Iterator[AdaptiveMatrix[V]]): Option[AdaptiveMatrix[V]] = {
remainder.foreach(_.updateInto(buff))
Some(DenseMatrix(rows, cols, buff))
}
private def denseUpdate(current: AdaptiveMatrix[V], remainder: Iterator[AdaptiveMatrix[V]]): Option[AdaptiveMatrix[V]] = {
val rows = current.rows
val cols = current.cols
val buffer = ArrayBuffer.fill(rows * cols)(innerZero)
current.updateInto(buffer)
denseInsert(rows, cols, buffer, remainder)
}
private def sparseUpdate(storage: IndexedSeq[MMap[Int, V]], other: SparseColumnMatrix[V]) = {
other.rowsByColumns.zipWithIndex.foreach {
case (contents, indx) =>
val curMap: MMap[Int, V] = storage(indx)
AdaptiveVector.toMap(contents).foreach {
case (col, value) =>
curMap.update(col, Monoid.plus(value, curMap.getOrElse(col, innerZero)))
}
}
}
private def goDense(rows: Int, cols: Int, storage: IndexedSeq[MMap[Int, V]], remainder: Iterator[AdaptiveMatrix[V]]): Option[AdaptiveMatrix[V]] = {
val buffer = ArrayBuffer.fill(rows * cols)(innerZero)
var row = 0
val iter = storage.iterator
while (iter.hasNext) {
val curRow = iter.next
curRow.foreach {
case (col, value) =>
buffer(row * cols + col) = value
}
row += 1
}
denseInsert(rows, cols, buffer, remainder)
}
override def sumOption(items: TraversableOnce[AdaptiveMatrix[V]]): Option[AdaptiveMatrix[V]] =
if (items.isEmpty) {
None
} else {
val iter = items.toIterator.buffered
val rows = iter.head.rows
val cols = iter.head.cols
val sparseStorage = (0 until rows).map{ _ => MMap[Int, V]() }.toIndexedSeq
while (iter.hasNext) {
val current = iter.next
current match {
case d @ DenseMatrix(_, _, _) => return denseUpdate(d, iter)
case s @ SparseColumnMatrix(_) =>
sparseUpdate(sparseStorage, s)
if (sparseStorage(0).size > current.cols / 4) {
return goDense(rows, cols, sparseStorage, iter)
}
}
}
// Need to still be sparse to reach here, so must unpack the MMap to be used again.
Some(SparseColumnMatrix.fromSeqMap(cols, sparseStorage))
}
}
}
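// Illustrative sketch (not part of the original Algebird sources): combining two
// adaptive matrices through the implicit monoid defined above. Sizes and values are
// invented; Monoid comes from the algebird import at the top of this file.
object AdaptiveMatrixSketch {
  val ones = AdaptiveMatrix.fill[Int](2, 3)(1)  // starts as a sparse-column representation
  val zeros = AdaptiveMatrix.zero[Int](2, 3)
  val sum = Monoid.plus(ones, zeros)            // may densify internally, per sumOption above
  val cell = sum.getValue((1, 2))               // expected to be 1
}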
| avibryant/algebird | algebird-core/src/main/scala/com/twitter/algebird/matrix/AdaptiveMatrix.scala | Scala | apache-2.0 | 4,434 |
/*
* Copyright (c) <2015-2016>, see CONTRIBUTORS
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the <organization> nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package ch.usi.inf.l3.sana.dcct.codegenerator
import ch.usi.inf.l3.sana
import sana.dcct
import sana.ooj
import sana.brokenj
import sana.primj
import sana.tiny
import sana.calcj
import tiny.core.TransformationComponent
import tiny.dsl._
import tiny.ast._
import tiny.symbols._
import tiny.ast.Implicits._
import tiny.source.Position
import tiny.debug.logger
import calcj.ast._
import calcj.ast.operators.{Inc, Dec}
import tiny.errors.ErrorReporting.{error,warning}
import primj.ast.{MethodDefApi => _, TreeUtils => _, ProgramApi => _, _}
import primj.typechecker.ShapeCheckerComponent
import ooj.symbols._
import ooj.modifiers.Ops._
import ooj.errors.ErrorCodes._
import ooj.ast._
import ooj.names.StdNames._
import ooj.ast.TreeExtractors._
trait CodeGenComponent extends TransformationComponent[Tree, String] {
def codegen: Tree => String
}
// TODO move the task of formatting strings to some other module or something!
@component
trait ProgramCodeGenComponent extends CodeGenComponent{
(prg: primj.ast.ProgramApi) => {
    val dataDefScript = prg.members.foldLeft("")((c,x) => c + codegen(x) + "\n")
val prgName = prg.sourceName.toString()
val keyspaceName = prgName.substring(prgName.lastIndexOf("/") + 1, prgName.lastIndexOf("."))
s"DROP KEYSPACE IF EXISTS $keyspaceName \\n" +
s"CREATE KEYSPACE $keyspaceName \\n" +
"WITH replication = {'class': 'SimpleStrategy', 'replication_factor' : 3}; \\n" +
s"USE $keyspaceName; \\n" +
dataDefScript
}
}
@component
trait EntityCodeGenComponent extends CodeGenComponent{
(entity: ooj.ast.ClassDefApi) => {
    val entityDef = entity.body.members.foldLeft(s"CREATE TABLE ${entity.name} (\n")( (c, x) => {
      val field = x.asInstanceOf[ValDefApi]
      val fName = field.name
      val fType = field.tpt.asInstanceOf[TypeUseApi].name.toString().toLowerCase
      c + "\t" + fName + " " + fType + ",\n"
})
entityDef + ")"
}
}
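// Illustrative sketch (not part of the original sources): roughly the CQL text the
// component above would emit for a hypothetical entity `Person(name: String, age: Int)`.
// The trailing comma before ")" mirrors the foldLeft, which appends ",\n" after every field.
object EntityCodeGenSample {
  val expectedCql: String =
    "CREATE TABLE Person (\n" +
      "\tname string,\n" +
      "\tage int,\n" +
      ")"
}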
| amanjpro/languages-a-la-carte | dcct/src/main/scala/codegen/codegenerators.scala | Scala | bsd-3-clause | 3,495 |
package utils.silhouette
import models.{Manager, TokenManager}
import com.mohiva.play.silhouette.core.Environment
import com.mohiva.play.silhouette.contrib.authenticators.CookieAuthenticator
trait SilhouetteAdminController extends SilhouetteController[Manager, TokenManager] {
lazy val identityService = new ManagerService
lazy val passwordInfoDAO = new PasswordInfoAdminDAO
lazy val tokenService = new TokenManagerService
implicit lazy val env = Environment[Manager, CookieAuthenticator](
identityService,
authenticatorService,
Map(credentialsProvider.id -> credentialsProvider),
eventBus
)
} | vtapadia/crickit | modules/admin/app/utils/silhouette/SilhouetteAdminController.scala | Scala | apache-2.0 | 611 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.internal.projection
import java.net.URLEncoder
import akka.actor.ActorRef
import akka.actor.Props
import akka.cluster.ddata.DistributedData
import akka.actor.Terminated
import akka.actor.Actor
import akka.actor.ActorLogging
import akka.annotation.ApiMayChange
import akka.cluster.ddata.LWWMap
import akka.cluster.ddata.LWWMapKey
import akka.cluster.ddata.Replicator.Changed
import akka.cluster.ddata.Replicator.Subscribe
import akka.cluster.ddata.Replicator.Update
import akka.cluster.ddata.Replicator.UpdateFailure
import akka.cluster.ddata.Replicator.UpdateSuccess
import akka.cluster.ddata.Replicator.WriteConsistency
import akka.cluster.ddata.Replicator.WriteMajority
import akka.cluster.ddata.SelfUniqueAddress
import com.lightbend.lagom.projection.Started
import com.lightbend.lagom.projection.State
import com.lightbend.lagom.projection.Status
import com.lightbend.lagom.projection.Stopped
import scala.concurrent.duration._
import com.lightbend.lagom.projection.ProjectionSerializable
@ApiMayChange
object ProjectionRegistryActor {
def props = Props(new ProjectionRegistryActor)
type WorkerKey = String // a WorkerKey is a unique String representing WorkerCoordinates
type ProjectionName = String
case class WorkerRequestCommand(coordinates: WorkerCoordinates, requestedStatus: Status)
case class ProjectionRequestCommand(projectionName: ProjectionName, requestedStatus: Status)
case class RegisterProjection(projectionName: ProjectionName, tagNames: Set[String])
case class ReportForDuty(coordinates: WorkerCoordinates)
case class WorkerCoordinates(projectionName: ProjectionName, tagName: String) extends ProjectionSerializable {
val asKey: WorkerKey = s"$projectionName-$tagName"
val workerActorName: String = URLEncoder.encode(asKey, "utf-8")
val supervisingActorName: String = URLEncoder.encode(s"backoff-$asKey", "utf-8")
}
// Read-Only command. Returns `State` representing the state of
// the projection workers as currently seen in this node. It contains both the
// requested and the observed status for each worker (both are eventually consistent
// values since both may have been edited concurrently in other nodes).
case object GetState
}
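// Illustrative sketch (not part of the original Lagom sources): how WorkerCoordinates
// derives the identifiers used above. The projection and tag names are invented.
private[projection] object WorkerCoordinatesSketch {
  import ProjectionRegistryActor.WorkerCoordinates
  val coordinates = WorkerCoordinates("shopping-cart-view", "tag-1")
  // asKey concatenates projection name and tag name: "shopping-cart-view-tag-1"
  val key: String = coordinates.asKey
  // actor names are URL-encoded so the key is safe to use in an actor path
  val actorName: String = coordinates.workerActorName
}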
class ProjectionRegistryActor extends Actor with ActorLogging {
import ProjectionRegistryActor._
val replicator: ActorRef = DistributedData(context.system).replicator
implicit val node: SelfUniqueAddress = DistributedData(context.system).selfUniqueAddress
private val projectionConfig: ProjectionConfig = ProjectionConfig(context.system.settings.config)
  // All usages of `data` in this actor are unaffected by `UpdateTimeout` (see
  // https://github.com/lagom/lagom/pull/2208). In general use, WriteMajority(5 sec) could be an issue
// in big clusters.
val writeConsistency: WriteConsistency = WriteMajority(timeout = projectionConfig.writeMajorityTimeout)
// (a) Replicator contains data of all workers (requested and observed status, plus a name index)
private val RequestedStatusDataKey: LWWMapKey[WorkerCoordinates, Status] =
LWWMapKey[WorkerCoordinates, Status]("projection-registry-requested-status")
private val ObservedStatusDataKey: LWWMapKey[WorkerCoordinates, Status] =
LWWMapKey[WorkerCoordinates, Status]("projection-registry-observed-status")
replicator ! Subscribe(RequestedStatusDataKey, self)
replicator ! Subscribe(ObservedStatusDataKey, self)
// (b) Keep a local copy to simplify the implementation of some ops
var requestedStatusLocalCopy: Map[WorkerCoordinates, Status] = Map.empty[WorkerCoordinates, Status]
var observedStatusLocalCopy: Map[WorkerCoordinates, Status] = Map.empty[WorkerCoordinates, Status]
// (c) Actor indices contain only data of workers running locally
var actorIndex: Map[WorkerCoordinates, ActorRef] = Map.empty[WorkerCoordinates, ActorRef]
// required to handle Terminate(deadActor)
var reversedActorIndex: Map[ActorRef, WorkerCoordinates] = Map.empty[ActorRef, WorkerCoordinates]
  // (d) this index helps locate worker coordinates given a projection name. It is not a CRDT, assuming
  //     all nodes know all projections and use the same tag names. This is filled when projection
// drivers register the projection (which happens even before ClusterDistribution is started in
// the local node).
var nameIndex: Map[ProjectionName, Set[WorkerCoordinates]] = Map.empty[ProjectionName, Set[WorkerCoordinates]]
  // (e) Users may request a status before the projection is registered; in that case, we stash
// the request in this map.
var unknownProjections: Map[ProjectionName, Status] = Map.empty[ProjectionName, Status]
val DefaultRequestedStatus: Status = projectionConfig.defaultRequestedStatus
override def receive: Receive = {
case ReportForDuty(coordinates) =>
log.debug(s"Registering worker $coordinates to [${sender().path.toString}]")
// keep track
actorIndex = actorIndex.updated(coordinates, sender())
reversedActorIndex = reversedActorIndex.updated(sender, coordinates)
      // when a worker registers, we must reply with the requested status (if it's been set already, or DefaultRequestedStatus if not).
val initialStatus = requestedStatusLocalCopy.getOrElse(coordinates, DefaultRequestedStatus)
log.debug(s"Setting initial status [$initialStatus] on worker $coordinates [${sender().path.toString}]")
sender ! initialStatus
// watch
context.watch(sender)
case GetState =>
sender ! State.fromReplicatedData(
nameIndex,
requestedStatusLocalCopy,
observedStatusLocalCopy,
DefaultRequestedStatus,
Stopped // unless observed somewhere (and replicated), we consider a worker stopped.
)
case RegisterProjection(projectionName, tagNames) =>
log.debug(s"Registering projection $projectionName for tags $tagNames.")
nameIndex += (projectionName -> tagNames.map {
WorkerCoordinates(projectionName, _)
})
      // If we have stashed requests for this projection name, unstash them:
unknownProjections.get(projectionName).foreach { requestedStatus =>
self ! ProjectionRequestCommand(projectionName, requestedStatus)
unknownProjections -= projectionName
}
    // XyzRequestCommands come from `ProjectionRegistry` and contain a requested Status
case command: WorkerRequestCommand =>
log.debug(s"Propagating request $command.")
updateStateChangeRequests(command.coordinates, command.requestedStatus)
case command: ProjectionRequestCommand =>
log.debug(s"Propagating request $command.")
val projectionWorkers: Option[Set[WorkerCoordinates]] = nameIndex.get(command.projectionName)
projectionWorkers match {
case Some(workerSet) =>
workerSet.foreach(coordinates => updateStateChangeRequests(coordinates, command.requestedStatus))
case None => unknownProjections += (command.projectionName -> command.requestedStatus)
}
    // Bare Status messages come from workers and contain an observed Status
case observedStatus: Status =>
log.debug(s"Observed [${sender().path.toString}] as $observedStatus.")
reversedActorIndex.get(sender()) match {
case Some(workerName) => updateObservedStates(workerName, observedStatus)
case None => log.error(s"Unknown actor [${sender().path.toString}] reports status $observedStatus.")
}
case UpdateSuccess(_, _) => //noop: the update op worked nicely, nothing to see here
    // There are three types of UpdateFailure and 3 target CRDTs, totalling 9 possible cases, of
    // which only UpdateTimeout(_,_) is relevant.
// case UpdateTimeout(ObservedStatusDataKey, _) =>
// the observed status changes very rarely, but when it changes it may change multiple times in a short
    //   period. The fast/often changes probably happen on a cluster rollup, up/down-scale, etc. In any case,
    //   data will eventually become stable (unchanging), in which case it is eventually gossiped and the last writer wins.
// case UpdateTimeout(RequestedStatusDataKey, _) =>
// the request status changes very rarely. It is safe to ignore timeouts when using WriteMajority because
// data is eventually gossiped.
// case UpdateTimeout(NameIndexDataKey, _) =>
// the data in the nameIndex is only-grow until reaching a full hardcoded representation. It is safe to
// ignore timeouts when using WriteMajority because data is eventually gossiped.
// In any other UpdateFailure cases, noop:
// - ModifyFailure: using LWWMap with `put` as the modify operation will never fail, latest wins.
// - StoreFailure: doesn't apply because we don't use durable CRDTs
case _: UpdateFailure[_] =>
// Changed is not sent for every single change but, instead, it is batched on the replicator. This means
    // multiple changes will be notified at once. This is especially relevant when joining a cluster where,
    // instead of getting an avalanche of Changed messages with all the history of the CRDT, only a single
// message with the latest state is received.
case changed @ Changed(RequestedStatusDataKey) => {
val replicatedEntries = changed.get(RequestedStatusDataKey).entries
val diffs: Set[(WorkerCoordinates, Status)] = replicatedEntries.toSet.diff(requestedStatusLocalCopy.toSet)
// when the requested status changes, we must forward the new value to the appropriate actor
      // if it's one of the workers in the local actorIndex
diffs
.foreach {
case (workerName, requestedStatus) =>
log.debug(s"Remotely requested worker [$workerName] as [$requestedStatus].")
actorIndex.get(workerName).foreach { workerRef =>
log.debug(
s"Setting requested status [$requestedStatus] on worker $workerName [${workerRef.path.toString}]"
)
workerRef ! requestedStatus
}
}
requestedStatusLocalCopy = replicatedEntries
}
case changed @ Changed(ObservedStatusDataKey) =>
observedStatusLocalCopy = changed.get(ObservedStatusDataKey).entries
case Terminated(deadActor) =>
log.debug(s"Worker ${deadActor.path.name} died. Marking it as Stopped.")
// when a watched actor dies, we mark it as stopped. It will eventually
      // respawn (thanks to EnsureActive) and come back to its requested status.
reversedActorIndex.get(deadActor).foreach { coordinates =>
updateObservedStates(coordinates, Stopped)
}
// ... and then update indices and stop watching
actorIndex = actorIndex - reversedActorIndex(deadActor)
reversedActorIndex = reversedActorIndex - deadActor
}
private def updateStateChangeRequests(coordinates: WorkerCoordinates, requested: Status): Unit = {
replicator ! Update(RequestedStatusDataKey, LWWMap.empty[WorkerCoordinates, Status], writeConsistency)(
_.:+(coordinates -> requested)
)
}
private def updateObservedStates(coordinates: WorkerCoordinates, status: Status): Unit = {
replicator ! Update(ObservedStatusDataKey, LWWMap.empty[WorkerCoordinates, Status], writeConsistency)(
_.:+(coordinates -> status)
)
}
}
| TimMoore/lagom | projection/core/src/main/scala/com/lightbend/lagom/internal/projection/ProjectionRegistryActor.scala | Scala | apache-2.0 | 11,440 |
package org.jetbrains.sbt.annotator.dependency.ui
import javax.swing.{Icon, JComponent}
import com.intellij.ide.wizard.Step
import com.intellij.openapi.project.Project
import com.intellij.util.ui.JBUI
import org.jetbrains.sbt.annotator.dependency.DependencyPlaceInfo
import org.jetbrains.plugins.scala.extensions
/**
* Created by afonichkin on 7/19/17.
*/
private class SbtPossiblePlacesStep(wizard: SbtArtifactSearchWizard, project: Project, fileLines: Seq[DependencyPlaceInfo])
extends Step {
val panel = new SbtPossiblePlacesPanel(project, wizard, fileLines)
override def _init(): Unit = {
wizard.setTitle("Place to add dependency")
wizard.setSize(JBUI.scale(800), JBUI.scale(750))
panel.myResultList.clearSelection()
extensions.inWriteAction {
panel.myCurEditor.getDocument.setText("// Select a place from the list above to enable this preview")
}
panel.updateUI()
}
override def getComponent: JComponent = panel
override def _commit(finishChosen: Boolean): Unit = {
if (finishChosen) {
wizard.resultFileLine = Option(panel.myResultList.getSelectedValue)
}
}
override def getIcon: Icon = null
override def getPreferredFocusedComponent: JComponent = panel
}
| jastice/intellij-scala | scala/scala-impl/src/org/jetbrains/sbt/annotator/dependency/ui/SbtPossiblePlacesStep.scala | Scala | apache-2.0 | 1,237 |
import atto._, Atto._
import cats.implicits._
import java.lang.String
import scala.{ Boolean, Char, Double, List, App }
import scala.Predef.charWrapper
object JsonExample extends Whitespace {
// Json AST
sealed trait JValue
case object JNull extends JValue
final case class JBoolean(value: Boolean) extends JValue
final case class JString(value: String) extends JValue
final case class JNumber(value: Double) extends JValue
final case class JArray(values: List[JValue]) extends JValue
final case class JObject(values: List[(String, JValue)]) extends JValue
// Invariant constructors
def jNull: JValue = JNull
def jBoolean(value: Boolean): JValue = JBoolean(value)
def jString(value: String): JValue = JString(value)
def jNumber(value: Double): JValue = JNumber(value)
def jArray(values: List[JValue]): JValue = JArray(values)
def jObject(values: List[(String, JValue)]): JValue = JObject(values)
// Bracketed, comma-separated sequence, internal whitespace allowed
def seq[A](open: Char, p: Parser[A], close: Char): Parser[List[A]] =
char(open).t ~> sepByT(p, char(',')) <~ char(close)
// Colon-separated pair, internal whitespace allowed
lazy val pair: Parser[(String, JValue)] =
pairByT(stringLiteral, char(':'), jexpr)
// Json Expression
lazy val jexpr: Parser[JValue] = delay {
stringLiteral -| jString |
seq('{', pair, '}') -| jObject |
seq('[', jexpr, ']') -| jArray |
double -| jNumber |
string("null") >| jNull |
string("true") >| jBoolean(true) |
string("false") >| jBoolean(false)
}
}
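// Illustrative usage sketch (not part of the original atto example): running the JSON
// parser over a small made-up document. `parseOnly` comes from the atto syntax imported
// at the top of this file.
object JsonParseSketch {
  import JsonExample._
  // For well-formed input this is expected to be ParseResult.Done with a JObject result.
  val result = jexpr.parseOnly("""{ "name": "atto", "version": 0.5 }""")
}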
// Some extre combinators and syntax for coping with whitespace. Something like this might be
// useful in core but it needs some thought.
trait Whitespace {
// Syntax for turning a parser into one that consumes trailing whitespace
implicit class TokenOps[A](self: Parser[A]) {
def t: Parser[A] =
      self <~ takeWhile(c => c.isSpaceChar || c === '\n')
}
// Delimited list
def sepByT[A](a: Parser[A], b: Parser[_]): Parser[List[A]] =
sepBy(a.t, b.t)
// Delimited pair, internal whitespace allowed
def pairByT[A,B](a: Parser[A], delim: Parser[_], b: Parser[B]): Parser[(A,B)] =
pairBy(a.t, delim.t, b)
}
object JsonTest extends App {
lazy val text = """
[
{
"id": 0,
"guid": "24e3bb35-f5da-47ac-910e-1aa6568938c8",
"isActive": true,
"balance": "$1,880.00",
"picture": "http://placehold.it/32x32",
"age": 21,
"name": "Gallegos Rich",
"gender": "male",
"company": "ZORROMOP",
"email": "[email protected]",
"phone": "+1 (996) 457-2721",
"address": "599 Dupont Street, Healy, New Mexico, 5501",
"about": "Adipisicing magna Lorem excepteur non sint aute sint anim exercitation ullamco voluptate eu dolor non. Sint fugiat incididunt consequat aliqua amet elit. Sint cillum nostrud aliqua minim culpa est.\\r\\n",
"registered": "2014-03-21T00:07:44 +07:00",
"latitude": 81,
"longitude": -133,
"tags": [
"mollit",
"anim",
"ad",
"laboris",
"quis",
"magna",
"reprehenderit"
],
"friends": [
{
"id": 0,
"name": "Stacy Burks"
},
{
"id": 1,
"name": "Higgins Weber"
},
{
"id": 2,
"name": "Elvira Blair"
}
],
"greeting": "Hello, Gallegos Rich! You have 4 unread messages.",
"favoriteFruit": "banana"
},
{
"id": 1,
"guid": "68ff4ed1-54b6-44b0-a3cc-5ac4a0785f28",
"isActive": false,
"balance": "$2,576.00",
"picture": "http://placehold.it/32x32",
"age": 36,
"name": "Kelley Cooke",
"gender": "male",
"company": "LIMOZEN",
"email": "[email protected]",
"phone": "+1 (858) 479-3389",
"address": "925 Howard Place, Wildwood, Pennsylvania, 992",
"about": "Amet aliqua ex in occaecat. Nostrud voluptate dolore elit deserunt enim enim dolor excepteur non enim. In commodo aute Lorem et nisi excepteur id nisi amet nisi. Ut Lorem consectetur id culpa labore tempor adipisicing eu ea quis. Qui aliqua eiusmod aute cupidatat tempor commodo incididunt amet enim eiusmod. Non qui est deserunt qui minim cillum commodo magna irure consequat.\\r\\n",
"registered": "2014-04-11T07:27:15 +07:00",
"latitude": 7,
"longitude": 78,
"tags": [
"minim",
"duis",
"duis",
"minim",
"sit",
"ea",
"incididunt"
],
"friends": [
{
"id": 0,
"name": "Brandi Trevino"
},
{
"id": 1,
"name": "Fuentes Daugherty"
},
{
"id": 2,
"name": "Gillespie Cash"
}
],
"greeting": "Hello, Kelley Cooke! You have 1 unread messages.",
"favoriteFruit": "strawberry"
},
{
"id": 2,
"guid": "201e59af-1b60-4a61-9d14-fb6db351b049",
"isActive": false,
"balance": "$1,339.00",
"picture": "http://placehold.it/32x32",
"age": 27,
"name": "Hope Delacruz",
"gender": "female",
"company": "COMCUBINE",
"email": "[email protected]",
"phone": "+1 (908) 467-2395",
"address": "216 Logan Street, Yardville, Vermont, 8018",
"about": "Et elit proident ut aute ea qui aute id elit. Sunt aliquip ad ipsum sit ut amet do nulla. Lorem aliquip voluptate Lorem veniam. Ea id reprehenderit et enim aliquip. Elit voluptate magna amet nulla excepteur aliquip. Mollit fugiat veniam Lorem dolore nulla sint et pariatur tempor.\\r\\n",
"registered": "2014-02-13T00:55:00 +08:00",
"latitude": 24,
"longitude": -54,
"tags": [
"mollit",
"nostrud",
"proident",
"aliquip",
"aliquip",
"do",
"excepteur"
],
"friends": [
{
"id": 0,
"name": "Durham Dunlap"
},
{
"id": 1,
"name": "Penny Dyer"
},
{
"id": 2,
"name": "Louella Warren"
}
],
"greeting": "Hello, Hope Delacruz! You have 9 unread messages.",
"favoriteFruit": "apple"
},
{
"id": 3,
"guid": "1fe07ccf-31d8-4a40-a5cb-8d29cce48630",
"isActive": false,
"balance": "$2,556.00",
"picture": "http://placehold.it/32x32",
"age": 34,
"name": "Lopez Cross",
"gender": "male",
"company": "QUONATA",
"email": "[email protected]",
"phone": "+1 (888) 483-2717",
"address": "222 Visitation Place, Katonah, Oklahoma, 833",
"about": "Aute minim exercitation sint sunt nisi proident. Adipisicing in duis officia ea qui aute. Sit officia duis consectetur aute cupidatat. Cillum reprehenderit elit veniam elit labore non ex officia. Elit est et nostrud ea minim mollit pariatur cillum fugiat magna nisi voluptate cillum officia.\\r\\n",
"registered": "2014-02-11T00:02:49 +08:00",
"latitude": 77,
"longitude": -101,
"tags": [
"commodo",
"eu",
"nulla",
"Lorem",
"laboris",
"exercitation",
"incididunt"
],
"friends": [
{
"id": 0,
"name": "Maritza Potter"
},
{
"id": 1,
"name": "Schmidt Todd"
},
{
"id": 2,
"name": "Chasity Carroll"
}
],
"greeting": "Hello, Lopez Cross! You have 5 unread messages.",
"favoriteFruit": "banana"
},
{
"id": 4,
"guid": "a44846f7-7204-445d-b11f-80c020262165",
"isActive": false,
"balance": "$2,388.00",
"picture": "http://placehold.it/32x32",
"age": 29,
"name": "Valentine Nguyen",
"gender": "male",
"company": "ECRATER",
"email": "[email protected]",
"phone": "+1 (927) 579-3317",
"address": "469 Tapscott Avenue, Titanic, Kentucky, 5275",
"about": "Amet ut veniam ullamco voluptate. Qui non aliqua irure ipsum aute. Velit aute deserunt est Lorem velit fugiat consequat ullamco cupidatat culpa eu. Aute sunt et esse laboris enim dolore deserunt veniam aliquip consectetur. Consectetur eiusmod laboris officia proident amet ut nostrud nostrud tempor veniam fugiat.\\r\\n",
"registered": "2014-04-11T03:12:48 +07:00",
"latitude": -82,
"longitude": 22,
"tags": [
"quis",
"eu",
"anim",
"aliquip",
"ullamco",
"occaecat",
"dolor"
],
"friends": [
{
"id": 0,
"name": "Dickson Santos"
},
{
"id": 1,
"name": "Tracey Mckenzie"
},
{
"id": 2,
"name": "Avila Terry"
}
],
"greeting": "Hello, Valentine Nguyen! You have 10 unread messages.",
"favoriteFruit": "banana"
},
{
"id": 5,
"guid": "50264b3b-0395-429b-8ec9-8c41821e84c4",
"isActive": true,
"balance": "$1,860.00",
"picture": "http://placehold.it/32x32",
"age": 32,
"name": "Holland Gibson",
"gender": "male",
"company": "PEARLESSA",
"email": "[email protected]",
"phone": "+1 (953) 442-3713",
"address": "909 Cass Place, Lithium, Alabama, 3836",
"about": "Sit pariatur exercitation tempor labore est. Incididunt fugiat pariatur amet in pariatur do magna pariatur id. Ad adipisicing est ad tempor reprehenderit aliqua quis esse nulla dolor. Magna consequat dolore culpa dolor amet excepteur deserunt minim consequat non cupidatat aliqua enim.\\r\\n",
"registered": "2014-03-03T12:16:22 +08:00",
"latitude": -47,
"longitude": 66,
"tags": [
"duis",
"cillum",
"irure",
"ut",
"consequat",
"sint",
"laboris"
],
"friends": [
{
"id": 0,
"name": "Hallie Thomas"
},
{
"id": 1,
"name": "Adele Joseph"
},
{
"id": 2,
"name": "Gayle Poole"
}
],
"greeting": "Hello, Holland Gibson! You have 10 unread messages.",
"favoriteFruit": "banana"
},
{
"id": 6,
"guid": "ddff18fc-6d88-4200-bf23-bf68b711eada",
"isActive": true,
"balance": "$2,160.00",
"picture": "http://placehold.it/32x32",
"age": 27,
"name": "Gibson Lane",
"gender": "male",
"company": "COMVEYOR",
"email": "[email protected]",
"phone": "+1 (990) 599-3696",
"address": "399 Huntington Street, Brownsville, Florida, 7576",
"about": "Magna anim enim aute proident duis sint. Culpa sint ipsum elit consectetur et. Quis nostrud occaecat consequat sint cillum ea eiusmod velit ex fugiat aliqua reprehenderit non minim. Anim ad nisi et Lorem ullamco nulla eiusmod qui pariatur qui laborum deserunt cupidatat.\\r\\n",
"registered": "2014-03-27T14:50:16 +07:00",
"latitude": 8,
"longitude": 86,
"tags": [
"ad",
"id",
"ad",
"duis",
"commodo",
"consectetur",
"Lorem"
],
"friends": [
{
"id": 0,
"name": "Doreen Macdonald"
},
{
"id": 1,
"name": "Geraldine Buchanan"
},
{
"id": 2,
"name": "Imelda Mclaughlin"
}
],
"greeting": "Hello, Gibson Lane! You have 5 unread messages.",
"favoriteFruit": "strawberry"
}
]
""".trim
}
| tpolecat/atto | modules/docs/src/main/scala/json.scala | Scala | mit | 12,650 |
package com.datamountaineer.streamreactor.connect.pulsar
import com.datamountaineer.streamreactor.connect.pulsar.config.{PulsarConfigConstants, PulsarSinkConfig, PulsarSinkSettings}
import org.apache.pulsar.client.api.CompressionType
import org.apache.pulsar.client.api.ProducerConfiguration.MessageRoutingMode
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
import scala.collection.JavaConverters._
/**
* Created by [email protected] on 23/01/2018.
* stream-reactor
*/
class ProducerConfigFactoryTest extends AnyWordSpec with Matchers {
val pulsarTopic = "persistent://landoop/standalone/connect/kafka-topic"
"should create a SinglePartition with batching" in {
val config = PulsarSinkConfig(Map(
PulsarConfigConstants.HOSTS_CONFIG -> "pulsar://localhost:6650",
PulsarConfigConstants.KCQL_CONFIG -> s"INSERT INTO $pulsarTopic SELECT * FROM kafka_topic BATCH = 10 WITHPARTITIONER = SinglePartition WITHCOMPRESSION = ZLIB WITHDELAY = 1000"
).asJava)
val settings = PulsarSinkSettings(config)
val producerConfig = ProducerConfigFactory("test", settings.kcql)
producerConfig(pulsarTopic).getBatchingEnabled shouldBe true
producerConfig(pulsarTopic).getBatchingMaxMessages shouldBe 10
producerConfig(pulsarTopic).getBatchingMaxPublishDelayMs shouldBe 1000
producerConfig(pulsarTopic).getCompressionType shouldBe CompressionType.ZLIB
producerConfig(pulsarTopic).getMessageRoutingMode shouldBe MessageRoutingMode.SinglePartition
}
"should create a CustomPartition with no batching and no compression" in {
val config = PulsarSinkConfig(Map(
PulsarConfigConstants.HOSTS_CONFIG -> "pulsar://localhost:6650",
PulsarConfigConstants.KCQL_CONFIG -> s"INSERT INTO $pulsarTopic SELECT * FROM kafka_topic WITHPARTITIONER = CustomPartition"
).asJava)
val settings = PulsarSinkSettings(config)
val producerConfig = ProducerConfigFactory("test", settings.kcql)
producerConfig(pulsarTopic).getBatchingEnabled shouldBe false
producerConfig(pulsarTopic).getCompressionType shouldBe CompressionType.NONE
producerConfig(pulsarTopic).getMessageRoutingMode shouldBe MessageRoutingMode.CustomPartition
}
"should create a roundrobin with batching and no compression no delay" in {
val config = PulsarSinkConfig(Map(
PulsarConfigConstants.HOSTS_CONFIG -> "pulsar://localhost:6650",
PulsarConfigConstants.KCQL_CONFIG -> s"INSERT INTO $pulsarTopic SELECT * FROM kafka_topic BATCH = 10 WITHPARTITIONER = ROUNDROBINPARTITION"
).asJava)
val settings = PulsarSinkSettings(config)
val producerConfig = ProducerConfigFactory("test", settings.kcql)
producerConfig(pulsarTopic).getBatchingEnabled shouldBe true
producerConfig(pulsarTopic).getBatchingEnabled shouldBe true
producerConfig(pulsarTopic).getBatchingMaxMessages shouldBe 10
producerConfig(pulsarTopic).getBatchingMaxPublishDelayMs shouldBe 10
producerConfig(pulsarTopic).getCompressionType shouldBe CompressionType.NONE
producerConfig(pulsarTopic).getMessageRoutingMode shouldBe MessageRoutingMode.RoundRobinPartition
}
}
| datamountaineer/stream-reactor | kafka-connect-pulsar/src/test/scala/com/datamountaineer/streamreactor/connect/pulsar/ProducerConfigFactoryTest.scala | Scala | apache-2.0 | 3,171 |
package com.monsanto.arch.kamon.prometheus.converter
import com.monsanto.arch.kamon.prometheus.PrometheusSettings
import com.monsanto.arch.kamon.prometheus.metric.{Metric, MetricFamily, MetricValue, PrometheusType}
import kamon.metric.SubscriptionsDispatcher.TickMetricSnapshot
import kamon.metric.instrument._
import kamon.metric.{Entity, SingleInstrumentEntityRecorder}
import kamon.util.MilliTimestamp
/** Maps metrics from Kamon to a structure suitable for Prometheus.
*
* Kamon tracks entities associated with one or more instruments, e.g. a
* counter or a gauge. In the Kamon data model, each entity is uniquely
* identified by a category, a name, and a set of tags (arbitrary string
* key-value mappings). Kamon supports four different types of instruments:
* counters, histograms, min-max counters, and gauges. This differs from
* Prometheus' data model.
*
* In Prometheus, there is no concept of a category, just metric names.
* Prometheus also accepts something like tags, called labels. Prometheus
* does support several different types: counters, gauges, histograms, and
* summaries. In these ways, Prometheus is similar to Kamon. However, in
* Prometheus, each metric can only be of one type. Additionally, metrics
* with different labels can have the same name (in fact, labels are used to
* support the more complex types).
*
* This class exists to bridge the two different data models.
*
* @see [[http://prometheus.io/docs/concepts/data_model/ Prometheus: Data Model]]
* @see [[http://prometheus.io/docs/concepts/metric_types/ Prometheus: Metric Types]]
* @see [[http://kamon.io/core/metrics/core-concepts/ Kamon: Core Concepts]]
* @see [[http://kamon.io/core/metrics/instruments/ Kamon: Metric Recording Instruments]]
*
* @author Daniel Solano Gรณmez
*/
class SnapshotConverter(settings: PrometheusSettings) {
import SnapshotConverter._
/** Transforms metric snapshots before being converted. */
val preprocess: Preprocessor = new DefaultPreprocessor
/** Transforms metric families post-conversion. */
val postprocess: Postprocessor = new DefaultPostprocessor
/** Converts a metric snapshot into a sequence of metric families. */
def apply(tick: TickMetricSnapshot): Seq[MetricFamily] = {
type Category = String
type Name = String
// first, regroup data from (name, category, tags) โ snapshot to category โ name โ [(tags, snapshot)]
val data = for ((Entity(name, category, rawTags), snapshot) โ tick.metrics) yield {
// also, while we are here, letโs do what we need to do to the tags
val fullTags = rawTags ++ settings.labels + (KamonCategoryLabel โ category) + (KamonNameLabel โ name)
val mungedTags = fullTags.map(entry โ Mungers.asLabelName(entry._1) โ entry._2)
preprocess(MetricSnapshot(category, name, mungedTags, snapshot))
}
val byCategoryData = data.groupBy(_.category)
byCategoryData.flatMap { case (category, categoryData) โ
category match {
case SingleInstrumentEntityRecorder.Counter โ
categoryData.groupBy(_.name).map { case (_, snapshots) โ makeCounterMetricFamily(snapshots, tick.to) }
case SingleInstrumentEntityRecorder.Histogram โ
categoryData.groupBy(_.name).map { case (_, snapshots) โ makeHistogramMetricFamily(snapshots, tick.to) }
case SingleInstrumentEntityRecorder.MinMaxCounter โ
categoryData.groupBy(_.name).map { case (_, snapshots) โ makeMinMaxCounterMetricFamily(snapshots, tick.to) }
case SingleInstrumentEntityRecorder.Gauge โ
categoryData.groupBy(_.name).map { case (_, snapshots) โ makeGaugeMetricFamily(snapshots, tick.to) }
case _ โ
makeArbitraryMetricFamilies(categoryData, tick.to)
}
}.map(postprocess(_)).toList
}
/** Builds a metric family corresponding to a counter. */
def makeCounterMetricFamily(snapshots: Iterable[MetricSnapshot], timestamp: MilliTimestamp) = {
assert(snapshots.nonEmpty, "A metric family requires at least one member")
assert(snapshots.forall(_.category == SingleInstrumentEntityRecorder.Counter),
"All snapshots must be counter snapshots.")
assert(snapshots.map(_.name).toSet.size == 1, "All snapshots must have the same name")
val metrics = {
snapshots.map { metricSnapshot โ
val snapshot = metricSnapshot.value
assert(snapshot.gauges.isEmpty, "A counter should not have any gauge values")
assert(snapshot.histograms.isEmpty, "A counter should not have any histogram values")
assert(snapshot.minMaxCounters.isEmpty, "A counter should not have any minMaxCounter values")
assert(snapshot.counters.size == 1, "A counter should only have one counter value")
val value = snapshot.counter("counter")
assert(value.isDefined, "A counterโs value should have the name โcounterโ")
Metric(MetricValue.Counter(value.get.count), timestamp, metricSnapshot.tags)
}
}
// TODO: handle help
val suffix = SnapshotConverter.unitSuffix(snapshots.head.value.counters.head._1.unitOfMeasurement)
MetricFamily(Mungers.asMetricName(snapshots.head.name + suffix), PrometheusType.Counter, None, metrics.toSeq)
}
/** Builds a metric family corresponding to a Kamon histogram. */
def makeHistogramMetricFamily(snapshots: Iterable[MetricSnapshot], timestamp: MilliTimestamp) = {
assert(snapshots.nonEmpty, "A metric family requires at least one member")
assert(snapshots.forall(_.category == SingleInstrumentEntityRecorder.Histogram),
"All snapshots must be histogram snapshots.")
assert(snapshots.map(_.name).toSet.size == 1, "All snapshots must have the same name")
val metrics = {
snapshots.map { member โ
val snapshot = member.value
assert(snapshot.gauges.isEmpty, "A histogram should not have any gauge values")
assert(snapshot.counters.isEmpty, "A histogram should not have any counter values")
assert(snapshot.minMaxCounters.isEmpty, "A histogram should not have any minMaxCounter values")
assert(snapshot.histograms.size == 1, "A histogram should only have one histogram value")
assert(snapshot.histogram("histogram").isDefined, "A histogramโs value should have the name โhistogramโ")
val value = MetricValue.Histogram(snapshot.histogram("histogram").get)
Metric(value, timestamp, member.tags)
}
}
val suffix = SnapshotConverter.unitSuffix(snapshots.head.value.histograms.head._1.unitOfMeasurement)
// TODO: handle help
MetricFamily(Mungers.asMetricName(snapshots.head.name + suffix), PrometheusType.Histogram, None, metrics.toSeq)
}
/** Builds a metric family corresponding to a Kamon min-max counter. */
def makeMinMaxCounterMetricFamily(snapshots: Iterable[MetricSnapshot], timestamp: MilliTimestamp) = {
assert(snapshots.nonEmpty, "A metric family requires at least one member")
assert(snapshots.forall(_.category == SingleInstrumentEntityRecorder.MinMaxCounter),
"All snapshots must be min-max counter snapshots.")
assert(snapshots.map(_.name).toSet.size == 1, "All snapshots must have the same name")
val metrics = snapshots.map { member โ
val snapshot = member.value
assert(snapshot.gauges.isEmpty, "A min-max counter should not have any gauge values")
assert(snapshot.histograms.isEmpty, "A min-max counter should not have any histogram values")
assert(snapshot.counters.isEmpty, "A min-max counter should not have any counter values")
assert(snapshot.minMaxCounters.size == 1, "A min-max counter should only have one min-max counter value")
assert(snapshot.minMaxCounter("min-max-counter").isDefined, "A min-max counterโs value should have the name " +
"โmin-max-counterโ")
val value = MetricValue.Histogram(snapshot.minMaxCounter("min-max-counter").get)
Metric(value, timestamp, member.tags)
}
val suffix = SnapshotConverter.unitSuffix(snapshots.head.value.minMaxCounters.head._1.unitOfMeasurement)
// TODO: handle help
MetricFamily(Mungers.asMetricName(snapshots.head.name + suffix), PrometheusType.Histogram, None, metrics.toSeq)
}
/** Builds a metric family corresponding to a Kamon gauge. */
def makeGaugeMetricFamily(snapshots: Iterable[MetricSnapshot], timestamp: MilliTimestamp) = {
assert(snapshots.nonEmpty, "A metric family requires at least one member")
assert(snapshots.forall(_.category == SingleInstrumentEntityRecorder.Gauge),
"All snapshots must be gauge snapshots.")
assert(snapshots.map(_.name).toSet.size == 1, "All snapshots must have the same name")
val metrics = snapshots.map { member โ
val snapshot = member.value
assert(snapshot.minMaxCounters.isEmpty, "A gauge should not have any min-max counter values")
assert(snapshot.gauges.size == 1, "A gauge should only have one min-max counter value")
assert(snapshot.gauge("gauge").isDefined, "A gaugeโs value should have the name โgaugeโ")
assert(snapshot.histograms.isEmpty, "A gauge should not have any histogram values")
assert(snapshot.counters.isEmpty, "A gauge should not have any counter values")
val value = MetricValue.Histogram(snapshot.gauge("gauge").get)
Metric(value, timestamp, member.tags)
}.toList
val suffix = SnapshotConverter.unitSuffix(snapshots.head.value.gauges.head._1.unitOfMeasurement)
// TODO: handle help
MetricFamily(Mungers.asMetricName(snapshots.head.name + suffix), PrometheusType.Histogram, None, metrics.toSeq)
}
/** Constructs a list of metric families for an arbitrary entity recorder. Since these may have more than one
* instrument, it is necessary to rearrange the data so that we get one metric family per instrument.
*/
def makeArbitraryMetricFamilies(snapshots: Iterable[MetricSnapshot], timestamp: MilliTimestamp): Seq[MetricFamily] = {
assert(snapshots.nonEmpty, "Must supply at least one member")
assert(snapshots.map(_.category).toSet.size == 1, "All snapshots must have the same category")
val category = snapshots.head.category
// splat out snapshots into tuples of the instrument key, tags, and instrument value. We ignore category since it
// is the same for all values. We ignore names since they have been included in the tags.
val instrumentSnapshots = for {
snapshot โ snapshots
(key, value) โ snapshot.value.metrics
} yield (key, snapshot.tags, value)
// group the data by instrument key
val groupedInstrumentSnapshots = instrumentSnapshots.groupBy(_._1)
// Now, create one metric family per instrument
groupedInstrumentSnapshots.map { case (key, data) โ
assert(data.nonEmpty, "There must be data!")
assert(data.map(_._3.getClass).toSet.size == 1, "All values for a given metric key must have the same type.")
val familyName = Mungers.asMetricName(s"${category}_${key.name}${unitSuffix(key.unitOfMeasurement)}")
val prometheusType = data.head._3 match {
case _: Counter.Snapshot โ PrometheusType.Counter
case _: Histogram.Snapshot โ PrometheusType.Histogram
}
val metrics = data.map { case (_, tags, snapshot) โ
snapshot match {
case c: Counter.Snapshot โ Metric(MetricValue.Counter(c.count), timestamp, tags)
case h: Histogram.Snapshot โ Metric(MetricValue.Histogram(h), timestamp, tags)
}
}
MetricFamily(familyName, prometheusType, None, metrics.toSeq)
}.toSeq
}
}
object SnapshotConverter {
/** Label used to report the original Kamon category to Prometheus. */
val KamonCategoryLabel = "kamon_category"
/** Label used to report the original Kamon name to Prometheus. */
val KamonNameLabel = "kamon_name"
def unitSuffix(unitOfMeasurement: UnitOfMeasurement): String = {
unitOfMeasurement match {
case UnitOfMeasurement.Unknown โ ""
case Time.Nanoseconds โ "_nanoseconds"
case Time.Microseconds โ "_microseconds"
case Time.Milliseconds โ "_milliseconds"
case Time.Seconds โ "_seconds"
case Memory.Bytes โ "_bytes"
case Memory.KiloBytes โ "_kilobytes"
case Memory.MegaBytes โ "_megabytes"
case Memory.GigaBytes โ "_gigabytes"
case x โ "_" + x.label
}
}
}
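// Illustrative sketch (not part of the original sources): the suffixes produced by
// SnapshotConverter.unitSuffix for a few of the Kamon units handled above. The unit
// types come from the kamon.metric.instrument._ import at the top of this file.
object UnitSuffixSketch {
  val millis = SnapshotConverter.unitSuffix(Time.Milliseconds)          // "_milliseconds"
  val bytes = SnapshotConverter.unitSuffix(Memory.Bytes)                // "_bytes"
  val unknown = SnapshotConverter.unitSuffix(UnitOfMeasurement.Unknown) // ""
}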
| MonsantoCo/kamon-prometheus | library/src/main/scala/com/monsanto/arch/kamon/prometheus/converter/SnapshotConverter.scala | Scala | bsd-3-clause | 12,436 |
/*
Copyright 2013 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.summingbird.storm
import backtype.storm.{ Config => BacktypeStormConfig, LocalCluster, Testing }
import backtype.storm.generated.StormTopology
import backtype.storm.testing.{ CompleteTopologyParam, MockedSources }
import com.twitter.algebird.{ MapAlgebra, Semigroup }
import com.twitter.storehaus.{ ReadableStore, JMapStore }
import com.twitter.storehaus.algebra.MergeableStore
import com.twitter.summingbird._
import com.twitter.summingbird.batch.{ BatchID, Batcher }
import com.twitter.summingbird.storm.spout.TraversableSpout
import com.twitter.summingbird.storm.option._
import com.twitter.summingbird.online._
import com.twitter.summingbird.memory._
import com.twitter.summingbird.planner._
import com.twitter.tormenta.spout.Spout
import com.twitter.util.Future
import java.util.{ Collections, HashMap, Map => JMap, UUID }
import java.util.concurrent.atomic.AtomicInteger
import org.specs2.mutable._
import org.scalacheck._
import org.scalacheck.Prop._
import org.scalacheck.Properties
import scala.collection.JavaConverters._
import scala.collection.mutable.{
ArrayBuffer,
HashMap => MutableHashMap,
Map => MutableMap,
SynchronizedBuffer,
SynchronizedMap
}
import java.security.Permission
/**
* Tests for Summingbird's Storm planner.
*/
object StormLaws extends Specification {
sequential
import MapAlgebra.sparseEquiv
// This is dangerous, obviously. The Storm platform graphs tested
// here use the UnitBatcher, so the actual time extraction isn't
// needed.
implicit def extractor[T]: TimeExtractor[T] = TimeExtractor(_ => 0L)
implicit val batcher = Batcher.unit
val testFn = sample[Int => List[(Int, Int)]]
implicit val storm = Storm.local(Map())
def sample[T: Arbitrary]: T = Arbitrary.arbitrary[T].sample.get
def genStore: (String, Storm#Store[Int, Int]) = TestStore.createStore[Int, Int]()
def genSink: () => ((Int) => Future[Unit]) = () => { x: Int =>
append(x)
Future.Unit
}
def memoryPlanWithoutSummer(original: List[Int])(mkJob: (Producer[Memory, Int], Memory#Sink[Int]) => TailProducer[Memory, Int]): List[Int] = {
val memory = new Memory
val outputList = ArrayBuffer[Int]()
val sink: (Int) => Unit = { x: Int => outputList += x }
val job = mkJob(
Memory.toSource(original),
sink
)
val topo = memory.plan(job)
memory.run(topo)
outputList.toList
}
val outputList = new ArrayBuffer[Int] with SynchronizedBuffer[Int]
def append(x: Int): Unit = {
StormLaws.outputList += x
}
def runWithOutSummer(original: List[Int])(mkJob: (Producer[Storm, Int], Storm#Sink[Int]) => TailProducer[Storm, Int]): List[Int] = {
val cluster = new LocalCluster()
val job = mkJob(
Storm.source(TraversableSpout(original)),
Storm.sink[Int]({ (x: Int) => append(x); Future.Unit })
)
StormTestRun(job)
StormLaws.outputList.toList
}
val nextFn = { pair: ((Int, (Int, Option[Int]))) =>
val (k, (v, joinedV)) = pair
List((k -> joinedV.getOrElse(10)))
}
val nextFn1 = { pair: ((Int, Option[Int])) =>
val (v, joinedV) = pair
List((joinedV.getOrElse(10)))
}
val serviceFn = sample[Int => Option[Int]]
val service = ReadableServiceFactory[Int, Int](() => ReadableStore.fromFn(serviceFn))
// ALL TESTS START AFTER THIS LINE
"StormPlatform matches Scala for single step jobs" in {
val original = sample[List[Int]]
val returnedState =
StormTestRun.simpleRun[Int, Int, Int](original,
TestGraphs.singleStepJob[Storm, Int, Int, Int](_, _)(testFn)
)
Equiv[Map[Int, Int]].equiv(
TestGraphs.singleStepInScala(original)(testFn),
returnedState.toScala
) must beTrue
}
"FlatMap to nothing" in {
val original = sample[List[Int]]
val fn = { (x: Int) => List[(Int, Int)]() }
val returnedState =
StormTestRun.simpleRun[Int, Int, Int](original,
TestGraphs.singleStepJob[Storm, Int, Int, Int](_, _)(fn)
)
Equiv[Map[Int, Int]].equiv(
TestGraphs.singleStepInScala(original)(fn),
returnedState.toScala
) must beTrue
}
"OptionMap and FlatMap" in {
val original = sample[List[Int]]
val fnA = sample[Int => Option[Int]]
val fnB = sample[Int => List[(Int, Int)]]
val returnedState =
StormTestRun.simpleRun[Int, Int, Int](original,
TestGraphs.twinStepOptionMapFlatMapJob[Storm, Int, Int, Int, Int](_, _)(fnA, fnB)
)
Equiv[Map[Int, Int]].equiv(
TestGraphs.twinStepOptionMapFlatMapScala(original)(fnA, fnB),
returnedState.toScala
) must beTrue
}
"OptionMap to nothing and FlatMap" in {
val original = sample[List[Int]]
val fnA = { (x: Int) => None }
val fnB = sample[Int => List[(Int, Int)]]
val returnedState =
StormTestRun.simpleRun[Int, Int, Int](original,
TestGraphs.twinStepOptionMapFlatMapJob[Storm, Int, Int, Int, Int](_, _)(fnA, fnB)
)
Equiv[Map[Int, Int]].equiv(
TestGraphs.twinStepOptionMapFlatMapScala(original)(fnA, fnB),
returnedState.toScala
) must beTrue
}
"StormPlatform matches Scala for large expansion single step jobs" in {
val original = sample[List[Int]]
val expander = sample[Int => List[(Int, Int)]]
val expansionFunc = { (x: Int) =>
expander(x).flatMap { case (k, v) => List((k, v), (k, v), (k, v), (k, v), (k, v)) }
}
val returnedState =
StormTestRun.simpleRun[Int, Int, Int](original,
TestGraphs.singleStepJob[Storm, Int, Int, Int](_, _)(expansionFunc)
)
Equiv[Map[Int, Int]].equiv(
TestGraphs.singleStepInScala(original)(expansionFunc),
returnedState.toScala
) must beTrue
}
"StormPlatform matches Scala for flatmap keys jobs" in {
val original = List(1, 2, 3, 4, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 41, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 41) // sample[List[Int]]
val fnA = sample[Int => List[(Int, Int)]]
val fnB = sample[Int => List[Int]]
val returnedState =
StormTestRun.simpleRun[Int, Int, Int](original,
TestGraphs.singleStepMapKeysJob[Storm, Int, Int, Int, Int](_, _)(fnA, fnB)
)
Equiv[Map[Int, Int]].equiv(
TestGraphs.singleStepMapKeysInScala(original)(fnA, fnB),
returnedState.toScala
) must beTrue
}
"StormPlatform matches Scala for left join jobs" in {
val original = sample[List[Int]]
val staticFunc = { i: Int => List((i -> i)) }
val returnedState =
StormTestRun.simpleRun[Int, Int, Int](original,
TestGraphs.leftJoinJob[Storm, Int, Int, Int, Int, Int](_, service, _)(staticFunc)(nextFn)
)
Equiv[Map[Int, Int]].equiv(
TestGraphs.leftJoinInScala(original)(serviceFn)(staticFunc)(nextFn),
returnedState.toScala
) must beTrue
}
"StormPlatform matches Scala for left join with flatMapValues jobs" in {
val original = sample[List[Int]]
val staticFunc = { i: Int => List((i -> i)) }
val returnedState =
StormTestRun.simpleRun[Int, Int, Int](original,
TestGraphs.leftJoinJobWithFlatMapValues[Storm, Int, Int, Int, Int, Int](_, service, _)(staticFunc)(nextFn1)
)
Equiv[Map[Int, Int]].equiv(
TestGraphs.leftJoinWithFlatMapValuesInScala(original)(serviceFn)(staticFunc)(nextFn1),
returnedState.toScala
) must beTrue
}
"StormPlatform matches Scala for repeated tuple leftJoin jobs" in {
val original = sample[List[Int]]
val staticFunc = { i: Int => List((i -> i)) }
val returnedState =
StormTestRun.simpleRun[Int, Int, Int](original,
TestGraphs.repeatedTupleLeftJoinJob[Storm, Int, Int, Int, Int, Int](_, service, _)(staticFunc)(nextFn)
)
Equiv[Map[Int, Int]].equiv(
TestGraphs.repeatedTupleLeftJoinInScala(original)(serviceFn)(staticFunc)(nextFn),
returnedState.toScala
) must beTrue
}
"StormPlatform matches Scala for optionMap only jobs" in {
val original = sample[List[Int]]
val (id, storeSupplier) = genStore
val cluster = new LocalCluster()
val producer =
Storm.source(TraversableSpout(original))
.filter(_ % 2 == 0)
.map(_ -> 10)
.sumByKey(storeSupplier)
StormTestRun(producer)
Equiv[Map[Int, Int]].equiv(
MapAlgebra.sumByKey(original.filter(_ % 2 == 0).map(_ -> 10)),
TestStore[Int, Int](id).get.toScala
) must beTrue
}
"StormPlatform matches Scala for MapOnly/NoSummer" in {
val original = sample[List[Int]]
val doubler = { x: Int => List(x * 2) }
val stormOutputList =
runWithOutSummer(original)(
TestGraphs.mapOnlyJob[Storm, Int, Int](_, _)(doubler)
).sorted
val memoryOutputList =
memoryPlanWithoutSummer(original)(TestGraphs.mapOnlyJob[Memory, Int, Int](_, _)(doubler)).sorted
stormOutputList must_== (memoryOutputList)
}
"StormPlatform with multiple summers" in {
val original = List(1, 2, 3, 4, 5, 6, 7, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 41, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 41) // sample[List[Int]]
val doubler = { (x): (Int) => List((x -> x * 2)) }
val simpleOp = { (x): (Int) => List(x * 10) }
val source = Storm.source(TraversableSpout(original))
val (store1Id, store1) = genStore
val (store2Id, store2) = genStore
val tail = TestGraphs.multipleSummerJob[Storm, Int, Int, Int, Int, Int, Int](source, store1, store2)(simpleOp, doubler, doubler)
StormTestRun(tail)
val (scalaA, scalaB) = TestGraphs.multipleSummerJobInScala(original)(simpleOp, doubler, doubler)
val store1Map = TestStore[Int, Int](store1Id).get.toScala
val store2Map = TestStore[Int, Int](store2Id).get.toScala
Equiv[Map[Int, Int]].equiv(
scalaA,
store1Map
) must beTrue
Equiv[Map[Int, Int]].equiv(
scalaB,
store2Map
) must beTrue
}
"StormPlatform should be efficent in real world job" in {
val original1 = sample[List[Int]]
val original2 = sample[List[Int]]
val original3 = sample[List[Int]]
val original4 = sample[List[Int]]
val source1 = Storm.source(TraversableSpout(original1))
val source2 = Storm.source(TraversableSpout(original2))
val source3 = Storm.source(TraversableSpout(original3))
val source4 = Storm.source(TraversableSpout(original4))
val fn1 = sample[(Int) => List[(Int, Int)]]
val fn2 = sample[(Int) => List[(Int, Int)]]
val fn3 = sample[(Int) => List[(Int, Int)]]
val (store1Id, store1) = genStore
val preJoinFn = sample[(Int) => (Int, Int)]
val postJoinFn = sample[((Int, (Int, Option[Int]))) => List[(Int, Int)]]
val serviceFn = sample[Int => Option[Int]]
val service = ReadableServiceFactory[Int, Int](() => ReadableStore.fromFn(serviceFn))
val tail = TestGraphs.realJoinTestJob[Storm, Int, Int, Int, Int, Int, Int, Int, Int, Int](source1, source2, source3, source4,
service, store1, fn1, fn2, fn3, preJoinFn, postJoinFn)
OnlinePlan(tail).nodes.size must beLessThan(10)
StormTestRun(tail)
val scalaA = TestGraphs.realJoinTestJobInScala(original1, original2, original3, original4,
serviceFn, fn1, fn2, fn3, preJoinFn, postJoinFn)
val store1Map = TestStore[Int, Int](store1Id).get.toScala
Equiv[Map[Int, Int]].equiv(
scalaA,
store1Map
) must beTrue
}
}
| rangadi/summingbird | summingbird-storm-test/src/test/scala/com/twitter/summingbird/storm/StormLaws.scala | Scala | apache-2.0 | 11,862 |
package edu.berkeley.cs.boom.bloomscala
import edu.berkeley.cs.boom.bloomscala.ast.Program
import scala.collection.{GenSeq, GenMap}
import edu.berkeley.cs.boom.bloomscala.analysis.{Stratifier, DepAnalyzer, Stratum}
// TODO: this test currently has a ton of code duplication.
// This will be eliminated once the stratification results are embedded into rules
// via a rewriting phase rather than relying on attribution.
class StratifierSuite extends BloomScalaSuite {
def isStratifiable(source: String) = {
val program = Compiler.nameAndType(source)
val depAnalyzer = new DepAnalyzer(program)
val stratifier = new Stratifier(depAnalyzer)
import stratifier._
program->isTemporallyStratifiable
}
def getCollectionStrata(program: Program): GenMap[String, Stratum] = {
val depAnalyzer = new DepAnalyzer(program)
val stratifier = new Stratifier(depAnalyzer)
import stratifier._
program.declarations.map(d => (d.name, collectionStratum(d))).toMap
}
def getRuleStrata(program: Program): GenSeq[Stratum] = {
val depAnalyzer = new DepAnalyzer(program)
val stratifier = new Stratifier(depAnalyzer)
import stratifier._
program.statements.map(ruleStratum).toSeq
}
test("Positive programs should have only one stratum") {
val program = Compiler.compileToIntermediateForm(
"""
| table link, [from: string, to: string, cost: int]
| table path, [from: string, to: string, nxt: string, cost: int]
| path <= link {|l| [l.from, l.to, l.to, l.cost]}
| path <= (link * path) on (link.to == path.from) { |l, p|
| [l.from, p.to, l.to, l.cost+p.cost]
| }
""".stripMargin)
val depAnalyzer = new DepAnalyzer(program)
val stratifier = new Stratifier(depAnalyzer)
import stratifier._
assert(program->isTemporallyStratifiable)
assert(getCollectionStrata(program).values.toSet.size === 1)
assert(getRuleStrata(program).toSet.size === 1)
}
test("Collections should be placed in higher strata than their negated dependencies") {
val program = Compiler.compileToIntermediateForm(
"""
| table a, [val: int]
| table b, [val: int]
| table c, [val: int]
| c <= a.notin(b)
""".stripMargin
)
val depAnalyzer = new DepAnalyzer(program)
val stratifier = new Stratifier(depAnalyzer)
import stratifier._
assert(program->isTemporallyStratifiable)
val strata = getCollectionStrata(program)
assert(strata("a") === strata("b"))
assert(strata("c") > strata("b"))
assert(getRuleStrata(program).head === strata("c"))
}
test("Cycles with temporal negation should still be stratifiable") {
assert(isStratifiable(
"""
| table a, [val: int]
| table b, [val: int]
| a <+ b.notin(a)
| b <+ a.notin(b)
""".stripMargin))
}
test("Cycles with immediate negation should be unstratifiable") {
assert(!isStratifiable(
"""
| table a, [val: int]
| table b, [val: int]
| b <= a.notin(b)
| a <= b.notin(a)
""".stripMargin))
}
test("Positive cycles should be stratifiable") {
assert(isStratifiable(
"""
| table a, [val: int]
| table b, [val: int]
| b <= a
| a <= b
""".stripMargin))
}
test("Dependencies used in non-monotonic contexts are evaluated in lower strata") {
val program = Compiler.compileToIntermediateForm(
"""
| table a, [key: int, val: int]
        | table b, [key: int, val: int]
| b <= a.argmin([a.key], a.val, intOrder)
""".stripMargin
)
val depAnalyzer = new DepAnalyzer(program)
val stratifier = new Stratifier(depAnalyzer)
import stratifier._
assert(program->isTemporallyStratifiable)
val strata = getCollectionStrata(program)
assert(strata("a") === Stratum(0))
assert(strata("b") === Stratum(1))
assert(getRuleStrata(program).head === Stratum(1))
}
}
| JoshRosen/bloom-compiler | compiler/src/test/scala/edu/berkeley/cs/boom/bloomscala/StratifierSuite.scala | Scala | bsd-3-clause | 4,144 |
package eventstreams.sources.filetailer
trait FileSystemComponent {
def fileSystem: FileSystem
}
| intelix/eventstreams | es-sources/es-source-file/src/main/scala/eventstreams/sources/filetailer/FileSystemComponent.scala | Scala | apache-2.0 | 104 |
package i1202a
class Test[T] {
def testMethod: Unit =
new Foo(this)
}
class Foo[T]() {
def this(ct: Test[T]) = this()
}
| som-snytt/dotty | tests/pos/i1202a.scala | Scala | apache-2.0 | 129 |
package pl.touk.nussknacker.engine.flink.util.transformer
import cats.data.ValidatedNel
import org.apache.flink.api.common.state.ValueStateDescriptor
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.streaming.api.functions.KeyedProcessFunction
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.runtime.operators.windowing.TimestampedValue
import org.apache.flink.util.Collector
import pl.touk.nussknacker.engine.api._
import pl.touk.nussknacker.engine.api.context.{ContextTransformation, JoinContextTransformation, ProcessCompilationError, ValidationContext}
import pl.touk.nussknacker.engine.api.typed.typing.{Typed, TypedObjectTypingResult}
import pl.touk.nussknacker.engine.flink.api.compat.ExplicitUidInOperatorsSupport
import pl.touk.nussknacker.engine.flink.api.process.{FlinkCustomJoinTransformation, FlinkCustomNodeContext}
import pl.touk.nussknacker.engine.flink.api.state.LatelyEvictableStateFunction
import pl.touk.nussknacker.engine.flink.api.timestampwatermark.TimestampWatermarkHandler
import pl.touk.nussknacker.engine.flink.util.keyed.{StringKeyedValue, StringKeyedValueMapper}
import pl.touk.nussknacker.engine.flink.util.timestamp.TimestampAssignmentHelper
import pl.touk.nussknacker.engine.flink.util.transformer.UnionWithMemoTransformer.KeyField
import pl.touk.nussknacker.engine.api.NodeId
import pl.touk.nussknacker.engine.util.KeyedValue
import java.time.Duration
object UnionWithMemoTransformer extends UnionWithMemoTransformer(None)
class UnionWithMemoTransformer(timestampAssigner: Option[TimestampWatermarkHandler[TimestampedValue[ValueWithContext[StringKeyedValue[(String, AnyRef)]]]]])
extends CustomStreamTransformer with ExplicitUidInOperatorsSupport {
val KeyField = "key"
override def canHaveManyInputs: Boolean = true
@MethodToInvoke
def execute(@BranchParamName("key") keyByBranchId: Map[String, LazyParameter[CharSequence]],
@BranchParamName("value") valueByBranchId: Map[String, LazyParameter[AnyRef]],
@ParamName("stateTimeout") stateTimeout: Duration,
@OutputVariableName variableName: String)(implicit nodeId: NodeId): JoinContextTransformation =
ContextTransformation
.join.definedBy(transformContextsDefinition(valueByBranchId, variableName)(_))
.implementedBy(
new FlinkCustomJoinTransformation {
override def transform(inputs: Map[String, DataStream[Context]], context: FlinkCustomNodeContext): DataStream[ValueWithContext[AnyRef]] = {
val keyedInputStreams = inputs.toList.map {
case (branchId, stream) =>
val keyParam = keyByBranchId(branchId)
val valueParam = valueByBranchId(branchId)
stream
.flatMap(new StringKeyedValueMapper(context, keyParam, valueParam))
.map(_.map(_.mapValue(v => (ContextTransformation.sanitizeBranchName(branchId), v))))
}
val connectedStream = keyedInputStreams.reduce(_.connect(_).map(mapElement, mapElement))
val afterOptionalAssigner = timestampAssigner
.map(new TimestampAssignmentHelper[ValueWithContext[StringKeyedValue[(String, AnyRef)]]](_).assignWatermarks(connectedStream))
.getOrElse(connectedStream)
setUidToNodeIdIfNeed(context, afterOptionalAssigner
.keyBy(_.value.key)
.process(new UnionMemoFunction(stateTimeout)))
}
}
)
protected def mapElement: ValueWithContext[KeyedValue[String, (String, AnyRef)]] => ValueWithContext[KeyedValue[String, (String, AnyRef)]] = identity
def transformContextsDefinition(valueByBranchId: Map[String, LazyParameter[AnyRef]], variableName: String)
(inputContexts: Map[String, ValidationContext])
(implicit nodeId: NodeId): ValidatedNel[ProcessCompilationError, ValidationContext] = {
ContextTransformation.findUniqueParentContext(inputContexts).map { parent =>
val newType = TypedObjectTypingResult(
(KeyField -> Typed[String]) :: inputContexts.map {
case (branchId, _) =>
ContextTransformation.sanitizeBranchName(branchId) -> valueByBranchId(branchId).returnType
}.toList
)
ValidationContext(Map(variableName -> newType), Map.empty, parent)
}
}
}
class UnionMemoFunction(stateTimeout: Duration) extends LatelyEvictableStateFunction[ValueWithContext[StringKeyedValue[(String, AnyRef)]], ValueWithContext[AnyRef], Map[String, AnyRef]] {
type FlinkCtx = KeyedProcessFunction[String, ValueWithContext[StringKeyedValue[(String, AnyRef)]], ValueWithContext[AnyRef]]#Context
import scala.collection.JavaConverters._
override protected def stateDescriptor: ValueStateDescriptor[Map[String, AnyRef]] =
new ValueStateDescriptor("state", implicitly[TypeInformation[Map[String, AnyRef]]])
override def processElement(valueWithCtx: ValueWithContext[StringKeyedValue[(String, AnyRef)]], ctx: FlinkCtx, out: Collector[ValueWithContext[AnyRef]]): Unit = {
val currentState = Option(readState()).getOrElse(Map.empty)
val (sanitizedBranchName, value) = valueWithCtx.value.value
val newValue = Map(
KeyField -> valueWithCtx.value.key,
sanitizedBranchName -> value
)
val mergedValue = currentState ++ newValue
updateState(mergedValue, ctx.timestamp() + stateTimeout.toMillis, ctx.timerService())
out.collect(new ValueWithContext[AnyRef](mergedValue.asJava, valueWithCtx.context))
}
}
| TouK/nussknacker | engine/flink/components/base/src/main/scala/pl/touk/nussknacker/engine/flink/util/transformer/UnionWithMemoTransformer.scala | Scala | apache-2.0 | 5,583 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hortonworks.spark.sql.hive.llap
import java.util.UUID
import org.apache.hadoop.hive.llap.LlapBaseInputFormat
import org.apache.spark.sql.{Dataset, Row, SQLContext}
import org.slf4j.LoggerFactory
class LlapQuery(val sc: SQLContext) {
private val log = LoggerFactory.getLogger(getClass)
private val handleIds = new scala.collection.mutable.HashSet[String]
private var currentDatabase: String = "default"
def setCurrentDatabase(dbName: String): Unit = {
currentDatabase = dbName
}
def sql(queryString: String): Dataset[Row] = {
val handleId = UUID.randomUUID().toString()
handleIds.add(handleId)
val df = sc.read
.format("org.apache.spark.sql.hive.llap")
.option("query", queryString)
.option("handleid", handleId)
.option("currentdatabase", currentDatabase)
.load()
df
}
def close(): Unit = {
handleIds.foreach ((handleId) => {
try {
LlapBaseInputFormat.close(handleId)
} catch {
case ex: Exception =>
log.error("Error closing " + handleId, ex)
}
})
handleIds.clear
}
}
| hortonworks-spark/spark-llap | src/main/scala/com/hortonworks/spark/sql/hive/llap/LlapQuery.scala | Scala | apache-2.0 | 1,915 |
package org.ensime.sexp.formats
import scala.util._
import org.ensime.sexp._
class SexpFormatUtilsSpec extends FormatSpec with SexpFormats {
import SexpFormatUtils._
describe("SexpFormatUtils") {
it("should lift writers") {
val lifted = lift(new SexpWriter[SexpString] {
def write(o: SexpString) = o
})
assert(foo.toSexp(lifted) === foo)
intercept[UnsupportedOperationException] {
foo.convertTo[SexpString](lifted)
}
}
it("should lift readers") {
val lifted = lift(new SexpReader[SexpString] {
def read(o: Sexp) = o.asInstanceOf[SexpString]
})
assert(foo.convertTo[SexpString](lifted) === foo)
intercept[UnsupportedOperationException] {
foo.toSexp(lifted)
}
}
it("should combine readers and writers") {
val reader = new SexpReader[SexpString] {
def read(o: Sexp) = o.asInstanceOf[SexpString]
}
val writer = new SexpWriter[SexpString] {
def write(o: SexpString) = o
}
val combo = sexpFormat(reader, writer)
assert(foo.convertTo[SexpString](combo) === foo)
assert(foo.toSexp(combo) === foo)
}
it("should support lazy formats") {
var init = false
val lazyF = lazyFormat {
init = true
SexpStringFormat
}
assert(!init)
assert(SexpString("foo").convertTo[SexpString](lazyF) === SexpString("foo"))
assert(init)
assert(SexpString("foo").toSexp(lazyF) === SexpString("foo"))
}
it("should support safe readers") {
val safe = safeReader(
new SexpReader[SexpString] {
def read(value: Sexp) = value match {
case s: SexpString => s
case x => deserializationError(x)
}
}
)
assert(foo.convertTo[Try[SexpString]](safe) === Success(foo))
assert(bar.convertTo[Try[SexpString]](safe).isInstanceOf[Failure[_]])
}
}
}
| jacobono/ensime-server | sexpress/src/test/scala/org/ensime/sexp/formats/SexpFormatUtilsSpec.scala | Scala | gpl-3.0 | 1,945 |
/*
* Copyright 2009-2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package http
package provider
package encoder
import java.util._
import net.liftweb.http.provider.{HTTPCookie, SameSite}
import net.liftweb.common.{Full}
/**
 * Converts an HTTPCookie into a string to be used as a header cookie value.
 *
 * The string representation follows the <a href="https://tools.ietf.org/html/rfc6265">RFC6265</a>
 * standard, with the additional SameSite attribute supported by modern browsers as explained at
 * <a href="https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie/SameSite">MDN SameSite Cookies</a>.
 *
 * This code is based on Netty's HTTP cookie encoder.
 *
 * Multiple cookies are supported by sending a separate "Set-Cookie" header for each cookie.
*
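 * A minimal usage sketch (assumes the two-argument `HTTPCookie(name, value)` factory; the values
 * below are illustrative):
 * {{{
 * val header = CookieEncoder.encode(HTTPCookie("session", "abc123"))
 * // header == "session=abc123", plus any attributes set on the cookie (e.g. "; Secure; SameSite=Lax")
 * }}}
 *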
*/
object CookieEncoder {
private val VALID_COOKIE_NAME_OCTETS = validCookieNameOctets();
private val VALID_COOKIE_VALUE_OCTETS = validCookieValueOctets();
private val VALID_COOKIE_ATTRIBUTE_VALUE_OCTETS = validCookieAttributeValueOctets();
private val PATH = "Path"
private val EXPIRES = "Expires"
private val MAX_AGE = "Max-Age"
private val DOMAIN = "Domain"
private val SECURE = "Secure"
private val HTTPONLY = "HTTPOnly"
private val SAMESITE = "SameSite"
private val DAY_OF_WEEK_TO_SHORT_NAME = Array("Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat")
private val CALENDAR_MONTH_TO_SHORT_NAME = Array("Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug",
"Sep", "Oct", "Nov", "Dec")
def encode(cookie: HTTPCookie): String = {
val name = cookie.name
val value = cookie.value.getOrElse("")
val skipValidation = isOldVersionCookie(cookie)
if (!skipValidation) {
validateCookie(name, value)
}
val buf = new StringBuilder()
add(buf, name, value);
cookie.maxAge foreach { maxAge =>
add(buf, MAX_AGE, maxAge);
val expires = new Date(maxAge * 1000 + System.currentTimeMillis());
buf.append(EXPIRES);
buf.append('=');
appendDate(expires, buf);
buf.append(';');
buf.append(' ');
}
cookie.path foreach { path =>
add(buf, PATH, path);
}
cookie.domain foreach { domain =>
add(buf, DOMAIN, domain);
}
cookie.secure_? foreach { isSecure =>
if (isSecure) add(buf, SECURE);
}
cookie.httpOnly foreach { isHttpOnly =>
if (isHttpOnly) add(buf, HTTPONLY)
}
cookie.sameSite foreach {
case SameSite.LAX =>
add(buf, SAMESITE, "Lax")
case SameSite.STRICT =>
add(buf, SAMESITE, "Strict")
case SameSite.NONE =>
add(buf, SAMESITE, "None")
}
stripTrailingSeparator(buf)
}
private def validateCookie(name: String, value: String): Unit = {
val posFirstInvalidCookieNameOctet = firstInvalidCookieNameOctet(name)
if (posFirstInvalidCookieNameOctet >= 0) {
throw new IllegalArgumentException("Cookie name contains an invalid char: " +
name.charAt(posFirstInvalidCookieNameOctet))
}
val unwrappedValue = unwrapValue(value);
if (unwrappedValue == null) {
throw new IllegalArgumentException("Cookie value wrapping quotes are not balanced: " +
value);
}
val postFirstInvalidCookieValueOctet = firstInvalidCookieValueOctet(unwrappedValue)
if (postFirstInvalidCookieValueOctet >= 0) {
throw new IllegalArgumentException("Cookie value contains an invalid char: " +
unwrappedValue.charAt(postFirstInvalidCookieValueOctet));
}
}
/**
* Checks if the cookie is set with an old version 0.
*
* More info about the cookie version at https://javadoc.io/static/jakarta.servlet/jakarta.servlet-api/5.0.0/jakarta/servlet/http/Cookie.html#setVersion-int-
*
* @param cookie
* @return true if the cookie version is 0, false if it has no value or a different value than 0
*/
private def isOldVersionCookie(cookie: HTTPCookie): Boolean = {
cookie.version map (_ == 0) getOrElse false
}
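  // For reference (illustrative): a Date of 784111777000L epoch millis is appended as
  // "Sun, 06 Nov 1994 08:49:37 GMT", the RFC 1123 style date expected by the Expires attribute.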
private def appendDate(date: Date, sb: StringBuilder): StringBuilder = {
val cal = new GregorianCalendar(TimeZone.getTimeZone("UTC"))
cal.setTime(date)
sb.append(DAY_OF_WEEK_TO_SHORT_NAME(cal.get(Calendar.DAY_OF_WEEK) - 1)).append(", ")
appendZeroLeftPadded(cal.get(Calendar.DAY_OF_MONTH), sb).append(' ')
sb.append(CALENDAR_MONTH_TO_SHORT_NAME(cal.get(Calendar.MONTH))).append(' ')
sb.append(cal.get(Calendar.YEAR)).append(' ')
appendZeroLeftPadded(cal.get(Calendar.HOUR_OF_DAY), sb).append(':')
appendZeroLeftPadded(cal.get(Calendar.MINUTE), sb).append(':')
appendZeroLeftPadded(cal.get(Calendar.SECOND), sb).append(" GMT")
}
private def appendZeroLeftPadded(value: Int, sb: StringBuilder): StringBuilder = {
if (value < 10) {
sb.append('0');
}
return sb.append(value);
}
private def validCookieNameOctets() = {
val bits = new BitSet()
(32 until 127) foreach bits.set
    val separators = Array('(', ')', '<', '>', '@', ',', ';', ':', '\\', '"', '/', '[', ']', '?', '=', '{',
      '}', ' ', '\t' )
separators.foreach(separator => bits.set(separator, false))
bits
}
private def validCookieValueOctets() = {
val bits = new BitSet()
bits.set(0x21);
(0x23 to 0x2B) foreach bits.set
(0x2D to 0x3A) foreach bits.set
(0x3C to 0x5B) foreach bits.set
(0x5D to 0x7E) foreach bits.set
bits
}
private def validCookieAttributeValueOctets() = {
val bits = new BitSet()
(32 until 127) foreach bits.set
bits.set(';', false)
bits
}
private def stripTrailingSeparator(buf: StringBuilder) = {
if (buf.length() > 0) {
buf.setLength(buf.length() - 2);
}
buf.toString()
}
private def add(sb: StringBuilder, name: String, value: Long) = {
sb.append(name);
sb.append('=');
sb.append(value);
sb.append(';');
sb.append(' ');
}
private def add(sb: StringBuilder, name: String, value: String) = {
sb.append(name);
sb.append('=');
sb.append(value);
sb.append(';');
sb.append(' ');
}
private def add(sb: StringBuilder, name: String) = {
sb.append(name);
sb.append(';');
sb.append(' ');
}
private def firstInvalidCookieNameOctet(cs: CharSequence): Int = {
return firstInvalidOctet(cs, VALID_COOKIE_NAME_OCTETS);
}
private def firstInvalidCookieValueOctet(cs: CharSequence): Int = {
return firstInvalidOctet(cs, VALID_COOKIE_VALUE_OCTETS);
}
private def firstInvalidOctet(cs: CharSequence, bits: BitSet): Int = {
(0 until cs.length()).foreach { i =>
val c = cs.charAt(i)
if (!bits.get(c)) {
return i;
}
}
-1;
}
private def unwrapValue(cs: CharSequence): CharSequence = {
val len = cs.length()
if (len > 0 && cs.charAt(0) == '"') {
if (len >= 2 && cs.charAt(len - 1) == '"') {
if (len == 2) "" else cs.subSequence(1, len - 1)
} else {
null
}
} else {
cs
}
}
}
| lift/framework | web/webkit/src/main/scala/net/liftweb/http/provider/encoder/CookieEncoder.scala | Scala | apache-2.0 | 7,694 |
package benchmarks.simple
import java.util.concurrent.TimeUnit
import benchmarks.{EngineParam, Size, Step, Workload}
import org.openjdk.jmh.annotations._
import org.openjdk.jmh.infra.BenchmarkParams
import rescala.core.{Engine, Struct}
import rescala.reactives.{Signal, Var}
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.MILLISECONDS)
@Warmup(iterations = 3, time = 1000, timeUnit = TimeUnit.MILLISECONDS)
@Measurement(iterations = 3, time = 1000, timeUnit = TimeUnit.MILLISECONDS)
@Fork(3)
@Threads(1)
@State(Scope.Thread)
class ChainSignalHalfChange[S <: Struct] {
implicit var engine: Engine[S] = _
var source: Var[Int, S] = _
var result: Signal[Int, S] = _
@Setup
def setup(params: BenchmarkParams, size: Size, step: Step, engineParam: EngineParam[S], work: Workload) = {
engine = engineParam.engine
source = Var(step.run())
result = source
for (_ <- Range(0, size.size)) {
result = result.map{v => val r = v + 1; work.consume(); r}
}
for (_ <- Range(0, size.size)) {
result = result.map{v => v + 1; work.consume(); 0}
}
}
@Benchmark
def run(step: Step): Unit = source.set(step.run())
}
| volkc/REScala | Research/Microbenchmarks/src/main/scala/benchmarks/simple/ChainSignalHalfChange.scala | Scala | apache-2.0 | 1,172 |
package mot.dump
import java.io.OutputStream
import java.text.SimpleDateFormat
import mot.impl.Connection
object Direction extends Enumeration {
val Incoming, Outgoing = Value
}
trait Event {
val timestampMs = System.currentTimeMillis()
def direction: Direction.Value
def conn: Connection
def print(os: OutputStream, sdf: SimpleDateFormat, showBody: Boolean, maxBodyLength: Int, showAttributes: Boolean): Unit
def protocol: String
lazy val (fromAddress, fromName, toAddress, toName) = direction match {
case Direction.Incoming => (conn.remoteAddress, conn.remoteName, conn.localAddress, conn.localName)
case Direction.Outgoing => (conn.localAddress, conn.localName, conn.remoteAddress, conn.remoteName)
}
} | marianobarrios/mot | src/main/scala/mot/dump/Event.scala | Scala | bsd-2-clause | 748 |
package com.kodekutters.gpsd4scala.collector
import com.kodekutters.gpsd4scala.protocol.Report
/**
* Author: Ringo Wathelet
* Date: 19/04/13
* Version: 1
*/
trait Collector {
def collect(info: Report)
}
| workingDog/Gpsd4Scala | src/main/scala/com/kodekutters/gpsd4scala/collector/Collector.scala | Scala | bsd-3-clause | 213 |
/**
* @author Daniel Perez
*/
package net.javachallenge.util.settings
/**
* A general wrapper to load settings from files
*/
object SettingsLoader {
/**
   * Loads the settings using the parser class that matches the configured settings format
*/
def loadSettings: Unit = EffectiveSettings.settingsFormat match {
case "xml" => load(EffectiveSettings.xmlSettingsParserClassName)
case x => throw new IllegalSettingsException("No parser found " +
"for format %s.".format(x))
}
/**
* Loads the settings using the format and file defined in
* [[net.javachallenge.util.settings.Defaults]] and
* [[net.javachallenge.util.settings.EffectiveSettings]] with
* the given loader
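   *
   * A minimal illustrative call (using the XML parser class referenced elsewhere in this object):
   * {{{
   * SettingsLoader.load(EffectiveSettings.xmlSettingsParserClassName)
   * }}}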
*
* @param parserClass the name of the class used to load the settings
*/
def load(parserClass: String): Unit = {
val file = "%s/config.%s".format(Defaults.SETTINGS_PATH,
EffectiveSettings.settingsFormat)
val parser = Class.forName(parserClass).newInstance.asInstanceOf[SettingsParser]
parser.loadSettings(file)
}
} | AI-comp/JavaChallenge2012 | src/main/scala/net/javachallenge/util/settings/SettingsLoader.scala | Scala | apache-2.0 | 1,047 |
/*
* Copyright (c) 2012-2016 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich.common.enrichments.registry
package apirequest
case class ValueNotFoundException(message: String) extends Throwable {
override def toString = s"Value not found $message"
}
case class JsonPathException(message: String) extends Throwable {
override def toString = s"JSONPath error $message"
}
case class InvalidStateException(message: String) extends Throwable {
override def toString = message
}
| TimothyKlim/snowplow | 3-enrich/scala-common-enrich/src/main/scala/com.snowplowanalytics.snowplow.enrich/common/enrichments/registry/apirequest/Errors.scala | Scala | apache-2.0 | 1,155 |
package de.kaufhof.hajobs.testutils
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
abstract class CassandraSpec extends StandardSpec with TestCassandraConnection with Eventually with IntegrationPatience {
} | MarcoPriebe/ha-jobs | ha-jobs-core/src/test/scala/de/kaufhof/hajobs/testutils/CassandraSpec.scala | Scala | apache-2.0 | 228 |
package com.twitter.finagle.netty4.http
import com.twitter.app.GlobalFlag
import com.twitter.finagle.http.{
Chunk,
Fields,
HeaderMap,
Method,
Request,
Response,
Status,
Version
}
import com.twitter.finagle.netty4.ByteBufConversion
import com.twitter.io.Reader
import io.netty.handler.codec.{http => NettyHttp}
import java.net.InetSocketAddress
object revalidateInboundHeaders
extends GlobalFlag[Boolean](
default = false,
help = "Perform Finagle based validation of headers when converting from Netty `HeaderMap`s"
)
private[finagle] object Bijections {
object netty {
def versionToFinagle(v: NettyHttp.HttpVersion): Version = v match {
case NettyHttp.HttpVersion.HTTP_1_0 => Version.Http10
case NettyHttp.HttpVersion.HTTP_1_1 => Version.Http11
case _ => Version.Http11
}
def methodToFinagle(m: NettyHttp.HttpMethod): Method =
Method(m.name)
def statusToFinagle(s: NettyHttp.HttpResponseStatus): Status =
Status.fromCode(s.code)
private def copyToFinagleRequest(in: NettyHttp.HttpRequest, out: Request): Unit = {
out.version = Bijections.netty.versionToFinagle(in.protocolVersion)
out.method = Bijections.netty.methodToFinagle(in.method)
out.uri = in.uri
writeNettyHeadersToFinagle(in.headers, out.headerMap)
}
def chunkedRequestToFinagle(
in: NettyHttp.HttpRequest,
r: Reader[Chunk],
remoteAddr: InetSocketAddress
): Request = {
val out = new Request.Inbound(r, remoteAddr, HeaderMap.Empty)
out.setChunked(true)
copyToFinagleRequest(in, out)
out
}
def fullRequestToFinagle(
in: NettyHttp.FullHttpRequest,
remoteAddr: InetSocketAddress
): Request = {
val payload = ByteBufConversion.byteBufAsBuf(in.content)
val reader =
if (payload.isEmpty) Reader.empty[Chunk]
else Reader.value(Chunk(payload))
val trailers =
if (in.trailingHeaders.isEmpty) HeaderMap.Empty
else headersToFinagle(in.trailingHeaders)
val out = new Request.Inbound(reader, remoteAddr, trailers)
out.setChunked(false)
out.content = payload
copyToFinagleRequest(in, out)
out
}
def headersToFinagle(h: NettyHttp.HttpHeaders): HeaderMap = {
val result = HeaderMap.newHeaderMap
writeNettyHeadersToFinagle(h, result)
result
}
def writeNettyHeadersToFinagle(head: NettyHttp.HttpHeaders, out: HeaderMap): Unit = {
val shouldValidate = revalidateInboundHeaders()
val itr = head.iteratorAsString()
while (itr.hasNext) {
val entry = itr.next()
        // addUnsafe because Netty already validates headers for us, but sometimes
        // it's better to be doubly sure, so we allow opting into revalidation.
if (shouldValidate) out.add(entry.getKey, entry.getValue)
else out.addUnsafe(entry.getKey, entry.getValue)
}
}
private def copyToFinagleResponse(in: NettyHttp.HttpResponse, out: Response): Unit = {
out.version = versionToFinagle(in.protocolVersion())
out.status = statusToFinagle(in.status)
writeNettyHeadersToFinagle(in.headers, out.headerMap)
}
def chunkedResponseToFinagle(in: NettyHttp.HttpResponse, r: Reader[Chunk]): Response = {
val out = new Response.Inbound(r, HeaderMap.Empty)
out.setChunked(true)
copyToFinagleResponse(in, out)
out
}
def fullResponseToFinagle(in: NettyHttp.FullHttpResponse): Response = {
val payload = ByteBufConversion.byteBufAsBuf(in.content)
val reader =
if (payload.isEmpty) Reader.empty[Chunk]
else Reader.value(Chunk(payload))
val trailers =
if (in.trailingHeaders.isEmpty) HeaderMap.Empty
else headersToFinagle(in.trailingHeaders)
val out = new Response.Inbound(reader, trailers)
out.setChunked(false)
out.content = payload
copyToFinagleResponse(in, out)
out
}
}
object finagle {
def writeFinagleHeadersToNetty(in: HeaderMap, out: NettyHttp.HttpHeaders): Unit =
in.nameValueIterator.foreach { nv =>
out.add(nv.name, nv.value)
}
def headersToNetty(h: HeaderMap): NettyHttp.HttpHeaders = {
// We don't want to validate headers here since they are already validated
// by Finagle's own HeaderMap.
val result = new NettyHttp.DefaultHttpHeaders(false /*validate headers*/ )
writeFinagleHeadersToNetty(h, result)
result
}
def statusToNetty(s: Status): NettyHttp.HttpResponseStatus =
NettyHttp.HttpResponseStatus.valueOf(s.code)
def versionToNetty(v: Version): NettyHttp.HttpVersion = v match {
case Version.Http10 => NettyHttp.HttpVersion.HTTP_1_0
case Version.Http11 => NettyHttp.HttpVersion.HTTP_1_1
case _ => NettyHttp.HttpVersion.HTTP_1_1
}
def chunkedResponseToNetty(r: Response): NettyHttp.HttpResponse =
new NettyHttp.DefaultHttpResponse(
versionToNetty(r.version),
statusToNetty(r.status),
headersToNetty(r.headerMap)
)
def fullResponseToNetty(r: Response): NettyHttp.FullHttpResponse =
new NettyHttp.DefaultFullHttpResponse(
versionToNetty(r.version),
statusToNetty(r.status),
ByteBufConversion.bufAsByteBuf(r.content),
headersToNetty(r.headerMap),
NettyHttp.EmptyHttpHeaders.INSTANCE // trailers are only propagated from chunked messages
)
def methodToNetty(m: Method): NettyHttp.HttpMethod =
NettyHttp.HttpMethod.valueOf(m.toString)
def requestToNetty(r: Request): NettyHttp.HttpRequest = {
if (r.isChunked) {
val result = new NettyHttp.DefaultHttpRequest(
versionToNetty(r.version),
methodToNetty(r.method),
r.uri,
headersToNetty(r.headerMap)
)
// We only set the Transfer-Encoding to "chunked" if the request does not have
// Content-Length set. This mimics Netty 3 behavior, wherein a request can be "chunked"
// and not have a "Transfer-Encoding: chunked" header (instead, it has a Content-Length).
if (!r.headerMap.contains(Fields.ContentLength)) {
result.headers
.add(NettyHttp.HttpHeaderNames.TRANSFER_ENCODING, NettyHttp.HttpHeaderValues.CHUNKED)
}
result
} else {
new NettyHttp.DefaultFullHttpRequest(
versionToNetty(r.version),
methodToNetty(r.method),
r.uri,
ByteBufConversion.bufAsByteBuf(r.content),
headersToNetty(r.headerMap),
NettyHttp.EmptyHttpHeaders.INSTANCE // trailers are only propagated from chunked messages
)
}
}
}
}
| luciferous/finagle | finagle-netty4-http/src/main/scala/com/twitter/finagle/netty4/http/Bijections.scala | Scala | apache-2.0 | 6,722 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs102.calculations
import uk.gov.hmrc.ct.accounts.frs102.boxes._
import uk.gov.hmrc.ct.box.CtTypeConverters
trait TotalCurrentAssetsCalculator extends CtTypeConverters {
def calculateCurrentTotalCurrentAssets(ac50: AC50, ac52: AC52, ac54: AC54): AC56 = {
(ac50.value, ac52.value, ac54.value) match {
case (None, None, None) => AC56(None)
case _ => AC56(Some(ac50 + ac52 + ac54))
}
}
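  // Illustrative values: AC50(Some(100)), AC52(None), AC54(Some(50)) yields AC56(Some(150)),
  // while all three being None yields AC56(None). The `+` comes from CtTypeConverters, which is
  // assumed here to treat missing values as zero.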
def calculatePreviousTotalCurrentAssets(ac51: AC51, ac53: AC53, ac55: AC55): AC57 = {
(ac51.value, ac53.value, ac55.value) match {
case (None, None, None) => AC57(None)
case _ => AC57(Some(ac51 + ac53 + ac55))
}
}
}
| hmrc/ct-calculations | src/main/scala/uk/gov/hmrc/ct/accounts/frs102/calculations/TotalCurrentAssetsCalculator.scala | Scala | apache-2.0 | 1,290 |
package cosbench_ng
import org.slf4j. { LoggerFactory }
import java.util.Date
import java.nio.file.{ FileSystems, Files, StandardOpenOption }
case class IntermediateStats (vSum_ : Double = 0, vSumSqr: Double = 0, count: Long =0, k: Option[Double] = None)
case class Metric(average: Double = 0,
min: Double = 999999999,
max: Double = 0,
inter: IntermediateStats = IntermediateStats()) {
def merge(newStat: Double): Metric = {
// calculate summary stats. Variance from
// https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance (computing shifted data)
// variance calculation
val newCount = inter.count +1
val local = (newStat - inter.k.getOrElse(newStat))
val nInter = IntermediateStats(inter.vSum_ + local,
inter.vSumSqr + local * local,
newCount, Some(inter.k.getOrElse(newStat)))
new Metric( ((average * inter.count) + newStat) / newCount, //avg
if (newStat < min) newStat else min,
if (newStat > max) newStat else max,
nInter)
}
def stdDeviation =
Math.sqrt( (inter.vSumSqr - (inter.vSum_ * inter.vSum_)/inter.count)/( if (inter.count > 1) (inter.count - 1) else 1))
}
object SmryStats {
def apply ( a:Int, b:Int, c:Int , f: Int, m1: Metric, m2: Metric) = new SmryStats(a,b,c,f,m1,m2)
def apply (c: SmryStats) = new SmryStats(
c.opsStartedNCompleted,
c.opsCompletedStatsNSent,
c.opsNStarted,
c.failed, c.rspStart, c.rspEnd)
def apply() = new SmryStats()
}
class SmryStats (
val opsStartedNCompleted: Int = 0,
val opsCompletedStatsNSent : Int = 0,
val opsNStarted : Int = 0,
val failed: Int = 0,
val rspStart: Metric = new Metric(),
val rspEnd: Metric = new Metric()) {
def merge(g: GoodStat): SmryStats = new SmryStats(opsStartedNCompleted,
opsCompletedStatsNSent,
opsNStarted,
failed,
rspStart.merge(g.rspStarted),
rspEnd.merge(g.rspComplete))
def merge(b: BadStat): SmryStats = new SmryStats(opsStartedNCompleted,
opsCompletedStatsNSent,
opsNStarted,
failed + 1,
rspStart,
rspEnd)
def merge(f: FinalStat): SmryStats = new SmryStats(
opsStartedNCompleted + f.opsStartedNCompleted,
opsCompletedStatsNSent + f.opsCompletedStatsNSent,
opsNStarted + f.opsNStarted,
failed,
rspStart,
rspEnd)
// calculate summary stats. Variance from
// https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance (computing shifted data)
def updateSmryStats (x: Stats) : SmryStats = x match {
case y: GoodStat => merge(y)
case y: BadStat => merge(y)
case y: FinalStat => merge(y)
}
def printSmryStats (runTime: Long) = {
val log = LoggerFactory.getLogger(this.getClass)
val count = rspStart.inter.count
val stdDeviation = rspStart.stdDeviation.toLong
val objRate = count.toFloat/(runTime/1000)
println()
println("---------------")
println("Test Complete (results logged in %s):".format(LogFile.directory))
println ("TTFB (avg,min,max,std) : (" + rspStart.average.toLong
+ "," + rspStart.min.toLong + "," + rspStart.max.toLong
+ "," + rspStart.stdDeviation.toLong +") ms" );
println ("TTLB (avg,min,max,std) : (" + rspEnd.average.toLong
+ "," + rspEnd.min.toLong
+ "," + rspEnd.max.toLong +","
+ rspEnd.stdDeviation.toLong +") ms" );
println ("No of ops (Target, Actual) : (" + MyConfig.cl.get.maxOps + "," + count +")")
println ("Ops/second (Target, Actual) : (%d,%4.2f)".format(MyConfig.cl.get.opsRate, objRate))
println ("Throughput(KB)/sec (Target, Actual) : (%4.2f,%4.2f) MB/s".format(
MyConfig.cl.get.opsRate.toFloat*MyConfig.cl.get.objSize.toFloat/1024,
objRate.toFloat*MyConfig.cl.get.objSize.toFloat/1024))
val errStr = """
|Run time : %d seconds
|Known Errors:
|+ ops - queued but not started : +%d
|+ ops - started but not completed : +%d
|+ ops - completed but stats dropped : +%d
|Unknown Errors:
|+ ops - failed : +%d
|+ ops - Unaccounted / Unacknowledged: +%d""".stripMargin.format(runTime/1000
,opsNStarted.toLong
,opsStartedNCompleted.toLong
,opsCompletedStatsNSent.toLong
,failed.toLong
,MyConfig.cl.get.maxOps -
(count + failed.toLong
+ opsNStarted.toLong
+ opsStartedNCompleted.toLong
+ opsCompletedStatsNSent.toLong))
val logHeader : String = "tag,time,cmd,objSize(KB),endpoint,rangeRead,targetOps,actualOps," +
"targetOpsRate,actualOpsRate,ttFb(avg),ttFb(min)," +
"ttFb(max),ttFb(SD),ttLb(avg),ttLb(min),ttLb(max)," +
"ttLb(SD),targetThroughput,actualThroughput,runTime(ms),cmdLine\\n"
val logOutput = "%s,%s,%s,%d,%s,%d,%d,%d,%d,%4.2f,%d,%d,%d,%d,%d,%d,%d,%d,%4.2f,%4.2f,%d,%s\\n".format(
MyConfig.cl.get.testTag,
new Date(System.currentTimeMillis()),
MyConfig.cl.get.cmd,
MyConfig.cl.get.objSize,
MyConfig.cl.get.endpoint,
if (MyConfig.cl.get.cmd == "GET" && MyConfig.cl.get.rangeReadStart != 0)
MyConfig.cl.get.rangeReadEnd - MyConfig.cl.get.rangeReadStart
else
-1,
MyConfig.cl.get.maxOps,
count,
MyConfig.cl.get.opsRate,
objRate,
rspStart.average.toLong ,
rspStart.min.toLong ,
rspStart.max.toLong ,
rspStart.stdDeviation.toLong,
rspEnd.average.toLong ,
rspEnd.min.toLong ,
rspEnd.max.toLong ,
rspEnd.stdDeviation.toLong ,
MyConfig.cl.get.opsRate.toFloat*MyConfig.cl.get.objSize.toFloat/1024,
objRate.toFloat*MyConfig.cl.get.objSize.toFloat/1024,
runTime/1000,
MyConfig.rawCl.get.mkString(" "))
println(errStr)
log.warn(logOutput)
log.warn(errStr)
val p = FileSystems.getDefault().getPath("/tmp/cosbench_ng/results.csv")
if (p.toFile().exists == false)
Files.write(p, logHeader.getBytes, StandardOpenOption.CREATE)
Files.write( p, logOutput.getBytes, StandardOpenOption.APPEND)
}
}
| vardhanv/cosbench_ng | server/src/main/scala/SmryStats.scala | Scala | mit | 6,724 |
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.tools.export.formats
import org.geotools.data.simple.SimpleFeatureCollection
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
object NullExporter extends FeatureExporter {
override def export(features: SimpleFeatureCollection): Option[Long] = {
var count = 0L
SelfClosingIterator(features.features).foreach(_ => count += 1)
Some(count)
}
override def close(): Unit = {}
}
| ronq/geomesa | geomesa-tools/src/main/scala/org/locationtech/geomesa/tools/export/formats/NullExporter.scala | Scala | apache-2.0 | 913 |
package pl.touk.nussknacker.engine.api
case class ValueWithContext[T](value: T, context: Context) {
def map[N](f: T => N): ValueWithContext[N] =
copy(value = f(value))
}
| TouK/nussknacker | components-api/src/main/scala/pl/touk/nussknacker/engine/api/ValueWithContext.scala | Scala | apache-2.0 | 179 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.nio.ByteBuffer
import scala.language.existentials
import org.apache.spark._
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.shuffle.ShuffleWriter
/**
* A ShuffleMapTask divides the elements of an RDD into multiple buckets (based on a partitioner
* specified in the ShuffleDependency).
*
* See [[org.apache.spark.scheduler.Task]] for more information.
*
* @param stageId id of the stage this task belongs to
 * @param taskBinary broadcast version of the RDD and the ShuffleDependency. Once deserialized,
* the type should be (RDD[_], ShuffleDependency[_, _, _]).
* @param partition partition of the RDD this task is associated with
* @param locs preferred task execution locations for locality scheduling
*/
private[spark] class ShuffleMapTask(
stageId: Int,
taskBinary: Broadcast[Array[Byte]],
partition: Partition,
@transient private var locs: Seq[TaskLocation])
extends Task[MapStatus](stageId, partition.index) with Logging {
/** A constructor used only in test suites. This does not require passing in an RDD. */
def this(partitionId: Int) {
this(0, null, new Partition { override def index = 0 }, null)
}
@transient private val preferredLocs: Seq[TaskLocation] = {
if (locs == null) Nil else locs.toSet.toSeq
}
override def runTask(context: TaskContext): (Long, MapStatus) = {
// Deserialize the RDD using the broadcast variable.
val before = System.currentTimeMillis()
val ser = SparkEnv.get.closureSerializer.newInstance()
val (rdd, dep) = ser.deserialize[(RDD[_], ShuffleDependency[_, _, _])](
ByteBuffer.wrap(taskBinary.value), Thread.currentThread.getContextClassLoader)
metrics = Some(context.taskMetrics)
var writer: ShuffleWriter[Any, Any] = null
try {
val manager = SparkEnv.get.shuffleManager
writer = manager.getWriter[Any, Any](dep.shuffleHandle, partitionId, context)
writer.write(rdd.iterator(partition, context).asInstanceOf[Iterator[_ <: Product2[Any, Any]]])
return (metrics.get.inputMetrics.map(_.bytesRead).getOrElse(0L), writer.stop(success = true).get)
} catch {
case e: Exception =>
try {
if (writer != null) {
writer.stop(success = false)
}
} catch {
case e: Exception =>
log.debug("Could not stop writer", e)
}
throw e
}
}
override def getPartition = partition
override def preferredLocations: Seq[TaskLocation] = preferredLocs
override def toString = "ShuffleMapTask(%d, %d)".format(stageId, partitionId)
}
| trueyao/spark-lever | core/src/main/scala/org/apache/spark/scheduler/ShuffleMapTask.scala | Scala | apache-2.0 | 3,479 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.redis.tools
import java.io.File
import com.beust.jcommander.Parameter
import org.locationtech.geomesa.redis.data.{RedisDataStore, RedisDataStoreParams}
import org.locationtech.geomesa.redis.tools.RedisDataStoreCommand.RedisDataStoreParams
import org.locationtech.geomesa.tools.{CatalogParam, DataStoreCommand, DistributedCommand}
import org.locationtech.geomesa.utils.classpath.ClassPathUtils
/**
 * Base trait for commands that require a RedisDataStore
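 *
 * For example (illustrative): running a command with `--url redis://localhost:6379 --catalog mycatalog`
 * produces a `connection` map with `RedisUrlParam.key -> "redis://localhost:6379"` and
 * `RedisCatalogParam.key -> "mycatalog"`, which is what `RedisDataStore` expects.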
*/
trait RedisDataStoreCommand extends DataStoreCommand[RedisDataStore] {
override def params: RedisDataStoreParams
override def connection: Map[String, String] = {
Map(
RedisDataStoreParams.RedisUrlParam.key -> params.url,
RedisDataStoreParams.RedisCatalogParam.key -> params.catalog
)
}
}
object RedisDataStoreCommand {
trait RedisDistributedCommand extends RedisDataStoreCommand with DistributedCommand {
abstract override def libjarsFiles: Seq[String] =
Seq("org/locationtech/geomesa/redis/tools/redis-libjars.list") ++ super.libjarsFiles
abstract override def libjarsPaths: Iterator[() => Seq[File]] = Iterator(
() => ClassPathUtils.getJarsFromEnvironment("GEOMESA_REDIS_HOME", "lib"),
() => ClassPathUtils.getJarsFromClasspath(classOf[RedisDataStore])
) ++ super.libjarsPaths
}
trait RedisDataStoreParams extends CatalogParam {
@Parameter(names = Array("--url", "-u"), description = "Redis connection URL", required = true)
var url: String = _
}
}
| locationtech/geomesa | geomesa-redis/geomesa-redis-tools/src/main/scala/org/locationtech/geomesa/redis/tools/RedisDataStoreCommand.scala | Scala | apache-2.0 | 2,000 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.core.entitlement
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import akka.http.scaladsl.model.StatusCodes._
import whisk.core.entitlement.Privilege._
import whisk.common.Logging
import whisk.common.TransactionId
import whisk.core.controller.RejectRequest
import whisk.core.database.DocumentTypeMismatchException
import whisk.core.database.NoDocumentException
import whisk.core.entity._
import whisk.core.entity.types.EntityStore
import whisk.http.Messages
class PackageCollection(entityStore: EntityStore)(implicit logging: Logging) extends Collection(Collection.PACKAGES) {
protected override val allowedEntityRights = {
Set(Privilege.READ, Privilege.PUT, Privilege.DELETE)
}
/**
* Computes implicit rights on a package/binding.
*
* Must fetch the resource (a package or binding) to determine if it is in allowed namespaces.
* There are two cases:
*
* 1. the resource is a package: then either it is in allowed namespaces or it is public.
* 2. the resource is a binding: then it must be in allowed namespaces and (1) must hold for
* the referenced package.
*
* A published package makes all its assets public regardless of their shared bit.
* All assets that are not in an explicit package are private because the default package is private.
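   *
   * For example (illustrative): a binding owned by the subject that references a public package in
   * another namespace is readable, whereas a binding to a private, non-owned package is not, and the
   * rejection does not reveal whether that package exists.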
*/
protected[core] override def implicitRights(user: Identity, namespaces: Set[String], right: Privilege, resource: Resource)(
implicit ep: EntitlementProvider, ec: ExecutionContext, transid: TransactionId): Future[Boolean] = {
resource.entity map {
pkgname =>
val isOwner = namespaces.contains(resource.namespace.root.asString)
right match {
case Privilege.READ =>
// must determine if this is a public or owned package
// or, for a binding, that it references a public or owned package
val docid = FullyQualifiedEntityName(resource.namespace.root.toPath, EntityName(pkgname)).toDocId
checkPackageReadPermission(namespaces, isOwner, docid)
case _ => Future.successful(isOwner && allowedEntityRights.contains(right))
}
} getOrElse {
// only a READ on the package collection is permitted;
// NOTE: currently, the implementation allows any subject to
// list packages in any namespace, and defers the filtering of
// public packages to non-owning subjects to the API handlers
// for packages
Future.successful(right == Privilege.READ)
}
}
/**
* @param namespaces the set of namespaces the subject is entitled to
* @param isOwner indicates if the resource is owned by the subject requesting authorization
* @param docid the package (or binding) document id
*/
private def checkPackageReadPermission(namespaces: Set[String], isOwner: Boolean, doc: DocId)(
implicit ec: ExecutionContext, transid: TransactionId): Future[Boolean] = {
val right = Privilege.READ
WhiskPackage.get(entityStore, doc) flatMap {
case wp if wp.binding.isEmpty =>
val allowed = wp.publish || isOwner
logging.info(this, s"entitlement check on package, '$right' allowed?: $allowed")
Future.successful(allowed)
case wp =>
if (isOwner) {
val binding = wp.binding.get
val pkgOwner = namespaces.contains(binding.namespace.asString)
val pkgDocid = binding.docid
logging.info(this, s"checking subject has privilege '$right' for bound package '$pkgDocid'")
checkPackageReadPermission(namespaces, pkgOwner, pkgDocid)
} else {
logging.info(this, s"entitlement check on package binding, '$right' allowed?: false")
Future.successful(false)
}
} recoverWith {
case t: NoDocumentException =>
logging.info(this, s"the package does not exist (owner? $isOwner)")
// if owner, reject with not found, otherwise fail the future to reject with
// unauthorized (this prevents information leaks about packages in other namespaces)
if (isOwner) {
Future.failed(RejectRequest(NotFound))
} else {
Future.successful(false)
}
case t: DocumentTypeMismatchException =>
logging.info(this, s"the requested binding is not a package (owner? $isOwner)")
// if owner, reject with not found, otherwise fail the future to reject with
// unauthorized (this prevents information leaks about packages in other namespaces)
if (isOwner) {
Future.failed(RejectRequest(Conflict, Messages.conformanceMessage))
} else {
Future.successful(false)
}
case t: RejectRequest =>
logging.error(this, s"entitlement check on package failed: $t")
Future.failed(t)
case t =>
logging.error(this, s"entitlement check on package failed: ${t.getMessage}")
if (isOwner) {
Future.failed(RejectRequest(InternalServerError, Messages.corruptedEntity))
} else {
Future.successful(false)
}
}
}
}
| prccaraujo/openwhisk | core/controller/src/main/scala/whisk/core/entitlement/PackageCollection.scala | Scala | apache-2.0 | 6,468 |
package hr.element.beepo
package Security.postgres
import hr.ngs.patterns._
class UserRepository(
private val sessionFactory: org.pgscala.PGSessionFactory
, private val locator: IServiceLocator
) extends Security.IUserRepository {
import org.pgscala._
val createFromResultSet = (rS: PGScalaResultSet) =>
Security.postgres.UserConverter.fromPGString(rS.one[String], locator)
def find(uris: Traversable[String]): IndexedSeq[Security.User] = {
val pks = if(uris eq null) Array.empty[String] else uris.filter(_ ne null).toArray
if (pks.isEmpty) {
IndexedSeq.empty
}
else {
val formattedUris = postgres.Utils.buildSimpleUriList(pks)
sessionFactory.using( _.arr("""SELECT r
FROM "Security"."User_entity" r
WHERE r."name" IN (%s)""".format(formattedUris)) (createFromResultSet)
)
}
}
private val typeConverter = Security.postgres.UserConverter
private val rootTypeConverter = typeConverter.toPGString _
def persist(insert: Traversable[Security.User], update: Traversable[(Security.User, Security.User)], delete: Traversable[Security.User]): IndexedSeq[String] = {
sessionFactory.using{ dbSession =>
val insertValues = insert.toArray
val updateValues = update.toArray
val deleteValues = delete.toArray
insertValues foreach { item => item.URI = Security.postgres.UserConverter.buildURI(item.name) }
updateValues foreach { case(_, item) => item.URI = Security.postgres.UserConverter.buildURI(item.name) }
val sqlCom = new StringBuilder("""/*NO LOAD BALANCE*/
SELECT "Security"."persist_User"(
%s::"Security"."User_entity"[],
%s::"Security"."User_entity"[],
%s::"Security"."User_entity"[],
%s::"Security"."User_entity"[]""".format(
postgres.Utils.createArrayLiteral(insertValues, rootTypeConverter),
postgres.Utils.createArrayLiteral(updateValues map(_._1), rootTypeConverter),
postgres.Utils.createArrayLiteral(updateValues map(_._2), rootTypeConverter),
postgres.Utils.createArrayLiteral(deleteValues, rootTypeConverter)))
sqlCom.append(")")
dbSession.exec(sqlCom.toString)
insertValues.map(_.URI)
} // using
}
}
| element-doo/beepo | code/scala/model-services-generated/src/main/scala/hr/element/beepo/Security/postgres/UserRepository.scala | Scala | bsd-3-clause | 2,232 |
package io.github.rlazoti.tictactoe.models
object Piece {
def getByType(pieceType: String): Piece =
pieceType match {
case "X" | "x" => Cross()
case _ => Nought()
}
def getOpponentPiece(playerPiece: Piece) =
playerPiece match {
case Cross() => Nought()
case _ => Cross()
}
}
sealed trait Piece {
def get: String
}
case class Cross() extends Piece {
val get = "X"
}
case class Nought() extends Piece {
val get = "O"
}
sealed trait Player {
def name: String
def getPiece: String
}
case class Computer(piece: Piece) extends Player {
val name = "Computer"
val getPiece = piece.get
}
case class User(piece: Piece) extends Player {
val name = "User"
val getPiece = piece.get
}
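// Usage sketch for the factories above:
//   Piece.getByType("x")              // Cross()
//   Piece.getOpponentPiece(Cross())   // Nought()
//   Computer(Nought()).getPiece       // "O"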
| rlazoti/tictactoe-scala-react | src/main/scala/io/github/rlazoti/tictactoe/models/Player.scala | Scala | apache-2.0 | 741 |
package io.getquill.context.finagle.mysql
import io.getquill.context.sql.ProductSpec
import com.twitter.util.Await
import com.twitter.util.Future
import io.getquill.context.sql.Id
class ProductFinagleMysqlSpec extends ProductSpec {
val context = testContext
import testContext._
def await[T](r: Future[T]) = Await.result(r)
override def beforeAll = {
await(testContext.run(quote(query[Product].delete)))
()
}
"Product" - {
"Insert multiple products" in {
val inserted = await(Future.collect(productEntries.map(product => testContext.run(productInsert(lift(product))))))
val product = await(testContext.run(productById(lift(inserted(2))))).head
product.description mustEqual productEntries(2).description
product.id mustEqual inserted(2)
}
"Single insert product" in {
val inserted = await(testContext.run(productSingleInsert))
val product = await(testContext.run(productById(lift(inserted)))).head
product.description mustEqual "Window"
product.id mustEqual inserted
}
"Single insert with inlined free variable" in {
val prd = Product(0L, "test1", 1L)
val inserted = await {
testContext.run {
product.insert(_.sku -> lift(prd.sku), _.description -> lift(prd.description)).returning(_.id)
}
}
val returnedProduct = await(testContext.run(productById(lift(inserted)))).head
returnedProduct.description mustEqual "test1"
returnedProduct.sku mustEqual 1L
returnedProduct.id mustEqual inserted
}
"Single insert with free variable and explicit quotation" in {
val prd = Product(0L, "test2", 2L)
val q1 = quote {
product.insert(_.sku -> lift(prd.sku), _.description -> lift(prd.description)).returning(_.id)
}
val inserted = await(testContext.run(q1))
val returnedProduct = await(testContext.run(productById(lift(inserted)))).head
returnedProduct.description mustEqual "test2"
returnedProduct.sku mustEqual 2L
returnedProduct.id mustEqual inserted
}
"Single product insert with a method quotation" in {
val prd = Product(0L, "test3", 3L)
val inserted = await(testContext.run(productInsert(lift(prd))))
val returnedProduct = await(testContext.run(productById(lift(inserted)))).head
returnedProduct.description mustEqual "test3"
returnedProduct.sku mustEqual 3L
returnedProduct.id mustEqual inserted
}
"Single insert with wrapped value" in {
case class Product(id: Id, description: String, sku: Long)
val prd = Product(Id(0L), "test2", 2L)
val q1 = quote {
query[Product].insert(_.sku -> lift(prd.sku), _.description -> lift(prd.description)).returning(_.id)
}
await(testContext.run(q1)) mustBe a[Id]
}
"supports casts from string to number" - {
"toInt" in {
case class Product(id: Long, description: String, sku: Int)
val queried = await {
testContext.run {
query[Product].filter(_.sku == lift("1004").toInt)
}
}.head
queried.sku mustEqual 1004L
}
"toLong" in {
val queried = await {
testContext.run {
query[Product].filter(_.sku == lift("1004").toLong)
}
}.head
queried.sku mustEqual 1004L
}
}
}
}
| jcranky/quill | quill-finagle-mysql/src/test/scala/io/getquill/context/finagle/mysql/ProductFinagleMysqlSpec.scala | Scala | apache-2.0 | 3,369 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network.netty
import org.scalatest.mockito.MockitoSugar
import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.network.util.NettyUtils
class SparkTransportConfSuite extends SparkFunSuite with MockitoSugar {
val module = "rpc"
val serThreads = "serverThreads"
val cliThreads = "clientThreads"
test("default value is get when neither role nor module is set") {
val numUsableCores = 4
val conf = new SparkConf()
val sparkTransportConf = SparkTransportConf.fromSparkConf(conf, module, numUsableCores, None)
val expected = NettyUtils.defaultNumThreads(numUsableCores)
val serActual = sparkTransportConf.get(s"spark.$module.io.$serThreads", "")
val cliActual = sparkTransportConf.get(s"spark.$module.io.$cliThreads", "")
assert(serActual == expected.toString)
assert(cliActual == expected.toString)
}
test("module value is get when role is not set") {
val numUsableCores = 3
val serExpected = "7"
val cliExpected = "5"
val conf = new SparkConf()
.set(s"spark.$module.io.$serThreads", serExpected)
.set(s"spark.$module.io.$cliThreads", cliExpected)
val sparkTransportConf = SparkTransportConf.fromSparkConf(conf, module, numUsableCores, None)
val serActual = sparkTransportConf.get(s"spark.$module.io.$serThreads", "")
val cliActual = sparkTransportConf.get(s"spark.$module.io.$cliThreads", "")
assert(serActual == serExpected)
assert(cliActual == cliExpected)
}
test("use correct configuration when both module and role configs are present") {
val role = Some("driver")
val numUsableCores = 10
val serModule = "7"
val cliModule = "5"
val serExpected = "8"
val cliExpected = "6"
val conf = new SparkConf()
.set(s"spark.$module.io.$serThreads", serModule)
.set(s"spark.$module.io.$cliThreads", cliModule)
.set(s"spark.${role.get}.$module.io.$serThreads", serExpected)
.set(s"spark.${role.get}.$module.io.$cliThreads", cliExpected)
val sparkTransportConf = SparkTransportConf.fromSparkConf(conf, module, numUsableCores, role)
val serActual = sparkTransportConf.get(s"spark.$module.io.$serThreads", "")
val cliActual = sparkTransportConf.get(s"spark.$module.io.$cliThreads", "")
assert(serActual == serExpected)
assert(cliActual == cliExpected)
val exeRole = Some("executor")
val sparkTransConfExe = SparkTransportConf.fromSparkConf(conf, module, numUsableCores, exeRole)
val serActualExe = sparkTransConfExe.get(s"spark.$module.io.$serThreads", "")
val cliActualExe = sparkTransConfExe.get(s"spark.$module.io.$cliThreads", "")
assert(serActualExe == serModule)
assert(cliActualExe == cliModule)
}
}
| bdrillard/spark | core/src/test/scala/org/apache/spark/network/netty/SparkTransportConfSuite.scala | Scala | apache-2.0 | 3,538 |
package mlbigbook.ml
import simulacrum._
@typeclass
trait Hashable[T] {
def hash(t: T): Int
}
object ImplicitHashable {
implicit val bIsH: Hashable[Boolean] = new Hashable[Boolean] {
@inline override def hash(t: Boolean) = if (t) 1 else 0
}
implicit val iIsH: Hashable[Int] = new Hashable[Int] {
@inline override def hash(t: Int) = t
}
implicit val sIsH: Hashable[String] = new Hashable[String] {
@inline override def hash(t: String) = t.hashCode
}
implicit def optIsH[T: Hashable]: Hashable[Option[T]] =
new Hashable[Option[T]] {
import Hashable.ops._
@inline override def hash(maybeT: Option[T]) = maybeT match {
case Some(t) => t.hash
case None => 0
}
}
}
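// Usage sketch, relying on the simulacrum-generated `Hashable.ops` syntax
// (the same import already used inside optIsH above):
//
//   import ImplicitHashable._
//   import Hashable.ops._
//
//   "abc".hash               // delegates to sIsH (String#hashCode)
//   Option(42).hash          // delegates to optIsH(iIsH), yields 42
//   (None: Option[Int]).hash // yields 0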
| malcolmgreaves/bigmlbook | fp4ml-main/src/main/scala/mlbigbook/ml/Hashable.scala | Scala | lgpl-3.0 | 738 |
package com.softwaremill.codebrag.rest
import com.typesafe.scalalogging.slf4j.Logging
import com.softwaremill.codebrag.service.user.Authenticator
import com.softwaremill.codebrag.domain.RepositoryStatus
import com.softwaremill.codebrag.dao.repositorystatus.RepositoryStatusDAO
import com.softwaremill.codebrag.repository.config.RepoData
import com.softwaremill.codebrag.repository.Repository
class RepoStatusServlet(val authenticator: Authenticator, repository: Repository, repoStatusDao: RepositoryStatusDAO) extends JsonServletWithAuthentication with Logging {
get("/") {
getRepositoryStatus(repository.repoData)
}
private def getRepositoryStatus(repoData: RepoData): Map[String, RepositoryStatus] = {
repoStatusDao.getRepoStatus(repoData.repoName) match {
case Some(status) => Map("repoStatus" -> status)
case None => {
logger.debug(s"No status found for ${repoData.repoName}, assuming it is first run and repo is being cloned at the moment.")
Map("repoStatus" -> RepositoryStatus.notReady(repoData.repoName))
}
}
}
}
object RepoStatusServlet {
val Mapping = "repoStatus"
}
| softwaremill/codebrag | codebrag-rest/src/main/scala/com/softwaremill/codebrag/rest/RepoStatusServlet.scala | Scala | agpl-3.0 | 1,138 |
package im.actor.server.group
import akka.pattern.ask
import akka.util.Timeout
import im.actor.api.rpc.AuthorizedClientData
import im.actor.api.rpc.groups.{ Group ⇒ ApiGroup, Member ⇒ ApiMember }
import im.actor.server.file.Avatar
import im.actor.server.sequence.{ SeqState, SeqStateDate }
import scala.concurrent.{ ExecutionContext, Future }
trait GroupOperations extends Commands with Queries
private[group] sealed trait Commands {
import GroupCommands._
def create(groupId: Int, title: String, randomId: Long, userIds: Set[Int])(
implicit
peerManagerRegion: GroupProcessorRegion,
timeout: Timeout,
ec: ExecutionContext,
client: AuthorizedClientData
): Future[CreateAck] = create(groupId, client.userId, client.authId, title, randomId, userIds)
def create(groupId: Int, clientUserId: Int, clientAuthId: Long, title: String, randomId: Long, userIds: Set[Int])(
implicit
peerManagerRegion: GroupProcessorRegion,
timeout: Timeout,
ec: ExecutionContext
): Future[CreateAck] =
(peerManagerRegion.ref ? Create(groupId, clientUserId, clientAuthId, title, randomId, userIds.toSeq)).mapTo[CreateAck]
def createInternal(groupId: Int, creatorUserId: Int, title: String, userIds: Set[Int])(
implicit
region: GroupProcessorRegion,
timeout: Timeout,
ec: ExecutionContext
): Future[CreateInternalAck] =
(region.ref ? CreateInternal(groupId, creatorUserId, title, userIds.toSeq)).mapTo[CreateInternalAck]
def makePublic(groupId: Int, description: String)(
implicit
region: GroupProcessorRegion,
timeout: Timeout,
ec: ExecutionContext
): Future[MakePublicAck] =
(region.ref ? MakePublic(groupId, Some(description))).mapTo[MakePublicAck]
def leaveGroup(groupId: Int, randomId: Long)(
implicit
timeout: Timeout,
peerManagerRegion: GroupProcessorRegion,
ec: ExecutionContext,
client: AuthorizedClientData
): Future[SeqStateDate] =
(peerManagerRegion.ref ? Leave(groupId, client.userId, client.authId, randomId)).mapTo[SeqStateDate]
def kickUser(groupId: Int, kickedUserId: Int, randomId: Long)(
implicit
timeout: Timeout,
peerManagerRegion: GroupProcessorRegion,
ec: ExecutionContext,
client: AuthorizedClientData
): Future[SeqStateDate] =
(peerManagerRegion.ref ? Kick(groupId, kickedUserId, client.userId, client.authId, randomId)).mapTo[SeqStateDate]
def joinGroup(groupId: Int, joiningUserId: Int, joiningUserAuthId: Long, invitingUserId: Int)(
implicit
timeout: Timeout,
peerManagerRegion: GroupProcessorRegion,
ec: ExecutionContext
): Future[(SeqStateDate, Vector[Int], Long)] =
(peerManagerRegion.ref ? Join(groupId, joiningUserId, joiningUserAuthId, invitingUserId)).mapTo[(SeqStateDate, Vector[Int], Long)]
def joinAfterFirstRead(groupId: Int, joiningUserId: Int, joiningUserAuthId: Long)(
implicit
region: GroupProcessorRegion,
timeout: Timeout,
ec: ExecutionContext
  ): Future[Unit] = (region.ref ? JoinAfterFirstRead(groupId, joiningUserId, joiningUserAuthId)) map (_ ⇒ ())
def inviteToGroup(groupId: Int, inviteeUserId: Int, randomId: Long)(
implicit
timeout: Timeout,
peerManagerRegion: GroupProcessorRegion,
ec: ExecutionContext,
client: AuthorizedClientData
): Future[SeqStateDate] =
(peerManagerRegion.ref ? Invite(groupId, inviteeUserId, client.userId, client.authId, randomId)).mapTo[SeqStateDate]
def updateAvatar(groupId: Int, clientUserId: Int, clientAuthId: Long, avatarOpt: Option[Avatar], randomId: Long)(
implicit
region: GroupProcessorRegion,
timeout: Timeout,
ec: ExecutionContext
): Future[UpdateAvatarAck] = (region.ref ? UpdateAvatar(groupId, clientUserId, clientAuthId, avatarOpt, randomId)).mapTo[UpdateAvatarAck]
def updateTitle(groupId: Int, clientUserId: Int, clientAuthId: Long, title: String, randomId: Long)(
implicit
region: GroupProcessorRegion,
timeout: Timeout,
ec: ExecutionContext
): Future[SeqStateDate] = (region.ref ? UpdateTitle(groupId, clientUserId, clientAuthId, title, randomId)).mapTo[SeqStateDate]
def updateTopic(groupId: Int, clientUserId: Int, clientAuthId: Long, topic: Option[String], randomId: Long)(
implicit
region: GroupProcessorRegion,
timeout: Timeout,
ec: ExecutionContext
): Future[SeqStateDate] = (region.ref ? ChangeTopic(groupId, clientUserId, clientAuthId, topic, randomId)).mapTo[SeqStateDate]
def updateAbout(groupId: Int, clientUserId: Int, clientAuthId: Long, about: Option[String], randomId: Long)(
implicit
region: GroupProcessorRegion,
timeout: Timeout,
ec: ExecutionContext
): Future[SeqStateDate] = (region.ref ? ChangeAbout(groupId, clientUserId, clientAuthId, about, randomId)).mapTo[SeqStateDate]
def makeUserAdmin(groupId: Int, clientUserId: Int, clientAuthId: Long, candidateId: Int)(
implicit
region: GroupProcessorRegion,
timeout: Timeout,
ec: ExecutionContext
): Future[(Vector[ApiMember], SeqState)] = (region.ref ? MakeUserAdmin(groupId, clientUserId, clientAuthId, candidateId)).mapTo[(Vector[ApiMember], SeqState)]
def revokeIntegrationToken(groupId: Int, clientUserId: Int)(
implicit
region: GroupProcessorRegion,
timeout: Timeout,
ec: ExecutionContext
): Future[String] = (region.ref ? RevokeIntegrationToken(groupId, clientUserId)).mapTo[RevokeIntegrationTokenAck] map (_.token)
}
private[group] sealed trait Queries {
import GroupQueries._
def getIntegrationToken(groupId: Int, clientUserId: Int)(
implicit
region: GroupViewRegion,
timeout: Timeout,
ec: ExecutionContext
): Future[Option[String]] = (region.ref ? GetIntegrationToken(groupId, clientUserId)).mapTo[GetIntegrationTokenResponse] map (_.token)
//for use in inner services only
def getIntegrationToken(groupId: Int)(
implicit
region: GroupViewRegion,
timeout: Timeout,
ec: ExecutionContext
): Future[Option[String]] = (region.ref ? GetIntegrationTokenInternal(groupId)).mapTo[GetIntegrationTokenResponse] map (_.token) //FIXME
def getApiStruct(groupId: Int, clientUserId: Int)(
implicit
region: GroupViewRegion,
timeout: Timeout,
ec: ExecutionContext
): Future[ApiGroup] = (region.ref ? GetApiStruct(groupId, clientUserId)).mapTo[GetApiStructResponse] map (_.struct)
def isPublic(groupId: Int)(
implicit
region: GroupViewRegion,
timeout: Timeout,
ec: ExecutionContext
): Future[Boolean] = (region.ref ? IsPublic(groupId)).mapTo[IsPublicResponse] map (_.isPublic)
def checkAccessHash(groupId: Int, hash: Long)(
implicit
region: GroupViewRegion,
timeout: Timeout,
ec: ExecutionContext
): Future[Boolean] = (region.ref ? CheckAccessHash(groupId, hash)).mapTo[CheckAccessHashResponse] map (_.isCorrect)
def getMemberIds(groupId: Int)(
implicit
region: GroupViewRegion,
timeout: Timeout,
ec: ExecutionContext
  ): Future[(Seq[Int], Seq[Int], Option[Int])] = (region.ref ? GetMembers(groupId)).mapTo[GetMembersResponse] map (r ⇒ (r.memberIds, r.invitedUserIds, r.botId))
}
| berserkertdl/actor-platform | actor-server/actor-core/src/main/scala/im/actor/server/group/GroupOperations.scala | Scala | mit | 7,392 |
package shield.implicits
import com.amazonaws.auth.{AWSCredentials, AWSCredentialsProvider, AWSCredentialsProviderChain}
import org.specs2.mutable.Specification
import shield.aws.{AWSSigningConfig, AuthUtil}
import spray.http._
/**
* Created by amaffei on 3/15/16.
*/
class AuthUtilSpec extends Specification {
  // Set consistent times that will produce consistent results for the tests
val d1 = "20160315T141234Z"
val d2 = "20160315"
  // Create a new config; these values are typically found in application.conf
val config = new AWSSigningConfig("example-elasticsearch-host", "us-west-1", "es", true, new AWSCredentialsProviderChain(new StaticCredentialProvider()))
"AuthUtil" should {
"Use SHA256" in {
println(AuthUtil.hashAsString("Hello world!"))
AuthUtil.hashAsString("Hello world!") must be equalTo "c0535e4be2b79ffd93291305436bf889314e4a3faec05ecffcbb7df31ad9e51a"
AuthUtil.hashAsString("123$%^abcDEF") must be equalTo "3b43642576e2c2cf349f34ff7f10e700bf485e6982647a50e361e883a5aaafa2"
AuthUtil.hashAsString(" _***~` ") must be equalTo "0597e54e8278a8673f09842d03e4af3a2688d1a15a55a640968382a5311416b4"
}
"Create canonical request hash" in {
val request = new HttpRequest(HttpMethods.GET, Uri("https://example-elasticsearch-host.com:80"), List(), HttpEntity(HttpData("Sample data for a sample request ~*)@#$) @#(((")))
println(AuthUtil.createCanonicalHash(request, "example-elasticsearch-host"))
AuthUtil.createCanonicalHash(request, "example-elasticsearch-host") must be equalTo "05ef99e67afa47f06ed12084460baa4fca0bfbf92faebabed00fa78796028c5d"
}
"Create string to sign from a given canonical request" in {
val canonicalRequestHash = "05ef99e67afa47f06ed12084460baa4fca0bfbf92faebabed00fa78796028c5d"
AuthUtil.createStringToSign(d1, d2, config.region, config.service, canonicalRequestHash) must be equalTo "AWS4-HMAC-SHA256\\n20160315\\n20160315T141234Z/us-west-1/es/aws4_request\\n05ef99e67afa47f06ed12084460baa4fca0bfbf92faebabed00fa78796028c5d"
}
"Create a signature" in {
val stringToSign = "AWS4-HMAC-SHA256\\n20160315\\n20160315T141234Z/us-west-1/es/aws4_request\\n05ef99e67afa47f06ed12084460baa4fca0bfbf92faebabed00fa78796028c5d"
val signature = AuthUtil.hmacSHA256AsString("AWS4-HMAC-SHA256\\n20160315\\n20160315T141234Z/us-west-1/es/aws4_request\\n05ef99e67afa47f06ed12084460baa4fca0bfbf92faebabed00fa78796028c5d", AuthUtil.createSignatureKey(config.getSecretKey(), d1, config.region, config.service))
signature must be equalTo "68e811337b35141320236cf585a7fefad71d8948e4d1e9d5eb3583474d31eb6a"
}
}
}
//Create a static credential provider so that the access key and secret key stay the same for the purposes of testing
class StaticCredentialProvider extends AWSCredentialsProvider {
override def refresh(): Unit = { }
override def getCredentials: AWSCredentials = new AWSCredentials {
override def getAWSAccessKeyId: String = "AccessKeyId"
override def getAWSSecretKey: String = "SuperSecretKey"
}
}
| RetailMeNot/shield | src/test/scala/shield/implicits/AuthUtilSpec.scala | Scala | mit | 3,056 |
/*
* Copyright (C) 2015 Romain Reuillon
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openmole.tool
import java.util.UUID
import java.util.concurrent.Semaphore
import java.util.concurrent.locks._
package object lock {
implicit class LockDecorator(lock: Lock) {
    def apply[T](block: ⇒ T): T = {
lock.lock()
try {
block
}
finally lock.unlock()
}
}
implicit class SemaphoreDecorator(s: Semaphore) {
    def apply[T](block: ⇒ T): T = {
s.acquire()
try {
block
}
finally s.release()
}
def acquireAndRelease() = {
s.acquire()
s.release()
}
}
implicit class ReadWriteLockDecorator(l: ReadWriteLock) {
    def read[T](t: ⇒ T) = {
l.readLock.lock
try t
finally l.readLock.unlock
}
    def write[T](t: ⇒ T) = {
l.writeLock.lock
try t
finally l.writeLock.unlock
}
}
case class LockKey(id: UUID = java.util.UUID.randomUUID())
}
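// Usage sketch for the decorators above, with plain JDK primitives:
//
//   import java.util.concurrent.Semaphore
//   import java.util.concurrent.locks.{ ReentrantLock, ReentrantReadWriteLock }
//   import org.openmole.tool.lock._
//
//   val lock = new ReentrantLock()
//   val n = lock { 1 + 1 }                  // lock() ... unlock() around the block
//
//   val sem = new Semaphore(1)
//   sem { println("exclusive section") }    // acquire() ... release()
//
//   val rw = new ReentrantReadWriteLock()
//   val v = rw.read { 42 }                  // readLock-protected
//   rw.write { /* mutate shared state */ }  // writeLock-protected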
| openmole/openmole | openmole/third-parties/org.openmole.tool.lock/src/main/scala/org/openmole/tool/lock/package.scala | Scala | agpl-3.0 | 1,614 |
package org.openurp.edu.eams.teach.program.major.web.action
import java.util.Date
import org.apache.commons.lang3.ArrayUtils
import org.beangle.commons.collection.Collections
import org.beangle.data.jpa.dao.OqlBuilder
import org.beangle.security.blueprint.User
import com.ekingstar.eams.core.CommonAuditState
import org.openurp.edu.eams.teach.program.Program
import org.openurp.edu.eams.teach.program.helper.ProgramCollector
import org.openurp.edu.eams.teach.program.major.MajorPlan
import org.openurp.edu.eams.teach.program.major.MajorPlanComment
import org.openurp.edu.eams.teach.program.major.guard.MajorProgramOperateGuard
import org.openurp.edu.eams.teach.program.major.guard.MajorProgramOperateType
import org.openurp.edu.eams.teach.program.major.model.MajorPlanCommentBean
import org.openurp.edu.eams.teach.program.major.service.MajorPlanAuditService
//remove if not needed
class MajorPlanAuditAction extends MajorPlanSearchAction {
var majorPlanAuditService: MajorPlanAuditService = _
var majorProgramBasicGuard: MajorProgramOperateGuard = _
def index(): String = {
setDataRealm(hasStdTypeCollege)
put("educations", getEducations)
put("stateList", CommonAuditState.values)
put("SUBMITTED", CommonAuditState.SUBMITTED)
put("ACCEPTED", CommonAuditState.ACCEPTED)
put("REJECTED", CommonAuditState.REJECTED)
forward()
}
override def info(): String = {
super.info()
put("SUBMITTED", CommonAuditState.SUBMITTED)
put("ACCEPTED", CommonAuditState.ACCEPTED)
put("REJECTED", CommonAuditState.REJECTED)
forward()
}
override def search(): String = {
if (Collections.isEmpty(getProjects) || Collections.isEmpty(getDeparts) ||
Collections.isEmpty(getStdTypes)) {
      return forwardError("Sorry, you do not have permission!")
}
val query = majorPlanSearchHelper.buildPlanQuery()
query.where("plan.program.major.project in (:projects)", getProjects)
.where("plan.program.department in (:departs)", getDeparts)
.where("plan.program.stdType in (:stdTypes)", getStdTypes)
if (Collections.isNotEmpty(getEducations)) {
query.where("plan.program.education in (:educations)", getEducations)
}
val plans = entityDao.search(query)
put("plans", plans)
put("SUBMITTED", CommonAuditState.SUBMITTED)
put("ACCEPTED", CommonAuditState.ACCEPTED)
forward()
}
def readyAddReturnReason(): String = {
val planIds = getLongIds("plan")
if (ArrayUtils.isEmpty(planIds)) {
return forwardError("error.model.ids.needed")
}
val stateStr = get("auditState")
if (null == stateStr) {
return forwardError("error.parameters.needed")
}
put("planId", planIds(0))
put("auditState", stateStr)
forward()
}
def planReturnReasonList(): String = {
val planIds = getLongIds("plan")
if (ArrayUtils.isEmpty(planIds)) {
return forwardError("error.model.ids.needed")
}
val query = OqlBuilder.from(classOf[MajorPlanComment], "re")
query.where("re.majorPlan.id =" + planIds(0))
query.limit(getPageLimit)
val planReturnReasons = entityDao.search(query)
put("planReturnReasons", planReturnReasons)
forward()
}
def audit(): String = {
val planIds = getLongIds("plan")
if (ArrayUtils.isEmpty(planIds)) {
return forwardError("error.model.ids.needed")
}
val stateStr = get("auditState")
if (null == stateStr) {
return forwardError("error.parameters.needed")
}
val state = CommonAuditState.valueOf(stateStr.toUpperCase())
val plans = entityDao.get(classOf[MajorPlan], planIds)
guard(MajorProgramOperateType.AUDIT, plans)
majorPlanAuditService.audit(plans, state)
if (stateStr == "REJECTED") {
val reason = new MajorPlanCommentBean()
reason.setReason(get("reason"))
reason.setMajorPlan(plans.get(0).asInstanceOf[MajorPlan])
val date = new Date()
reason.setCreatedAt(date)
reason.setUpdatedAt(date)
entityDao.saveOrUpdate(reason)
}
redirect("search", "info.save.success")
}
def revokedAudit(): String = {
val planIds = getLongIds("plan")
if (ArrayUtils.isEmpty(planIds)) {
return forwardError("error.model.ids.needed")
}
val plans = entityDao.get(classOf[MajorPlan], planIds)
guard(MajorProgramOperateType.AUDIT, plans)
majorPlanAuditService.revokeAccepted(plans)
redirect("search", "info.save.success")
}
private def fillDataRealmContext(context: Map[String, Any]) {
context.put("realm/checkMe", true)
context.put("realm/user", entityDao.get(classOf[User], getUserId))
context.put("realm/project", getProject)
context.put("realm/stdTypes", getStdTypes)
context.put("realm/departs", getDeparts)
context.put("realm/educations", getEducations)
}
private def guard(operType: MajorProgramOperateType, plans: List[MajorPlan]) {
val context = Collections.newMap[Any]
fillDataRealmContext(context)
val programs = Collections.collect(plans, ProgramCollector.INSTANCE).asInstanceOf[List[_]]
majorProgramBasicGuard.guard(operType, programs, context)
}
private def guard(operType: MajorProgramOperateType, plan: MajorPlan) {
val context = Collections.newMap[Any]
fillDataRealmContext(context)
majorProgramBasicGuard.guard(operType, plan.getProgram, context)
}
}
| openurp/edu-eams-webapp | plan/src/main/scala/org/openurp/edu/eams/teach/program/major/web/action/MajorPlanAuditAction.scala | Scala | gpl-3.0 | 5,345 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.internal.scaladsl.persistence.jdbc
import java.sql.Connection
import akka.persistence.query.Offset
import akka.stream.scaladsl.Flow
import akka.{ Done, NotUsed }
import com.lightbend.lagom.internal.persistence.jdbc.{ SlickOffsetDao, SlickOffsetStore, SlickProvider }
import com.lightbend.lagom.scaladsl.persistence.ReadSideProcessor.ReadSideHandler
import com.lightbend.lagom.scaladsl.persistence.jdbc.JdbcReadSide
import com.lightbend.lagom.scaladsl.persistence.{ AggregateEvent, AggregateEventTag, EventStreamElement }
import org.slf4j.LoggerFactory
import scala.concurrent.{ ExecutionContext, Future }
import scala.reflect.ClassTag
/**
* INTERNAL API
*/
private[lagom] class JdbcReadSideImpl(slick: SlickProvider, offsetStore: SlickOffsetStore)(implicit val ec: ExecutionContext) extends JdbcReadSide {
private val log = LoggerFactory.getLogger(this.getClass)
override def builder[Event <: AggregateEvent[Event]](readSideId: String): ReadSideHandlerBuilder[Event] = new ReadSideHandlerBuilder[Event] {
var globalPrepare: Connection => Unit = { _ => () }
var prepare: (Connection, AggregateEventTag[Event]) => Unit = (_, _) => ()
var eventHandlers = Map.empty[Class[_ <: Event], (Connection, EventStreamElement[_ <: Event]) => Unit]
override def setGlobalPrepare(callback: Connection => Unit): ReadSideHandlerBuilder[Event] = {
globalPrepare = callback
this
}
override def setPrepare(callback: (Connection, AggregateEventTag[Event]) => Unit): ReadSideHandlerBuilder[Event] = {
prepare = callback
this
}
override def setEventHandler[E <: Event: ClassTag](handler: (Connection, EventStreamElement[E]) => Unit): ReadSideHandlerBuilder[Event] = {
val eventClass = implicitly[ClassTag[E]].runtimeClass.asInstanceOf[Class[Event]]
eventHandlers += (eventClass -> handler.asInstanceOf[(Connection, EventStreamElement[_ <: Event]) => Unit])
this
}
override def build(): ReadSideHandler[Event] = new JdbcReadSideHandler[Event](readSideId, globalPrepare, prepare, eventHandlers)
}
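  // Usage sketch for the builder above (MyEvent, createTable and insertRow are
  // hypothetical user-side definitions, not part of this module):
  //
  //   val handler: ReadSideHandler[MyEvent] =
  //     builder[MyEvent]("my-read-side")
  //       .setGlobalPrepare(conn => createTable(conn))
  //       .setEventHandler[MyEvent]((conn, element) => insertRow(conn, element.event))
  //       .build()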
private class JdbcReadSideHandler[Event <: AggregateEvent[Event]](
readSideId: String,
globalPrepareCallback: Connection => Any,
prepareCallback: (Connection, AggregateEventTag[Event]) => Any,
eventHandlers: Map[Class[_ <: Event], (Connection, EventStreamElement[_ <: Event]) => Any]
) extends ReadSideHandler[Event] {
import slick.profile.api._
@volatile
private var offsetDao: SlickOffsetDao = _
override def globalPrepare(): Future[Done] =
slick.ensureTablesCreated().flatMap { _ =>
slick.db.run {
SimpleDBIO { ctx =>
globalPrepareCallback(ctx.connection)
Done.getInstance()
}
}
}
override def prepare(tag: AggregateEventTag[Event]): Future[Offset] =
for {
_ <- slick.db.run {
SimpleDBIO { ctx =>
prepareCallback(ctx.connection, tag)
}
}
dao <- offsetStore.prepare(readSideId, tag.tag)
} yield {
offsetDao = dao
dao.loadedOffset
}
override def handle(): Flow[EventStreamElement[Event], Done, NotUsed] =
Flow[EventStreamElement[Event]]
.mapAsync(parallelism = 1) { element =>
val dbAction = eventHandlers.get(element.event.getClass)
.map { handler =>
val castedHandler = handler.asInstanceOf[(Connection, EventStreamElement[Event]) => Unit]
SimpleDBIO { ctx => castedHandler(ctx.connection, element) }
}
.getOrElse {
// fallback to empty action if no handler is found
if (log.isDebugEnabled) log.debug("Unhandled event [{}]", element.event.getClass.getName)
DBIO.successful(())
}
.flatMap { _ =>
offsetDao.updateOffsetQuery(element.offset)
}
.map(_ => Done)
slick.db.run(dbAction.transactionally)
}
}
}
| rstento/lagom | persistence-jdbc/scaladsl/src/main/scala/com/lightbend/lagom/internal/scaladsl/persistence/jdbc/JdbcReadSideImpl.scala | Scala | apache-2.0 | 4,136 |
/**
* Copyright (c) 2012-2013, Tomasz Kaczmarzyk.
*
* This file is part of BeanDiff.
*
* BeanDiff is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* BeanDiff is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with BeanDiff; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
package org.beandiff.support
import java.lang.Number
import java.lang.Object
import org.beandiff.support.ObjectSupport.RichObject
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import org.scalatest.matchers.ShouldMatchers
import org.beandiff.beans.DescendantJavaBean
@RunWith(classOf[JUnitRunner])
class ObjectSupportTest extends FunSuite with ShouldMatchers {
test("String.allClasses == (String, Object) ") {
"aaa".allClasses should have size 2
assert("aaa".allClasses.contains(classOf[String]))
assert("aaa".allClasses.contains(classOf[Object]))
}
test("Integer.allClasses == (Integer, Number, Object)") {
val classes = 1.allClasses
classes should have size 3
assert(classes.contains(classOf[Integer]))
assert(classes.contains(classOf[Number]))
assert(classes.contains(classOf[Object]))
}
test("should resolve field from supertype") {
val o = new DescendantJavaBean("bart", 10, "bartman")
o.getField("name") should not be null
}
test("should find field in supertype") {
val o = new DescendantJavaBean("bart", 10, "bartman")
o.hasField("name") should be === true
}
} | tkaczmarzyk/beandiff | src/test/scala/org/beandiff/support/ObjectSupportTest.scala | Scala | lgpl-3.0 | 2,015 |
package org.scalafmt.dynamic
import java.io.{ByteArrayOutputStream, PrintStream, PrintWriter}
import java.nio.charset.StandardCharsets
import java.nio.file.{Files, Path, Paths}
import java.nio.file.attribute.FileTime
import org.scalafmt.interfaces.{PositionException, Scalafmt, ScalafmtReporter}
import PositionSyntax._
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
import scala.reflect.ClassTag
import scala.{meta => m}
import munit.FunSuite
import munit.Location
class DynamicSuite extends FunSuite {
import DynamicSuite._
class Format(name: String, cfgFunc: ScalafmtDynamic => ScalafmtDynamic) {
val download = new ByteArrayOutputStream()
def downloadLogs: String = download.toString()
val out = new ByteArrayOutputStream()
val parsed = mutable.Map.empty[String, Int]
def parsedCount: Int = parsed.values.sum
val missingVersions = ListBuffer.empty[String]
val reporter: ScalafmtReporter =
new ConsoleScalafmtReporter(new PrintStream(out)) {
override def downloadWriter(): PrintWriter = new PrintWriter(download)
override def error(file: Path, e: Throwable): Unit =
e match {
case p: PositionException =>
val input = m.Input.VirtualFile(file.toString, p.code)
val pos =
m.Position.Range(
input,
p.startLine,
p.startCharacter,
p.endLine,
p.endCharacter
)
val formattedMessage = pos.formatMessage("error", p.shortMessage)
out.write(formattedMessage.getBytes(StandardCharsets.UTF_8))
case _ =>
super.error(file, e)
}
override def missingVersion(
config: Path,
defaultVersion: String
): Unit = {
missingVersions += defaultVersion
}
override def parsedConfig(
config: Path,
scalafmtVersion: String
): Unit = {
val n = parsed.getOrElse(scalafmtVersion, 0)
parsed(scalafmtVersion) = n + 1
}
override def trimStacktrace(e: Throwable): Unit = {
e.setStackTrace(
e.getStackTrace.takeWhile(!_.getClassName.contains("DynamicSuite"))
)
}
}
val dynamic: ScalafmtDynamic = cfgFunc(
Scalafmt
.create(this.getClass.getClassLoader)
.withReporter(reporter)
.asInstanceOf[ScalafmtDynamic]
)
val config = Files.createTempFile("scalafmt", ".scalafmt.conf")
val filename = Paths.get(name + ".scala")
var timestamps = 100L
def setConfig(newConfig: String): Unit = {
Files.write(this.config, newConfig.getBytes(StandardCharsets.UTF_8))
timestamps += 100000
Files.setLastModifiedTime(this.config, FileTime.fromMillis(timestamps))
}
def setVersion(newVersion: String, dialect: String, rest: String*): Unit = {
val dialectLine = Option(dialect).fold("")(x => s"runner.dialect = $x")
setConfig(s"""
|version=$newVersion
|$dialectLine
        |${rest.mkString("\n")}
|""".stripMargin)
}
def relevant: String = {
out.toString.replace(config.toString, "path/.scalafmt.conf")
}
def errors: String = {
out.toString.linesIterator
.filter(_.startsWith("error"))
        .mkString("\n")
}
def assertNotIgnored(filename: String)(implicit loc: Location): Unit = {
assertFormat(
"object A { }",
"object A {}\\n",
Paths.get(filename)
)
}
def assertIgnored(filename: String): Unit = {
out.reset()
val file = Paths.get(filename)
val original = "object A { }"
val obtained = dynamic.format(config, file, original)
      val outString = out.toString().replaceAll("\\\\", "/")
assert(outString.contains(s"file excluded: $filename"))
assertNoDiff(obtained, original)
}
def assertFormat()(implicit loc: Location): Unit = {
assertFormat("object A { }", "object A {}\\n")
}
def assertFormat(
original: String,
expected: String,
file: Path = filename
)(implicit loc: Location): Unit = {
out.reset()
val obtained = dynamic.format(config, file, original)
if (errors.nonEmpty) {
assertNoDiff(out.toString(), "", "Reporter had errors")
}
assertNoDiff(obtained, expected)
}
def assertMissingVersion()(implicit loc: Location): Unit = {
out.reset()
missingVersions.clear()
intercept[ScalafmtDynamicError.ConfigMissingVersion] {
dynamic.format(config, filename, "object A")
}
assertEquals(out.toString(), "")
assert(missingVersions.nonEmpty)
}
def assertThrows[A <: Throwable: ClassTag](
code: String = "object A { }"
)(implicit loc: Location): A = {
out.reset()
intercept[A] {
dynamic.format(config, filename, code)
}
}
def assertError(expected: String)(implicit loc: Location): Unit = {
assertError("object A { }", expected)
}
def assertError(
code: String,
expected: String,
path: Path = filename
)(implicit loc: Location): Unit = {
out.reset()
val obtained = dynamic.format(config, path, code)
assertNoDiff(relevant, expected)
assertNoDiff(obtained, obtained, "Formatter did not error")
}
}
def check(
name: String,
cfgFunc: ScalafmtDynamic => ScalafmtDynamic = identity
)(fn: Format => Unit): Unit = {
test(name) {
val format = new Format(name, cfgFunc)
try fn(format)
finally format.dynamic.clear()
}
}
private val testedVersions = Seq(
"3.1.2",
"2.7.5",
"2.5.3",
"2.0.0-RC4",
"1.6.0-RC4",
"1.5.1",
"1.5.0",
"1.4.0",
"1.3.0",
"1.2.0",
"1.1.0",
"1.0.0-RC4",
"1.0.0"
)
def checkExhaustive(name: String)(config: String => String)(
fn: (Format, String) => Unit
): Unit = {
testedVersions.foreach { version =>
test(s"$name [v=$version]") {
val format = new Format(name, identity)
val dialect = if (version < "3.0.0") null else "scala213"
try {
format.setVersion(version, dialect, config(version))
fn(format, version)
} finally format.dynamic.clear()
}
}
}
def latest = BuildInfo.previousStable
def checkVersion(version: String, dialect: String): Unit = {
check(s"v$version") { f =>
f.setVersion(version, dialect)
f.assertFormat("object A { }", "object A {}\\n")
}
}
checkVersion(latest, "scala212")
checkVersion("1.5.1", "scala211")
checkVersion("1.0.0", "scala211")
// checkVersion("0.2.8") // fails for now
check("parse-error") { f =>
def check(version: String, dialect: String): Unit = {
f.setVersion(version, dialect)
val dialectError = getDialectError(version, dialect)
val code = s"""object object A { val version = "$version" }"""
f.assertError(
code,
s"""|parse-error.scala:1:8: error:$dialectError identifier expected but object found
|$code
| ^^^^^^""".stripMargin
)
}
check(latest, "scala212")
check("1.0.0", "Scala211")
}
check("missing-version") { f => f.assertMissingVersion() }
check("excluded-file") { f =>
val config = """
|project.includeFilters = [
| ".*Spec\\\\.scala$"
|]
|project.excludeFilters = [
| "UserSpec\\\\.scala$"
|]
|""".stripMargin
def check(version: String): Unit = {
f.setVersion(version, "scala211", config)
f.assertNotIgnored("path/FooSpec.scala")
f.assertIgnored("path/App.scala")
f.assertIgnored("path/UserSpec.scala")
}
check(latest)
check("1.0.0")
}
check("ignore-exclude-filters", _.withRespectProjectFilters(false)) { f =>
val config = """
|project.includeFilters = [
| ".*Spec\\\\.scala$"
|]
|project.excludeFilters = [
| "UserSpec\\\\.scala$"
|]
|""".stripMargin
def check(version: String): Unit = {
f.setVersion(version, "scala211", config)
f.assertNotIgnored("path/App.pm")
f.assertNotIgnored("path/App.scala")
f.assertNotIgnored("path/UserSpec.scala")
}
check(latest)
}
check("config-error") { f =>
f.setVersion(latest, "scala212", "max = 70")
val err = f.assertThrows[ScalafmtDynamicError.ConfigParseError]().getMessage
assert(
err.contains("error: found option 'max' which wasn't expected"),
err
)
}
check("config-cache") { f =>
f.setVersion(latest, "scala211")
f.assertFormat()
f.assertFormat()
assertEquals(f.parsedCount, 1, f.parsed)
f.setConfig("invalid")
val parseError = f.assertThrows[ScalafmtDynamicError.ConfigParseError]()
assert(
parseError.getMessage
.contains("Key 'invalid' may not be followed by token: end of file")
)
f.setConfig("maxColumn = 40")
f.assertMissingVersion()
f.setVersion(latest, "scala212", "maxColumn = 40")
f.assertFormat()
assertEquals(f.parsedCount, 2, f.parsed)
f.assertFormat()
assertEquals(f.parsedCount, 2, f.parsed)
f.setVersion("1.0.0", "scala211", "maxColumn = 40")
f.assertFormat()
assertEquals(f.parsedCount, 3, f.parsed)
f.assertFormat()
assertEquals(f.parsedCount, 3, f.parsed)
assertEquals(f.parsed.toMap, Map("1.0.0" -> 1, latest -> 2))
}
check("wrong-version") { f =>
f.setVersion("1.0", "scala211")
val error = f.assertThrows[ScalafmtDynamicError.ConfigInvalidVersion]()
assertEquals(error.getMessage, "Invalid version: 1.0")
assertEquals(f.downloadLogs, "")
}
check("sbt") { f =>
def check(version: String, dialect: String): Unit = {
f.setVersion(version, dialect, """project.includeFilters = [ ".*" ]""")
val dialectError = getDialectError(version, dialect)
List("build.sbt", "build.sc").foreach { filename =>
val path = Paths.get(filename)
// test sbt allows top-level terms
f.assertFormat(
s"""lazy val x = "$version"""",
s"""lazy val x = "$version"\\n""",
path
)
// test scala doesn't allow top-level terms (not passing path here)
f.assertError(
"lazy val x = project",
s"""|sbt.scala:1:1: error:$dialectError classes cannot be lazy
|lazy val x = project
|^^^^""".stripMargin
)
// check wrapped literals, supported in sbt using scala 2.13+
val wrappedLiteral = "object a { val x: Option[0] = Some(0) }"
def assertIsWrappedLiteralFailure(): Unit =
f.assertError(
wrappedLiteral,
s"""$filename:1:28: error: identifier expected but integer constant found
|$wrappedLiteral
| ^""".stripMargin,
path
)
def assertIsWrappedLiteralSuccess(): Unit =
f.assertFormat(
wrappedLiteral,
            wrappedLiteral.replaceAll(" +", " ").trim + "\n",
path
)
if (version > "2.0")
assertIsWrappedLiteralSuccess()
else
assertIsWrappedLiteralFailure()
}
}
check(latest, "scala213")
check("1.2.0", "Scala211")
}
check("no-config") { f =>
Files.delete(f.config)
val thrown = f.assertThrows[ScalafmtDynamicError.ConfigDoesNotExist]()
assert(thrown.getMessage.contains("Missing config"))
}
check("intellij-default-config") { f: Format =>
val version = ScalafmtVersion(1, 5, 1)
f.setVersion(version.toString, "Scala211")
f.assertFormat()
val cache = f.dynamic.moduleLoader match {
case x: ScalafmtModuleLoader.CachedProxy => x.cache
case x =>
fail("ReflectResolver is not cached: " + x.getClass.getSimpleName)
}
cache.getFromCache(version) match {
case Some(Right(x)) => assert(x.intellijScalaFmtConfig.nonEmpty)
case _ => fail(s"failed cache.getFromCache($version)")
}
}
checkExhaustive("continuation-indent-callSite-and-defnSite") { _ =>
"continuationIndent { callSite = 5, defnSite = 3 }"
} { (f, _) =>
val original =
"""class A {
| function1(
| argument1,
| ""
| )
|
| def function2(
| argument1: Type1
| ): ReturnType
|}
""".stripMargin
val expected =
"""class A {
| function1(
| argument1,
| ""
| )
|
| def function2(
| argument1: Type1
| ): ReturnType
|}
|""".stripMargin
f.assertFormat(original, expected)
}
checkExhaustive("hasRewriteRules-and-withoutRewriteRules") { _ =>
"rewrite.rules = [RedundantBraces]"
} { (f, version) =>
f.assertFormat()
val cache = f.dynamic.configLoader match {
case x: ScalafmtConfigLoader.CachedProxy => x.cache
case x =>
fail("ReflectConfigResolver is not cached: " + x.getClass.getSimpleName)
}
val configOpt = cache
.getFromCache(f.config)
.collect { case Right((cfg, _)) => cfg }
assert(configOpt.nonEmpty)
val config = configOpt.get
assert(config.hasRewriteRules)
val configWithoutRewrites = config.withoutRewriteRules
assertNotEquals(config, configWithoutRewrites)
assert(!configWithoutRewrites.hasRewriteRules)
}
check("invalid config in 3.0.0-RC6") { f =>
f.setConfig(
s"""|version=3.0.0-RC6
|align=does-not-exist
|""".stripMargin
)
val thrown = f.assertThrows[ScalafmtDynamicError.ConfigParseError]()
assert(
thrown.getMessage.startsWith(
"Invalid config: <input>:3:0 error: Type mismatch;"
)
)
}
check("invalid config in 2.7.5") { f =>
f.setConfig(
s"""|version=2.7.5
|align=does-not-exist
|""".stripMargin
)
val thrown = f.assertThrows[ScalafmtDynamicError.ConfigParseError]()
assert(thrown.getMessage.startsWith("Invalid config: Type mismatch;"))
}
check("invalid version - current") { f =>
f.setConfig(
s"""|version=current
|""".stripMargin
)
val error =
f.assertThrows[ScalafmtDynamicError.ConfigInvalidVersion]().getMessage
assertEquals(error, "Invalid version: current")
}
check("invalid version - missing") { f =>
f.setConfig(
s"""|maxColumn = 40
|""".stripMargin
)
val error =
f.assertThrows[ScalafmtDynamicError.ConfigMissingVersion]().getMessage
assertEquals(error, "Missing version")
}
private def assertDynamicConfig(
fmt: Format
)(f: ScalafmtReflectConfig => Unit): Unit =
fmt.dynamic.resolveConfig(fmt.config) match {
case Left(e) => fail("failed to load config", e)
case Right(cfg) => f(cfg)
}
private def checkDynamicConfig(
name: String,
version: String,
dialect: String,
rest: String*
)(f: ScalafmtReflectConfig => Unit): Unit = {
check(s"$name [v=$version d=$dialect]") { fmt =>
fmt.setVersion(version, dialect, rest: _*)
assertDynamicConfig(fmt)(f)
}
}
checkExhaustive("check project.git=true") { _ => "project.git = true" } {
(f, _) => assertDynamicConfig(f)(x => assertEquals(x.projectIsGit, true))
}
checkExhaustive("check project.git=false") { _ => "project.git = false" } {
(f, _) => assertDynamicConfig(f)(x => assertEquals(x.projectIsGit, false))
}
checkExhaustive("check project.git missing") { _ => "" } { (f, _) =>
assertDynamicConfig(f)(x => assertEquals(x.projectIsGit, false))
}
checkDynamicConfig(
s"check indent.main",
"3.0.0",
"scala211",
s"indent.main = 3"
) { cfg =>
assertEquals(cfg.indentMain, Some(3))
assertEquals(cfg.indentCallSite, Some(2))
assertEquals(cfg.indentDefnSite, Some(4))
}
Seq(("3.0.0", "indent"), ("2.5.3", "continuationIndent"))
.foreach { case (version, section) =>
checkDynamicConfig(
s"check $section.{call,defn}Site",
version,
"scala211",
s"$section.callSite = 3",
s"$section.defnSite = 5"
) { cfg =>
assertEquals(cfg.indentMain, Some(2))
assertEquals(cfg.indentCallSite, Some(3))
assertEquals(cfg.indentDefnSite, Some(5))
}
}
}
private object DynamicSuite {
def getDialectError(version: String, dialect: String) =
if (version >= "3.1.0") s" [dialect $dialect]" else ""
}
| scalameta/scalafmt | scalafmt-dynamic/src/test/scala/org/scalafmt/dynamic/DynamicSuite.scala | Scala | apache-2.0 | 16,623 |
/*
* Copyright 2011-2018 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.charts.stats.buffers
import scala.collection.mutable
import io.gatling.commons.util.Maps._
import io.gatling.charts.stats.UserRecord
import io.gatling.core.stats.IntVsTimePlot
import io.gatling.core.stats.message.{ End, Start }
private[stats] object SessionDeltas {
val Empty = SessionDeltas(0, 0)
}
private[stats] case class SessionDeltas(starts: Int, ends: Int) {
def addStart() = copy(starts = starts + 1)
def addEnd() = copy(ends = ends + 1)
}
private[stats] class SessionDeltaBuffer(minTimestamp: Long, maxTimestamp: Long, buckets: Array[Int], runDurationInSeconds: Int) {
private val startCounts: Array[Int] = Array.fill(runDurationInSeconds)(0)
private val endCounts: Array[Int] = Array.fill(runDurationInSeconds)(0)
def addStart(second: Int): Unit = startCounts(second) += 1
def addEnd(second: Int): Unit = endCounts(second) += 1
def endOrphan(): Unit = addEnd(runDurationInSeconds - 1)
private val bucketWidthInMillis = ((maxTimestamp - minTimestamp) / buckets.length).toInt
private def secondToBucket(second: Int): Int = math.min(second * 1000 / bucketWidthInMillis, buckets.length - 1)
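  // Active sessions for a given second = active sessions of the previous second
  // - sessions that ended during the previous second
  // + sessions that started during this second;
  // the per-second counts are then grouped into buckets and averaged per bucket.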
def distribution: List[IntVsTimePlot] = {
val eachSecondActiveSessions = Array.fill(runDurationInSeconds)(0)
for (second <- 0 until runDurationInSeconds) {
val previousSessions = if (second == 0) 0 else eachSecondActiveSessions(second - 1)
val previousEnds = if (second == 0) 0 else endCounts(second - 1)
val bucketSessions = previousSessions - previousEnds + startCounts(second)
eachSecondActiveSessions.update(second, bucketSessions)
}
eachSecondActiveSessions.zipWithIndex.iterator
.map { case (sessions, second) => second -> sessions }
.groupByKey(secondToBucket)
.map {
case (bucket, sessionCounts) =>
val averageSessionCount = sessionCounts.sum / sessionCounts.size
val time = buckets(bucket)
IntVsTimePlot(time, averageSessionCount)
}.toList.sortBy(_.time)
}
}
private[stats] trait SessionDeltaPerSecBuffers {
this: Buckets with RunTimes =>
private val sessionDeltaPerSecBuffers = mutable.Map.empty[Option[String], SessionDeltaBuffer]
private val orphanStartRecords = mutable.Map.empty[String, UserRecord]
private val runDurationInSeconds = math.ceil((maxTimestamp - minTimestamp) / 1000.0).toInt
def getSessionDeltaPerSecBuffers(scenarioName: Option[String]): SessionDeltaBuffer =
sessionDeltaPerSecBuffers.getOrElseUpdate(scenarioName, new SessionDeltaBuffer(minTimestamp, maxTimestamp, buckets, runDurationInSeconds))
private def timestamp2SecondOffset(timestamp: Long) = {
val millisOffset = timestamp - minTimestamp
val includeRightBorderCorrection =
if (millisOffset > 0 && millisOffset % 1000 == 0) {
1
} else {
0
}
(millisOffset / 1000).toInt - includeRightBorderCorrection
}
def addSessionBuffers(record: UserRecord): Unit = {
record.event match {
case Start =>
val startSecond = timestamp2SecondOffset(record.start)
getSessionDeltaPerSecBuffers(None).addStart(startSecond)
getSessionDeltaPerSecBuffers(Some(record.scenario)).addStart(startSecond)
orphanStartRecords += record.userId -> record
case End =>
val endSecond = timestamp2SecondOffset(record.end)
getSessionDeltaPerSecBuffers(None).addEnd(endSecond)
getSessionDeltaPerSecBuffers(Some(record.scenario)).addEnd(endSecond)
orphanStartRecords -= record.userId
}
}
def endOrphanUserRecords(): Unit =
orphanStartRecords.values.foreach { start =>
getSessionDeltaPerSecBuffers(None).endOrphan()
getSessionDeltaPerSecBuffers(Some(start.scenario)).endOrphan()
}
}
| wiacekm/gatling | gatling-charts/src/main/scala/io/gatling/charts/stats/buffers/SessionDeltaPerSecBuffers.scala | Scala | apache-2.0 | 4,387 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.twitter
import twitter4j._
import twitter4j.auth.Authorization
import twitter4j.conf.ConfigurationBuilder
import twitter4j.auth.OAuthAuthorization
import org.apache.spark.streaming._
import org.apache.spark.streaming.dstream._
import org.apache.spark.storage.StorageLevel
import org.apache.spark.Logging
import org.apache.spark.streaming.receiver.Receiver
/* A stream of Twitter statuses, potentially filtered by one or more keywords.
*
* @constructor create a new Twitter stream using the supplied Twitter4J authentication credentials.
* An optional set of string filters can be used to restrict the set of tweets. The Twitter API is
* such that this may return a sampled subset of all tweets during each interval.
*
* If no Authorization object is provided, initializes OAuth authorization using the system
* properties twitter4j.oauth.consumerKey, .consumerSecret, .accessToken and .accessTokenSecret.
*/
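// Usage sketch via the public helper in this package (assumes the twitter4j.oauth.*
// system properties are set when no Authorization is supplied):
//
//   val tweets = TwitterUtils.createStream(ssc, None, Seq("spark"))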
private[streaming]
class TwitterInputDStream(
@transient ssc_ : StreamingContext,
twitterAuth: Option[Authorization],
filters: Seq[String],
storageLevel: StorageLevel
) extends ReceiverInputDStream[Status](ssc_) {
private def createOAuthAuthorization(): Authorization = {
new OAuthAuthorization(new ConfigurationBuilder().build())
}
private val authorization = twitterAuth.getOrElse(createOAuthAuthorization())
override def getReceiver(): Receiver[Status] = {
new TwitterReceiver(authorization, filters, storageLevel)
}
}
private[streaming]
class TwitterReceiver(
twitterAuth: Authorization,
filters: Seq[String],
storageLevel: StorageLevel
) extends Receiver[Status](storageLevel) with Logging {
@volatile private var twitterStream: TwitterStream = _
@volatile private var stopped = false
def onStart() {
try {
val newTwitterStream = new TwitterStreamFactory().getInstance(twitterAuth)
newTwitterStream.addListener(new StatusListener {
def onStatus(status: Status) = {
store(status)
}
// Unimplemented
def onDeletionNotice(statusDeletionNotice: StatusDeletionNotice) {}
def onTrackLimitationNotice(i: Int) {}
def onScrubGeo(l: Long, l1: Long) {}
def onStallWarning(stallWarning: StallWarning) {}
def onException(e: Exception) {
if (!stopped) {
restart("Error receiving tweets", e)
}
}
})
val query = new FilterQuery
if (filters.size > 0) {
query.track(filters.toArray)
newTwitterStream.filter(query)
} else {
newTwitterStream.sample()
}
setTwitterStream(newTwitterStream)
logInfo("Twitter receiver started")
stopped = false
} catch {
case e: Exception => restart("Error starting Twitter stream", e)
}
}
def onStop() {
stopped = true
setTwitterStream(null)
logInfo("Twitter receiver stopped")
}
private def setTwitterStream(newTwitterStream: TwitterStream) = synchronized {
if (twitterStream != null) {
twitterStream.shutdown()
}
twitterStream = newTwitterStream
}
}
| trueyao/spark-lever | external/twitter/src/main/scala/org/apache/spark/streaming/twitter/TwitterInputDStream.scala | Scala | apache-2.0 | 3,937 |
package hotpepper4s
/**
* @author ponkotuy
*/
trait Budget {
def code: String
def name: String
def average: String
def codeName: CodeName = CodeName(code, name)
}
object Budget {
case class NormalBudget(code: String, name: String, average: String) extends Budget
case class LimitedBudget(code: String, name: String) extends Budget {
    def average: String = throw new NotImplementedError("LimitedBudget does not have an average.")
}
} | ponkotuy/hotpepper4s | src/main/scala/hotpepper4s/Budget.scala | Scala | mit | 451 |
object main extends App {
/*
* map:
* Operation "map" takes operand List[T]
* and a function of type T => U.
* It returns the list that resuts from applying the function f
* to each list List[T] element.
*/
val x1 = List(1, 2, 3)
val x2 = x1.map(_ + 1)
println(x1)
println(x2)
val x3 = List("the", "naughty", "kitty")
val x4 = x3.map(_.length)
val x5 = x3.map(_.toList.reverse.mkString)
println(x3)
println(x4)
println(x5)
/*
* flatMap:
* Operation "flatmap" is similar to "map".
* flatMap operator take a function returning a list of elements
* and concatenates all of the lists into a single list.
*/
def f(x:String): List[(Int, String)] = {
List((x.length, x))
}
val x6 = x3.flatMap(x => f(x))
println(x6)
println(x6(0)._1)
// Example string -> list of char -> list of all characters
def stringToListChar(s: String): List[Char] = {
s.toList
}
val x7 = x3.flatMap(x => stringToListChar(x))
println(x7)
/*
* forEach:
* Operation "forEach" takes a procedure
* (function that results with Unit).
* The result of the operation "forEach" is unit
*/
val x8 = x3.foreach(println(_))
// Sum example:
var sum: Int = 0
def addToSum(x: Int): Unit = {
sum += x
}
val x9 = List(1, 2, 3, 4, 5)
x9.foreach(addToSum(_))
println(sum)
/*
* filter
*/
val x10 = x9.filter(_ > 3)
println(x10)
/*
* partition:
* Operation "partition" is similar to operation "filter",
   * however, it returns a pair of Lists:
   * the first contains all elements that satisfy the predicate,
   * the second those that do not.
*/
val x11 = x9.partition(_ > 1.5)
println(x11)
/*
* find:
* Operation "find" returns only the first element
* that satisfy the criteria.
* It returns Some(x), where x is the first value
* that satisfies the criteria.
*/
val x12 = x9.find(_ > 1.5)
println(x12)
/*
* takeWhile:
* Operation "takeWhile" takes returns a List of values,
* consist of values from the first value up to value,
* which does not satisfies.
*/
val y = List(1, 2, 3, 4, -5, 6, 7, 8)
val y1 = y.takeWhile(_ > 0)
println(y)
println(y1)
/*
* dropWhile:
* Operation "dropWhile" drops all values from a given List,
* up to a value that satisfies given requirement
* and all values afterward.
*/
println(y.dropWhile(_ > 0))
/*
* span:
* Operation "span" combines operations
* "takeWhile" and "dropWhile".
* It returns a tuple of Lists.
*/
val y2 = y.span(_ > 0)
println(y2)
println(y2._1 == y.takeWhile(_ > 0))
println(y2._2 == y.dropWhile(_ > 0))
/*
* forall:
* Operation "forall" return Boolean,
* if all elements satisfy given requirement.
*/
val y3 = List(1, 2, 3, 4, 5)
println(y3)
println(y3.forall(_ > 0))
println(y3.forall(_ > 3))
/*
* exists:
* Operation "exists" returns Boolean,
* if there is at least one element,
* which satisfies given requirement.
*/
println(y3.exists(_ > 0))
println(y3.exists(_ > 10))
/*
* "/:":
* "/:" is pronounced as "fold left".
* Its functionality can be presented as follows,
* where function f accepts two arguments:
   *  (z /: List(a, b, c))(f) is the same as
* f(f(f(z, a), b), c)
*/
def addTwoInts(x: Int, y: Int): Int = x + y
val list1 = List(1, 2, 3, 4)
val sumList1 = (0 /: list1)(addTwoInts(_, _))
println(sumList1)
/*
* ":\\":
* ":\\" is pronounced as "fold right".
* Its functionality can be presented as follows,
* where function f accepts two arguments:
* (List(a, b, c) :\\ z)(f) is the same as
* f(a, f(b, f(c, z)))
*/
val list2 = List(1.0, 0.5, 4.0)
def divideDoubleByDouble(x: Double, y: Double): Double =
x / y
val dividedResult: Double = (list2 :\\ 2.0)(divideDoubleByDouble(_, _))
val dividedResult2: Double =
divideDoubleByDouble(list2(0),
divideDoubleByDouble(list2(1),
divideDoubleByDouble(list2(2), 2.0)))
println(dividedResult)
println(dividedResult2)
/*
   * sortWith:
   *  Operation "sortWith" sorts the List according to
   *  the given comparison function.
*/
val list3: List[Double] = List(1.0, -2.0, 3.0, 2.5, 1.5)
val list4: List[Double] = list3.sortWith(_ < _)
println(list3)
println(list4)
val list5: List[(Double, Double)] = List((1.0, -1.0), (2.0, -2.0), (3.0, -3.0))
val list6: List[(Double, Double)] = list5.sortWith(_._1 < _._1)
val list7: List[(Double, Double)] = list5.sortWith(_._1 > _._1)
val list8: List[(Double, Double)] = list5.sortWith(_._2 > _._2)
println(list6)
println(list7)
println(list8)
} | arcyfelix/Courses | 18-10-18-Programming-in-Scala-by-Martin-Odersky-Lex-Spoon-and-Bill-Venners/46-Higher-OrderMethodsOnClassList/src/main.scala | Scala | apache-2.0 | 4,601 |
/*
* Copyright 2016 Dennis Vriend
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.dnvriend.disjunction
import com.github.dnvriend.TestSpec
import scalaz._
import Scalaz._
import scala.concurrent.Future
class DisjunctionTest extends TestSpec {
it should "Disjunction (a scalaz Either) has methods to create the right/left projection of a disjunction" in {
"Success!".right shouldBe
\\/-("Success!")
// .left gives problems with ScalaTest's .left
Disjunction.left("Failure!") shouldBe
-\\/("Failure!")
}
it should "The Disjunction Singletion also has the right and left methods" in {
\\/.right("Success!") shouldBe
\\/-("Success!")
\\/.left("Failure!") shouldBe
-\\/("Failure!")
}
it should "Fully Symbolic" in {
\\/-("Success!") shouldBe
\\/-("Success!")
-\\/("Failure!") shouldBe
-\\/("Failure!")
}
it should "Converting Option to Disjunction" in {
None.toRightDisjunction("No object found") shouldBe
-\\/("No object found")
None \\/> "No object found" shouldBe
-\\/("No object found")
Some("My hovercraft is full of eels") \\/> "No object found" shouldBe
\\/-("My hovercraft is full of eels")
Some("My hovercraft is full of eels").toRightDisjunction("No object found") shouldBe
\\/-("My hovercraft is full of eels")
}
it should "Converting Disjunction to Option" in {
\\/-(1).toOption shouldBe
1.some
-\\/("Book not found").toOption shouldBe
None
}
it should "Disjunctions are monads, they are right associated so they fail with the first left, and return only that error message" in {
(for {
numOfBooks โ Option(10) \\/> "Book not in inventory"
prize โ Option(22.00) \\/> "Book not in prize definition"
} yield numOfBooks * prize) shouldBe \\/-(220.00)
(for {
numOfBooks โ none[Int] \\/> "Book not in inventory"
prize โ Option(22.00) \\/> "Book not in prize definition"
} yield numOfBooks * prize) shouldBe -\\/("Book not in inventory")
(for {
numOfBooks โ Option(10) \\/> "Book not in inventory"
prize โ none[Double] \\/> "Book not in prize definition"
} yield numOfBooks * prize) shouldBe -\\/("Book not in prize definition")
}
it should "construct disjunction from a code block that throws nonfatal" in {
val ex: RuntimeException = new RuntimeException("foo")
\\/.fromTryCatchNonFatal {
throw ex
} shouldBe -\\/(ex)
}
it should "construct disjunction from a code block that throws" in {
val ex: RuntimeException = new RuntimeException("foo")
\\/.fromTryCatchThrowable[String, RuntimeException] {
throw ex
} shouldBe -\\/(ex)
}
it should "construct a disjunction from a scala.util.Try success case" in {
scala.util.Try(1).toDisjunction shouldBe \\/-(1)
}
it should "construct a disjunction from a scala.util.Try failure case" in {
scala.util.Try(1 / 0).toDisjunction should matchPattern { case -\\/(_) => }
}
it should "append failure side" in {
\\/.left[String, String]("foo") |+| \\/.left[String, String]("bar") shouldBe \\/.left("foobar")
}
it should "append success side" in {
\\/.right[String, String]("foo") |+| \\/.right[String, String]("bar") shouldBe \\/.right("foobar")
}
it should "sequence a list of disjunctions" in {
// left gives error with the ScalaTest types
List(Disjunction.left("foo"), Disjunction.left("bar"), "baz".right[String]).sequenceU shouldBe -\\/("foo")
}
it should "sequence a list of ValidationNel result in a single (failed) Validation accumulating all errors" in {
List("foo".failureNel[String], "bar".failureNel[String], "baz".successNel[String]).sequenceU shouldBe Failure(NonEmptyList("foo", "bar"))
}
it should "sequence a list of ValidationNel result in a single (success) Validation accumulating all successes" in {
List("foo".successNel[String], "bar".successNel[String], "baz".successNel[String]).sequenceU shouldBe Success(List("foo", "bar", "baz"))
}
it should "sequence a list of ValidationNel result in a single (failed) Validation accumulating all errors converting to a left-disjunction" in {
List("foo".failureNel[String], "bar".failureNel[String], "baz".successNel[String]).sequenceU.disjunction shouldBe -\\/(NonEmptyList("foo", "bar"))
}
it should "sequence a list of ValidationNel result in a single (success) Validation accumulating all errors converting to a right-disjunction" in {
List("foo".successNel[String], "bar".successNel[String], "baz".successNel[String]).sequenceU.disjunction shouldBe \\/-(List("foo", "bar", "baz"))
}
it should "Converting Disjunction to Validation" in {
\\/-("Success!").validationNel[String] shouldBe
Success("Success!")
-\\/("Failure!").validationNel[String] shouldBe
Failure(NonEmptyList("Failure!"))
}
it should "Converted Validations can be folded failure case" in {
NonEmptyList(
\\/-("Success 1").validationNel[String],
\\/-("Success 2").validationNel[String],
-\\/("Failure 1").validationNel[String],
-\\/("Failure 2").validationNel[String],
\\/-("Success 3").validationNel[String],
\\/-("Success 4").validationNel[String]
).foldLeft(List.empty[String].successNel[String]) {
case (acc, v) โ (acc |@| v)(_ :+ _)
} shouldBe Failure(NonEmptyList("Failure 1", "Failure 2"))
}
it should "Converted Validations can be folded success case" in {
NonEmptyList(
\\/-("Success 1").validationNel[String],
\\/-("Success 2").validationNel[String],
\\/-("Success 3").validationNel[String],
\\/-("Success 4").validationNel[String]
).foldLeft(List.empty[String].successNel[String]) {
case (acc, v) โ (acc |@| v)(_ :+ _)
} shouldBe Success(List("Success 1", "Success 2", "Success 3", "Success 4"))
}
it should "map a left side of the disjunction to a type" in {
val x: Future[String \\/ Int] = Future.successful(1.right[String])
x.flatMap {
case \\/-(right) => Future.successful(right)
case -\\/(left) => Future.failed(new RuntimeException(left))
}.futureValue shouldBe 1
// .left gives problems with ScalaTest's .left
Future.successful(Disjunction.left("foo")).flatMap {
case \\/-(right) => Future.successful(right)
case -\\/(left) => Future.failed(new RuntimeException(left))
}.toTry should be a 'failure
}
"Disjunctions without symbols" should "be created for the left case" in {
Disjunction.left[NonEmptyList[String], String](NonEmptyList("foo")) shouldBe NonEmptyList("foo").left
}
it should "be created for the right case" in {
Disjunction.right[NonEmptyList[String], String]("foo") shouldBe "foo".right
}
"DisjunctionNel" should "be created" in {
// note: DisjunctionNel is *NOT* part of Scalaz, strange as a Disjunction[NonEmptyList[String], A] is a very
// common pattern when working with validation ie. "".failureNel.disjunction would create one..
// The TestSpec.scala contains a type alias called DisjunctionNel
// Also, TestSpec.scala contains two implicit conversions to create .leftNel and .rightNel DisjunctionNel types
// just like the .left/.right methods
"foo".leftNel[String] shouldBe a[DisjunctionNel[String, String]]
"foo".leftNel[String] shouldBe NonEmptyList("foo").left
"foo".rightNel shouldBe "foo".right
}
it should "be used on methods" in {
// note: .toNel on String is *NOT* part of Scalaz, it is provided by an extension method
// in TestSpec.scala. Sometimes it is convenient to convert a String to NonEmptyList by means
// of an extension method to make it compatible with the DisjunctionNel[String, A] pattern.
def strToInt(number: String): DisjunctionNel[String, Int] =
Disjunction.fromTryCatchNonFatal(number.toInt)
.leftMap(cause => s"Error while converting '$number' to Int".toNel)
// lets convert some 'numbers', or are they?
List("aa", "bb")
.map(strToInt) // get the disjunction
.traverseU(_.validation) // convert to a List[ValidationNel[String, Int]] with map/sequence combo called 'traverseU'
.disjunction shouldBe // convert to disjunction
NonEmptyList("Error while converting 'aa' to Int", "Error while converting 'bb' to Int").left
}
// of course using Validation as return type for a validation is better
"Validation to validate" should "be used on methods" in {
def strToLong(number: String): ValidationNel[String, Long] =
Disjunction.fromTryCatchNonFatal(number.toLong)
.leftMap(cause => s"Error while converting '$number' to Long".toNel)
.validation
// lets convert some 'numbers', or are they?
List("aa", "bb")
.traverseU(strToLong) // convert to a List[ValidationNel[String, Long]] with map/sequence combo called 'traverseU'
.disjunction shouldBe // convert to disjunction
NonEmptyList("Error while converting 'aa' to Long", "Error while converting 'bb' to Long").left
}
}
| dnvriend/study-category-theory | scalaz-test/src/test/scala/com/github/dnvriend/disjunction/DisjunctionTest.scala | Scala | apache-2.0 | 9,535 |
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.commons.util
import java.io.InputStream
import java.nio.charset.Charset
import java.util.zip.GZIPOutputStream
import scala.util.Using
import io.gatling.commons.util.Io._
object GzipHelper {
def gzip(string: String, charset: Charset): Array[Byte] = gzip(string.getBytes(charset))
def gzip(bytes: Array[Byte]): Array[Byte] =
gzip(new FastByteArrayInputStream(bytes))
def gzip(in: InputStream): Array[Byte] =
Using.resource(in) { is =>
val out = FastByteArrayOutputStream.pooled()
Using.resource(new GZIPOutputStream(out))(is.copyTo(_))
out.toByteArray
}
}
| gatling/gatling | gatling-commons/src/main/scala/io/gatling/commons/util/GzipHelper.scala | Scala | apache-2.0 | 1,235 |
/*
* Monkeyman static web site generator
* Copyright (C) 2013 Wilfred Springer
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
package nl.flotsam.monkeyman
import org.joda.time.LocalDateTime
import eu.medsea.mimeutil.{MimeType, MimeUtil}
import collection.JavaConversions._
case class ClasspathResource(path: String) extends Resource {
val url = getClass.getResource("/" + path)
val title = None
def subtitle = None
def summary = None
val pubDateTime = LocalDateTime.now()
val contentType = MimeUtil.getMimeTypes(url).asInstanceOf[java.util.Set[MimeType]].head.toString
val open = url.openStream()
def tags = Set.empty
val published = true
val asHtmlFragment = None
val id = path
} | wspringer/monkeyman | src/main/scala/nl/flotsam/monkeyman/ClasspathResource.scala | Scala | gpl-2.0 | 1,399 |
package pdi.jwt
import java.security.spec.ECGenParameterSpec
import java.security.{KeyPairGenerator, SecureRandom}
import org.scalacheck.Gen
import org.scalacheck.Prop._
import pdi.jwt.exceptions.JwtSignatureFormatException
case class TestObject(value: String) {
override def toString(): String = this.value
}
class JwtUtilsSpec extends munit.ScalaCheckSuite with Fixture {
val ENCODING = JwtUtils.ENCODING
test("hashToJson should transform a seq of tuples to a valid JSON") {
val values: Seq[(String, Seq[(String, Any)])] = Seq(
"""{"a":"b","c":1,"d":true,"e":2,"f":3.4,"g":5.6}""" -> Seq(
"a" -> "b",
"c" -> 1,
"d" -> true,
"e" -> 2L,
"f" -> 3.4f,
"g" -> 5.6
),
"{}" -> Seq(),
"""{"a\\"b":"a\\"b","c\\"d":"c\\"d","e\\"f":["e\\"f","e\\"f"]}""" -> Seq(
"""a"b""" -> """a"b""",
"""c"d""" -> TestObject("""c"d"""),
"""e"f""" -> Seq("""e"f""", TestObject("""e"f"""))
)
)
values.zipWithIndex.foreach { case (value, index) =>
assertEquals(value._1, JwtUtils.hashToJson(value._2), "at index " + index)
}
}
test("mergeJson should correctly merge 2 JSONs") {
val values: Seq[(String, String, Seq[String])] = Seq(
("{}", "{}", Seq("{}")),
("""{"a":1}""", """{"a":1}""", Seq("")),
("""{"a":1}""", """{"a":1}""", Seq("{}")),
("""{"a":1}""", """{}""", Seq("""{"a":1}""")),
("""{"a":1}""", "", Seq("""{"a":1}""")),
("""{"a":1,"b":2}""", """{"a":1}""", Seq("""{"b":2}""")),
("""{"a":1,"b":2,"c":"d"}""", """{"a":1}""", Seq("""{"b":2}""", """{"c":"d"}"""))
)
values.zipWithIndex.foreach { case (value, index) =>
assertEquals(value._1, JwtUtils.mergeJson(value._2, value._3: _*), "at index " + index)
}
}
test("Claim.toJson should correctly encode a Claim to JSON") {
val claim = JwtClaim(
issuer = Some(""),
audience = Some(Set("")),
subject = Some("da1b3852-6827-11e9-a923-1681be663d3e"),
expiration = Some(1597914901),
issuedAt = Some(1566378901),
content = "{\\"a\\":\\"da1b3852-6827-11e9-a923-1681be663d3e\\",\\"b\\":123.34}"
)
val jsonClaim =
"""{"iss":"","sub":"da1b3852-6827-11e9-a923-1681be663d3e","aud":"","exp":1597914901,"iat":1566378901,"a":"da1b3852-6827-11e9-a923-1681be663d3e","b":123.34}"""
assertEquals(jsonClaim, claim.toJson)
}
test("transcodeSignatureToDER should throw JwtValidationException if signature is too long") {
val signature = JwtUtils.bytify(
"AU6-jw28DX1QMY0Ar8CTcnIAc0WKGe3nNVHkE7ayHSxvOLxE5YQSiZtbPn3y-vDHoQCOMId4rPdIJhD_NOUqnH_rAKA5w9ZlhtW0GwgpvOg1_5oLWnWXQvPjJjC5YsLqEssoMITtOmfkBsQMgLAF_LElaaCWhkJkOCtcZmroUW_b5CXB"
)
interceptMessage[JwtSignatureFormatException]("Invalid ECDSA signature format") {
JwtUtils.transcodeSignatureToDER(signature ++ signature)
}
}
test("transcodeSignatureToDER should transocde empty signature") {
val signature: Array[Byte] = Array[Byte](0)
JwtUtils.transcodeSignatureToDER(signature)
}
test("transcodeSignatureToConcat should throw JwtValidationException if length incorrect") {
val signature = JwtUtils.bytify(
"MIEAAGg3OVb/ZeX12cYrhK3c07TsMKo7Kc6SiqW++4CAZWCX72DkZPGTdCv2duqlupsnZL53hiG3rfdOLj8drndCU+KHGrn5EotCATdMSLCXJSMMJoHMM/ZPG+QOHHPlOWnAvpC1v4lJb32WxMFNz1VAIWrl9Aa6RPG1GcjCTScKjvEE"
)
interceptMessage[JwtSignatureFormatException]("Invalid ECDSA signature format") {
JwtUtils.transcodeSignatureToConcat(signature, 132)
}
}
test(
"transcodeSignatureToConcat should throw JwtValidationException if signature is incorrect "
) {
val signature = JwtUtils.bytify(
"MIGBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
)
interceptMessage[JwtSignatureFormatException]("Invalid ECDSA signature format") {
JwtUtils.transcodeSignatureToConcat(signature, 132)
}
}
test(
"transcodeSignatureToConcat should throw JwtValidationException if signature is incorrect 2"
) {
val signature = JwtUtils.bytify(
"MIGBAD4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
)
interceptMessage[JwtSignatureFormatException]("Invalid ECDSA signature format") {
JwtUtils.transcodeSignatureToConcat(signature, 132)
}
}
test("transcodeSignatureToConcat and transcodeSignatureToDER should be symmetric") {
val signature = JwtUtils.bytify(
"AbxLPbA3dm9V0jt6c_ahf8PYioFvnryTe3odgolhcgwBUl4ifpwUBJ--GgiXC8vms45c8vI40ZSdkm5NoNn1wTHOAfkepNy-RRKHmBzAoWrWmBIb76yPa0lsjdAPEAXcbGfaQV8pKq7W10dpB2B-KeJxVonMuCLJHPuqsUl9S7CfASu2"
)
val dER: Array[Byte] = JwtUtils.transcodeSignatureToDER(signature)
val result = JwtUtils.transcodeSignatureToConcat(
dER,
JwtUtils.getSignatureByteArrayLength(JwtAlgorithm.ES512)
)
assertArrayEquals(signature, result)
}
test(
"transcodeSignatureToConcat and transcodeSignatureToDER should be symmetric for generated tokens"
) {
val ecGenSpec = new ECGenParameterSpec(ecCurveName)
val generatorEC = KeyPairGenerator.getInstance(JwtUtils.ECDSA)
generatorEC.initialize(ecGenSpec, new SecureRandom())
val randomECKey = generatorEC.generateKeyPair()
val header = """{"typ":"JWT","alg":"ES512"}"""
val claim = """{"test":"t"}"""
val signature = Jwt(validTimeClock)
.encode(header, claim, randomECKey.getPrivate, JwtAlgorithm.ES512)
.split("\\\\.")(2)
assertEquals(
signature,
JwtUtils.stringify(
JwtUtils.transcodeSignatureToConcat(
JwtUtils.transcodeSignatureToDER(JwtUtils.bytify(signature)),
JwtUtils.getSignatureByteArrayLength(JwtAlgorithm.ES512)
)
)
)
}
test("splitString should do nothing") {
forAll(Gen.asciiStr.suchThat(s => s.nonEmpty && !s.contains('a'))) { (value: String) =>
assertArrayEquals(
JwtUtils.splitString(value, 'a'),
Array(value)
)
}
}
test("splitString should split once") {
assertArrayEquals(JwtUtils.splitString("qwertyAzxcvb", 'A'), Array("qwerty", "zxcvb"))
}
test("splitString should split a token") {
assertArrayEquals(
JwtUtils.splitString("header.claim.signature", '.'),
Array("header", "claim", "signature")
)
}
test("splitString should split a token without signature") {
assertArrayEquals(JwtUtils.splitString("header.claim", '.'), Array("header", "claim"))
}
test("splitString should split a token with an empty signature") {
assertArrayEquals(JwtUtils.splitString("header.claim.", '.'), Array("header", "claim"))
}
test("splitString should split a token with an empty header") {
assertArrayEquals(JwtUtils.splitString(".claim.", '.'), Array("", "claim"))
}
test("splitString should be the same as normal split") {
var token = "header.claim.signature"
assertArrayEquals(token.split("\\\\."), JwtUtils.splitString(token, '.'))
token = "header.claim."
assertArrayEquals(token.split("\\\\."), JwtUtils.splitString(token, '.'))
token = "header.claim"
assertArrayEquals(token.split("\\\\."), JwtUtils.splitString(token, '.'))
token = ".claim.signature"
assertArrayEquals(token.split("\\\\."), JwtUtils.splitString(token, '.'))
token = ".claim."
assertArrayEquals(token.split("\\\\."), JwtUtils.splitString(token, '.'))
token = "1"
assertArrayEquals(token.split("\\\\."), JwtUtils.splitString(token, '.'))
token = "a.b.c.d"
assertArrayEquals(token.split("\\\\."), JwtUtils.splitString(token, '.'))
}
private def assertArrayEquals[A](arr1: Array[A], arr2: Array[A]): Unit = {
assertEquals(arr1.toSeq, arr2.toSeq)
}
}
| pauldijou/jwt-scala | core/src/test/scala/JwtUtilsSpec.scala | Scala | apache-2.0 | 7,870 |
import sbt._
/**
* Generate a range of boilerplate classes that would be tedious to write and maintain by hand.
*
* Copied, with some modifications, from
* [[https://github.com/milessabin/shapeless/blob/master/project/Boilerplate.scala Shapeless]].
*
* @author Miles Sabin
* @author Kevin Wright
*/
object Boilerplate {
import scala.StringContext._
implicit class BlockHelper(val sc: StringContext) extends AnyVal {
def block(args: Any*): String = {
val interpolated = sc.standardInterpolator(treatEscapes, args)
val rawLines = interpolated.split('\\n')
val trimmedLines = rawLines.map(_.dropWhile(_.isWhitespace))
trimmedLines.mkString("\\n")
}
}
val templates: Seq[Template] = Seq(
GenTupleInstances
)
val header = "// auto-generated boilerplate"
val maxArity = 22
/**
* Return a sequence of the generated files.
*
* As a side-effect, it actually generates them...
*/
def gen(dir: File): Seq[File] = templates.map { template =>
val tgtFile = template.filename(dir)
IO.write(tgtFile, template.body)
tgtFile
}
class TemplateVals(val arity: Int) {
val synTypes = (0 until arity).map(n => s"A$n")
val synVals = (0 until arity).map(n => s"a$n")
val `A..N` = synTypes.mkString(", ")
val `a..n` = synVals.mkString(", ")
val `_.._` = Seq.fill(arity)("_").mkString(", ")
val `(A..N)` = if (arity == 1) "Tuple1[A0]" else synTypes.mkString("(", ", ", ")")
val `(_.._)` = if (arity == 1) "Tuple1[_]" else Seq.fill(arity)("_").mkString("(", ", ", ")")
val `(a..n)` = if (arity == 1) "Tuple1(a)" else synVals.mkString("(", ", ", ")")
}
/**
* Blocks in the templates below use a custom interpolator, combined with post-processing to
* produce the body.
*
   * - The contents of the `header` val are output first
* - Then the first block of lines beginning with '|'
* - Then the block of lines beginning with '-' is replicated once for each arity,
   *   with the `templateVals` already pre-populated with the relevant vals for that arity
* - Then the last block of lines prefixed with '|'
*
* The block otherwise behaves as a standard interpolated string with regards to variable
* substitution.
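   *
   * As an illustration (a hypothetical template, not one of the templates below),
   * with arities 1 and 2 a content block such as
   * {{{
   *   |trait Foo {
   *   -  def f${arity}: Int
   *   |}
   * }}}
   * would expand to the `trait Foo {` line once, then `def f1: Int` and
   * `def f2: Int` (one per arity), then the closing `}`.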
*/
trait Template {
def filename(root: File): File
def content(tv: TemplateVals): String
def range: IndexedSeq[Int] = 1 to maxArity
def body: String = {
val headerLines = header.split('\\n')
val raw = range.map(n => content(new TemplateVals(n)).split('\\n').filterNot(_.isEmpty))
val preBody = raw.head.takeWhile(_.startsWith("|")).map(_.tail)
val instances = raw.flatMap(_.filter(_.startsWith("-")).map(_.tail))
val postBody = raw.head.dropWhile(_.startsWith("|")).dropWhile(_.startsWith("-")).map(_.tail)
(headerLines ++ preBody ++ instances ++ postBody).mkString("\\n")
}
}
object GenTupleInstances extends Template {
override def range: IndexedSeq[Int] = 1 to maxArity
def filename(root: File): File = root / "algebra" / "instances" / "TupleAlgebra.scala"
def content(tv: TemplateVals): String = {
import tv._
def constraints(constraint: String) =
synTypes.map(tpe => s"${tpe}: ${constraint}[${tpe}]").mkString(", ")
def tuple(results: TraversableOnce[String]) = {
val resultsVec = results.toVector
val a = synTypes.size
val r = s"${0.until(a).map(i => resultsVec(i)).mkString(", ")}"
if (a == 1) "Tuple1(" ++ r ++ ")"
else s"(${r})"
}
def binMethod(name: String) =
synTypes.zipWithIndex.iterator.map {
case (tpe, i) =>
val j = i + 1
s"${tpe}.${name}(x._${j}, y._${j})"
}
def binTuple(name: String) =
tuple(binMethod(name))
def unaryTuple(name: String) = {
val m = synTypes.zipWithIndex.map { case (tpe, i) => s"${tpe}.${name}(x._${i + 1})" }
tuple(m)
}
def nullaryTuple(name: String) = {
val m = synTypes.map(tpe => s"${tpe}.${name}")
tuple(m)
}
block"""
|package algebra
|package instances
|
|import algebra.ring.{Rig, Ring, Rng, Semiring}
|
|trait TupleInstances extends cats.kernel.instances.TupleInstances {
-
- implicit def tuple${arity}Rig[${`A..N`}](implicit ${constraints("Rig")}): Rig[${`(A..N)`}] =
- new Rig[${`(A..N)`}] {
- def one: ${`(A..N)`} = ${nullaryTuple("one")}
- def plus(x: ${`(A..N)`}, y: ${`(A..N)`}): ${`(A..N)`} = ${binTuple("plus")}
- def times(x: ${`(A..N)`}, y: ${`(A..N)`}): ${`(A..N)`} = ${binTuple("times")}
- def zero: ${`(A..N)`} = ${nullaryTuple("zero")}
- }
-
- implicit def tuple${arity}Ring[${`A..N`}](implicit ${constraints("Ring")}): Ring[${`(A..N)`}] =
- new Ring[${`(A..N)`}] {
- def one: ${`(A..N)`} = ${nullaryTuple("one")}
- def plus(x: ${`(A..N)`}, y: ${`(A..N)`}): ${`(A..N)`} = ${binTuple("plus")}
- def times(x: ${`(A..N)`}, y: ${`(A..N)`}): ${`(A..N)`} = ${binTuple("times")}
- def zero: ${`(A..N)`} = ${nullaryTuple("zero")}
- def negate(x: ${`(A..N)`}): ${`(A..N)`} = ${unaryTuple("negate")}
- }
-
- implicit def tuple${arity}Rng[${`A..N`}](implicit ${constraints("Rng")}): Rng[${`(A..N)`}] =
- new Rng[${`(A..N)`}] {
- def plus(x: ${`(A..N)`}, y: ${`(A..N)`}): ${`(A..N)`} = ${binTuple("plus")}
- def times(x: ${`(A..N)`}, y: ${`(A..N)`}): ${`(A..N)`} = ${binTuple("times")}
- def zero: ${`(A..N)`} = ${nullaryTuple("zero")}
- def negate(x: ${`(A..N)`}): ${`(A..N)`} = ${unaryTuple("negate")}
- }
-
- implicit def tuple${arity}Semiring[${`A..N`}](implicit ${constraints("Semiring")}): Semiring[${`(A..N)`}] =
- new Semiring[${`(A..N)`}] {
- def plus(x: ${`(A..N)`}, y: ${`(A..N)`}): ${`(A..N)`} = ${binTuple("plus")}
- def times(x: ${`(A..N)`}, y: ${`(A..N)`}): ${`(A..N)`} = ${binTuple("times")}
- def zero: ${`(A..N)`} = ${nullaryTuple("zero")}
- }
|}
"""
}
}
}
| tixxit/algebra | project/Boilerplate.scala | Scala | mit | 6,295 |
/* Copyright (c) 2016 Lucas Satabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package lingua
package lexikon
import better.files.File
sealed abstract class DikoOptions extends Options {
def mkCompile: CompileOptions =
CompileOptions(verbose = verbose, timing = timing)
def mkQuery: QueryOptions =
QueryOptions(verbose = verbose, timing = timing)
def mkVerbose: DikoOptions
def mkTimed: DikoOptions
}
final case class NoCommandOptions(
verbose: Boolean = false,
timing: Boolean = false) extends DikoOptions {
def mkVerbose: NoCommandOptions =
copy(verbose = true)
def mkTimed: NoCommandOptions =
copy(timing = true)
}
final case class CompileOptions(
inputs: List[File] = Nil,
outputDir: File = File("out"),
generateLemmas: Boolean = false,
generateInflections: Boolean = false,
generateDeflexions: Boolean = false,
occupation: Int = 70,
lemmasFile: String = "lemmas",
inflectionsFile: String = "inflections",
deflexionsFile: String = "deflexions",
saveNFst: Boolean = false,
saveFst: Boolean = false,
verbose: Boolean = false,
timing: Boolean = false) extends DikoOptions {
def mkVerbose: CompileOptions =
copy(verbose = true)
def mkTimed: CompileOptions =
copy(timing = true)
}
final case class QueryOptions(
input: File = null,
query: String = null,
verbose: Boolean = false,
timing: Boolean = false) extends DikoOptions {
def mkVerbose: QueryOptions =
copy(verbose = true)
def mkTimed: QueryOptions =
copy(timing = true)
}
| satabin/lingua | lexikon/src/main/scala/lingua/lexikon/Options.scala | Scala | apache-2.0 | 2,090 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.bwsw.sj.engine.regular.module
import java.io.File
import java.util.logging.LogManager
import com.bwsw.sj.common.config.TempHelperForConfigDestroy
import com.bwsw.sj.common.utils.benchmark.ProcessTerminator
import com.bwsw.sj.engine.regular.module.DataFactory._
import com.bwsw.sj.engine.regular.module.SjRegularBenchmarkConstants._
object SjRegularModuleDestroy extends App {
LogManager.getLogManager.reset()
ProcessTerminator.terminateProcessAfter { () =>
val streamService = connectionRepository.getStreamRepository
val serviceManager = connectionRepository.getServiceRepository
val providerService = connectionRepository.getProviderRepository
val instanceService = connectionRepository.getInstanceRepository
val fileStorage = connectionRepository.getFileStorage
val module = new File(modulePath)
deleteStreams(streamService, inputStreamsType, serviceManager, inputCount, outputCount)
deleteServices(serviceManager)
deleteProviders(providerService)
deleteInstance(instanceService)
deleteModule(fileStorage, module.getName)
val tempHelperForConfigDestroy = new TempHelperForConfigDestroy(connectionRepository)
tempHelperForConfigDestroy.deleteConfigs()
connectionRepository.close()
}
}
| bwsw/sj-platform | core/sj-regular-streaming-engine/src/test/scala/com/bwsw/sj/engine/regular/module/SjRegularModuleDestroy.scala | Scala | apache-2.0 | 2,076 |
/*
* @author Philip Stutz
*
* Copyright 2014 University of Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.signalcollect.triplerush
import scala.collection.JavaConversions.asScalaIterator
import org.scalatest.Finders
import org.scalatest.fixture.{ FlatSpec, UnitFixture }
import com.signalcollect.triplerush.sparql.Sparql
class SparqlSpec extends FlatSpec with UnitFixture {
"Sparql" should "correctly translate a SPARQL query that has results" in new TestStore {
tr.addStringTriple("http://a", "http://b", "http://c")
tr.addStringTriple("http://a", "http://d", "http://e")
val query = """
SELECT ?X
WHERE {
?X <http://b> <http://c> .
?X <http://d> <http://e>
}
"""
val results = Sparql(query)
val decodedResults = results.map(_.get("X").toString)
assert(decodedResults.toSet === Set("http://a"))
}
it should "correctly translate a SPARQL query that has no results" in new TestStore {
tr.addStringTriple("http://a", "http://b", "http://c")
tr.addStringTriple("http://f", "http://d", "http://e")
val query = """
SELECT ?X
WHERE {
?X <http://b> <http://c> .
?X <http://d> <http://e>
}
"""
val results = Sparql(query)
val decodedResults = results.map(_.get("X").toString)
assert(decodedResults.toSet === Set())
}
it should "correctly eliminate a SPARQL query that is guaranteed to have no results" in new TestStore {
tr.addStringTriple("http://a", "http://b", "http://c")
tr.addStringTriple("http://f", "http://d", "http://e")
val query = """
SELECT ?X
WHERE {
?X <http://z> <http://c> .
?X <http://d> <http://e>
}
"""
val results = Sparql(query)
assert(results.toList == Nil)
}
}
| uzh/triplerush | src/test/scala/com/signalcollect/triplerush/SparqlSpec.scala | Scala | apache-2.0 | 2,304 |
package scalax.collection
package mutable
import scala.collection.mutable.{ExtHashSet, GrowableBuilder}
import scala.collection.{IterableFactory, IterableFactoryDefaults, SortedSet, StrictOptimizedIterableOps}
import scala.util.Random
import scalax.collection.immutable.SortedArraySet
/** A basic [[ArraySet]] implementation suitable for efficient add operations.
* Element removal could be optimized by another implementation.
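 *
 * Usage sketch (illustrative only):
 * {{{
 * val set = SimpleArraySet.empty[Int]   // default hints
 * set += 1
 * set += 2
 * set.contains(2)                       // true
 * }}}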
*
* @param hints Optimization hints controlling the growth of the underlying
* [[scala.collection.mutable.ArrayBuffer!]].
* @define OPT Optimized by use of unchecked insertions.
* @author Peter Empen
*/
@SerialVersionUID(1L)
final class SimpleArraySet[A](override val hints: ArraySet.Hints)
extends ArraySet[A]
with StrictOptimizedIterableOps[A, SimpleArraySet, SimpleArraySet[A]]
with IterableFactoryDefaults[A, SimpleArraySet]
with Serializable {
override def iterableFactory = SimpleArraySet
protected[collection] def newNonCheckingBuilder[B] = new SimpleArraySet.NonCheckingBuilder[A, B](this)
override def clone = (newNonCheckingBuilder ++= this).result()
private var nextFree: Int = 0
private var arr: Array[A] = _
private var hashSet: ExtHashSet[A] = _
private def initialize(): Unit = {
val capacity = hints.nextCapacity(0)
if (capacity == 0) hashSet = ExtHashSet.empty[A]
else arr = new Array[AnyRef](capacity).asInstanceOf[Array[A]]
}
initialize()
def capacity: Int = if (isHash) 0 else arr.length
@inline private def isHash: Boolean = arr eq null
@inline def isArray: Boolean = !isHash
protected[collection] def array = arr
protected[collection] def set = hashSet
def addOne(elem: A) = { add(elem); this }
def subtractOne(elem: A) = {
if (isHash) hashSet -= elem
else removeIndex(indexOf(elem))
this
}
protected def removeIndex(i: Int): Unit =
if (i != -1) {
if (i + 1 < nextFree)
java.lang.System.arraycopy(arr, i + 1, arr, i, nextFree - i - 1)
nextFree -= 1
}
protected[collection] def +=!(elem: A): this.type = {
if (isHash) hashSet add elem
else {
if (nextFree == capacity)
if (resizedToHash) {
add(elem); return this
}
arr(nextFree) = elem
nextFree += 1
}
this
}
protected[collection] def map(xs: IterableOnce[A]): this.type = this
override def iterator: Iterator[A] =
if (isHash) hashSet.iterator
else
new scala.collection.AbstractIterator[A] {
private[this] var i = 0
private[this] var prevElm: A = _
def hasNext =
i < nextFree
def next() = {
if (i >= nextFree)
throw new NoSuchElementException
prevElm = arr(i)
i += 1
prevElm
}
}
override def foreach[U](f: (A) => U): Unit =
if (isHash) hashSet foreach f
else {
var i = 0
while (i < nextFree) { f(arr(i)); i += 1 }
}
protected def resizeArray(fromCapacity: Int, toCapacity: Int): Unit = {
val newArr: Array[AnyRef] = new Array(toCapacity)
java.lang.System.arraycopy(arr, 0, newArr, 0, math.min(fromCapacity, toCapacity))
arr = newArr.asInstanceOf[Array[A]]
}
protected def setToArray(set: Iterable[A], size: Int): Unit = {
arr = new Array[AnyRef](size).asInstanceOf[Array[A]]
nextFree = 0
set foreach { elem =>
arr(nextFree) = elem
nextFree += 1
}
hashSet = null
}
def compact(): Unit =
if (isHash) {
val _size = size
if (_size < hints.hashTableThreshold)
setToArray(hashSet, _size)
} else if (
hints.compactUpToUsed match {
case perc if perc == 0 => false
case perc if perc == 100 => nextFree < capacity
case perc => perc >= nextFree * 100 / capacity
}
)
resizeArray(capacity, nextFree)
protected def indexOf[B](elem: B, pred: (A, B) => Boolean): Int = {
var i = 0
while (i < nextFree)
if (pred(arr(i), elem)) return i
else i += 1
-1
}
  /* Optimized 'arr contains elem'. */
protected def indexOf(elem: A): Int = {
var i = 0
while (i < nextFree)
if (arr(i) == elem) return i
else i += 1
-1
}
override def contains(elem: A): Boolean =
if (isHash) hashSet contains elem
else indexOf(elem) >= 0
def find(elem: A): Option[A] =
if (isHash) hashSet find (_ == elem)
else {
val i = indexOf(elem)
if (i >= 0) Some(arr(i)) else None
}
override def add(elem: A): Boolean =
if (isHash) hashSet add elem
else {
if (nextFree == capacity)
if (resizedToHash)
return add(elem)
var i = 0
while (i < nextFree)
if (arr(i) == elem) return false
else i += 1
arr(nextFree) = elem
nextFree += 1
true
}
protected def resizedToHash: Boolean = {
val newCapacity = hints.nextCapacity(capacity)
if (newCapacity == 0) {
hashSet = ExtHashSet.empty[A]
hashSet sizeHint capacity
hashSet ++= iterator
arr = null
true
} else {
resizeArray(capacity, newCapacity)
false
}
}
override def size = if (isHash) hashSet.size else nextFree
protected[collection] def upsert(elem: A with AnyRef): Boolean =
if (isHash) hashSet upsert elem
else {
val i = indexOf(elem)
val isUpdate = i >= 0
if (isUpdate) arr(i) = elem
else add(elem)
!isUpdate
}
/** $OPT */
override def filter(p: (A) => Boolean) =
if (isHash) super.filter(p)
else {
val b = newNonCheckingBuilder[A]
for (x <- this)
if (p(x)) b += x
b.result()
}
  /** Faster mapping in case the caller ensures that no duplicates are inserted. */
protected[collection] def mapUnchecked[B, That](f: A => B): SimpleArraySet[B] =
if (isHash) super.map(f)
else {
val b = newNonCheckingBuilder[B]
for (x <- this) b += f(x)
b.result()
}
/** $OPT */
override def partition(p: A => Boolean) =
if (isHash) super.partition(p)
else {
val l, r = newNonCheckingBuilder[A]
for (x <- this) (if (p(x)) l else r) += x
(l.result(), r.result())
}
def sorted(implicit ord: Ordering[A]): SortedSet[A] =
if (isHash) {
SortedSet.from(hashSet)
} else {
val newArr: Array[AnyRef] = new Array(nextFree)
java.lang.System.arraycopy(arr, 0, newArr, 0, nextFree)
new SortedArraySet(newArr.asInstanceOf[Array[A]])
}
def findElem[B](other: B, correspond: (A, B) => Boolean): A =
if (isHash) hashSet findElem (other, correspond)
else {
val idx = indexOf(other, (a: A, b: B) => a.hashCode == b.hashCode && correspond(a, b))
(if (idx < 0) null else arr(idx)).asInstanceOf[A]
}
def draw(random: Random): A =
if (isHash) hashSet draw random
else arr(random.nextInt(size))
override def clear(): Unit =
for (elem <- this.toList)
this -= elem
}
/** @define FROM The [[ArraySet]] instance an operation of which this builder is invoked on.
*/
object SimpleArraySet extends IterableFactory[SimpleArraySet] {
/** Returns an empty set with default hints. */
override def empty[A]: SimpleArraySet[A] = new SimpleArraySet[A](ArraySet.Hints())
/** Returns an empty set with custom hints. */
def emptyWithHints[A](implicit hints: ArraySet.Hints): SimpleArraySet[A] =
new SimpleArraySet[A](hints)
/** Returns an empty set with hints propagated from `arraySet`. */
def emptyWithPropagatedHints[A, B](arraySet: ArraySet[A]): SimpleArraySet[B] =
emptyWithHints(arraySet.hints.propagate(arraySet.size))
/** Default `ArraySet` builder preventing duplicates. The hints passed are propagated
* such that `initial size == from.size`.
*
* @param from $FROM
*/
protected class CheckingBuilder[A](from: ArraySet[A])
extends GrowableBuilder[A, SimpleArraySet[A]](emptyWithPropagatedHints(from))
/** An `ArraySet` builder without duplicate checking.
*
* @param from $FROM
*/
protected class NonCheckingBuilder[A, B](from: ArraySet[A])
extends GrowableBuilder[B, SimpleArraySet[B]](emptyWithPropagatedHints[A, B](from)) {
override def addOne(x: B): this.type = { elems +=! x; this }
}
override def from[A](source: IterableOnce[A]) = empty ++= source
override def newBuilder[A] = new GrowableBuilder[A, SimpleArraySet[A]](empty)
}
| scala-graph/scala-graph | core/src/main/scala/scalax/collection/mutable/SimpleArraySet.scala | Scala | apache-2.0 | 8,608 |
package reopp.common.examples
import reopp.common.guardedcommands.dataconnectors.ConnectorGen._
import reopp.common.{NoneSol, SomeSol}
/**
* Divide 100 by the input value. Shows the usage of partial functions.
* Note that several things are partial functions in Scala, including most scala sequences (lists) and maps.
*
* Created by jose on 12/04/13.
*/
object Divide extends App {
val seed = 100
// type cannot be inferred for this (and most) partial functions. Needed type of the argument.
val divide: PartialFunction[Int,Int] =
{ case d: Int if d != 0 => seed / d }
//def div2()
val connector =
writer("a",List(0)) ++
writer("b",List(2)) ++
transf("a","aout",divide) ++
transf("b","bout",divide) ++
sdrain("a","b") ++
reader("aout",1) ++
reader("bout",1)
val sol = connector.getConstraints.solveChocoDyn
sol match {
case SomeSol(s) => println("solved!\\n"+s)
case _ => println("no sol")
}
}
| joseproenca/ip-constraints | code/src/main/scala/reopp/common/examples/Divide.scala | Scala | mit | 972 |
/**
* @author Francisco Miguel Arรกmburo Torres - [email protected]
*/
package engine
/** Interface to reset parameters of an object. */
trait Resettable {
/** Resets parameters of an object.
*
* @param variables that may be used.
*/
def reset (variables: Array[String]): Unit
}
| FrancoAra/census | app/engine/Resettable.scala | Scala | mit | 302 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.utils.tf.loaders
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.tf.Tensorflow.typeAttr
import com.intel.analytics.bigdl.utils.tf.TensorflowSpecHelper
import org.tensorflow.framework.{DataType, NodeDef}
class ReciprocalSpec extends TensorflowSpecHelper {
"Reciprocal" should "be correct for float tensor" in {
compare[Float](
NodeDef.newBuilder()
.setName("reciprocal_test")
.putAttr("T", typeAttr(DataType.DT_FLOAT))
.setOp("Reciprocal"),
Seq(Tensor[Float](4, 32, 32, 3).rand()),
0
)
}
}
| yiheng/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/utils/tf/loaders/ReciprocalSpec.scala | Scala | apache-2.0 | 1,219 |
package com.cloudray.scalapress.plugin.compatibility
import org.springframework.stereotype.Controller
import org.springframework.web.bind.annotation.{PathVariable, RequestParam, RequestMapping}
import com.cloudray.scalapress.util.UrlGenerator
import org.springframework.beans.factory.annotation.Autowired
import com.cloudray.scalapress.framework.ScalapressContext
/** @author Stephen Samuel */
@Controller
@Autowired
class ECRedirectController(context: ScalapressContext) {
@RequestMapping(Array("c.do"))
def category(@RequestParam("category") id: Long) = {
Option(context.folderDao.find(id)) match {
case None => "redirect:/"
case Some(folder) => "redirect:" + UrlGenerator.url(folder)
}
}
@RequestMapping(Array("item.do"))
def item(@RequestParam("item") id: Long) = {
Option(context.itemDao.find(id)) match {
case None => "redirect:/"
case Some(obj) => "redirect:" + UrlGenerator.url(obj)
}
}
@RequestMapping(Array("{bumf}-c{id:\\\\d+}.html"))
def categoryHtml(@PathVariable("id") id: Long) = category(id)
@RequestMapping(Array("{bumf}-i{id:\\\\d+}.html"))
def itemHtml(@PathVariable("id") id: Long) = item(id)
}
| vidyacraghav/scalapress | src/main/scala/com/cloudray/scalapress/plugin/compatibility/ECRedirectController.scala | Scala | apache-2.0 | 1,177 |
package colossus
package protocols.redis
import core._
import service._
class RedisServerCodec extends Codec.ServerCodec[Command, Reply] {
private var commandParser = RedisCommandParser.command
def reset() {
commandParser = RedisCommandParser.command
}
def encode(reply: Reply) = reply.raw
def decode(data: DataBuffer): Option[DecodedResult[Command]] = DecodedResult.static(commandParser.parse(data))
}
| noikiy/colossus | colossus/src/main/scala/colossus/protocols/redis/RedisServerCodec.scala | Scala | apache-2.0 | 420 |
object Test {
def main(args: Array[String]): Unit = {
val a = Array.ofDim[Int](2,2)
test(a)
}
def test[A](t: Array[Array[A]]): Unit = {
val tmp = t(0)
t(1) = tmp
}
}
| yusuke2255/dotty | tests/run/t2005.scala | Scala | bsd-3-clause | 190 |
package io.hydrosphere.mist.master
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.stream.scaladsl.Flow
import com.typesafe.config.ConfigFactory
import scala.concurrent.duration.{Duration, FiniteDuration}
import scala.concurrent.{Await, Future, Promise}
trait TestUtils {
implicit class AwaitSyntax[A](f: => Future[A]) {
def await: A = Await.result(f, Duration.Inf)
def await(d: FiniteDuration): A = Await.result(f, d)
}
}
object TestUtils extends TestUtils {
val cfgStr =
"""
|context-defaults {
| downtime = Inf
| streaming-duration = 1 seconds
| max-parallel-jobs = 20
| precreated = false
| spark-conf = { }
| worker-mode = "shared"
| run-options = "--opt"
| max-conn-failures = 5
|}
|
|context {
|
| foo {
| spark-conf {
| spark.master = "local[2]"
| }
| }
|}
""".stripMargin
val contextSettings = {
val cfg = ConfigFactory.parseString(cfgStr)
ContextsSettings(cfg)
}
val FooContext = contextSettings.contexts.get("foo").get
object MockHttpServer {
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.ActorMaterializer
import akka.util.Timeout
import scala.concurrent.duration._
def onServer[A](
routes: Flow[HttpRequest, HttpResponse, _],
f: (Http.ServerBinding) => A): Future[A] = {
implicit val system = ActorSystem("mock-http-cli")
implicit val materializer = ActorMaterializer()
implicit val executionContext = system.dispatcher
implicit val timeout = Timeout(1.seconds)
val binding = Http().bindAndHandle(routes, "localhost", 0)
val close = Promise[Http.ServerBinding]
close.future
.flatMap(binding => binding.unbind())
.onComplete(_ => {
materializer.shutdown()
Await.result(system.terminate(), Duration.Inf)
})
val result = binding.flatMap(binding => {
try {
Future.successful(f(binding))
} catch {
case e: Throwable =>
Future.failed(e)
} finally {
close.success(binding)
}
})
result
}
}
}
| Hydrospheredata/mist | mist/master/src/test/scala/io/hydrosphere/mist/master/TestUtils.scala | Scala | apache-2.0 | 2,262 |
/*
* La Trobe University - Distributed Deep Learning System
* Copyright 2016 Matthias Langer ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package edu.latrobe.blaze.modules
import edu.latrobe._
import edu.latrobe.blaze._
import scala.collection._
import scala.util.hashing._
/**
* This applies the supplied augmenters in a cyclic fashion.
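 *
 * Concretely (see `doPredictEx` below), the child used for a forward pass is
 * `(iterationNo / interval) % children.length`; for example, with `interval = 2`
 * and three children the selection cycles 0, 0, 1, 1, 2, 2, 0, ...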
*/
final class AlternatePath(override val builder: AlternatePathBuilder,
override val inputHints: BuildHints,
override val seed: InstanceSeed,
override val weightBufferBuilder: ValueTensorBufferBuilder)
extends PathSwitch[AlternatePathBuilder] {
val interval
: Int = builder.interval
private var iterationNo
: Long = 0L
// ---------------------------------------------------------------------------
// Forward propagation related.
// ---------------------------------------------------------------------------
override protected def doPredictEx(mode: Mode,
inPlaceAllowed: Boolean,
input: Tensor,
reference: Tensor,
onEnter: OnEnterPredict,
onLeave: OnLeavePredict)
: Int = {
val intervalNo = iterationNo / interval
val childNo = (intervalNo % children.length).toInt
iterationNo += 1L
childNo
}
// ---------------------------------------------------------------------------
// State backup and retrieval.
// ---------------------------------------------------------------------------
override def state
: AlternatePathState = AlternatePathState(super.state, iterationNo)
override def restoreState(state: InstanceState): Unit = {
super.restoreState(state.parent)
state match {
case state: AlternatePathState =>
iterationNo = state.iterationNo
case _ =>
throw new MatchError(state)
}
}
}
final class AlternatePathBuilder
extends PathSwitchBuilder[AlternatePathBuilder] {
override def repr
: AlternatePathBuilder = this
private var _interval
: Int = 1
def interval
: Int = _interval
def interval_=(value: Int)
: Unit = {
require(value > 0)
_interval = value
}
def setInterval(value: Int)
: AlternatePathBuilder = {
interval_=(value)
this
}
override protected def doToString()
: List[Any] = _interval :: super.doToString()
override def hashCode()
: Int = MurmurHash3.mix(super.hashCode(), _interval.hashCode())
override def canEqual(that: Any)
: Boolean = that.isInstanceOf[AlternatePathBuilder]
override protected def doEquals(other: Equatable)
: Boolean = super.doEquals(other) && (other match {
case other: AlternatePathBuilder =>
_interval == other._interval
case _ =>
false
})
override protected def doCopy()
: AlternatePathBuilder = AlternatePathBuilder()
override def copyTo(other: InstanceBuilder)
: Unit = {
super.copyTo(other)
other match {
case other: AlternatePathBuilder =>
other._interval = _interval
case _ =>
}
}
// ---------------------------------------------------------------------------
// Weights / Building related.
// ---------------------------------------------------------------------------
override def build(hints: BuildHints,
seed: InstanceSeed,
weightsBuilder: ValueTensorBufferBuilder)
: AlternatePath = new AlternatePath(this, hints, seed, weightsBuilder)
}
object AlternatePathBuilder {
final def apply()
: AlternatePathBuilder = new AlternatePathBuilder
final def apply(module0: ModuleBuilder)
: AlternatePathBuilder = apply() += module0
final def apply(module0: ModuleBuilder,
modules: ModuleBuilder*)
: AlternatePathBuilder = apply(module0) ++= modules
final def apply(modules: TraversableOnce[ModuleBuilder])
: AlternatePathBuilder = apply() ++= modules
final def apply(modules: Array[ModuleBuilder])
: AlternatePathBuilder = apply() ++= modules
}
final case class AlternatePathState(override val parent: InstanceState,
iterationNo: Long)
extends ModuleState {
}
| bashimao/ltudl | blaze/src/main/scala/edu/latrobe/blaze/modules/AlternatePath.scala | Scala | apache-2.0 | 4,933 |
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
package kafka.server
import java.util.Properties
import java.util.concurrent.ExecutionException
import java.util.concurrent.TimeUnit
import kafka.server.ClientQuotaManager.DefaultTags
import kafka.utils.TestUtils
import org.apache.kafka.common.config.internals.QuotaConfigs
import org.apache.kafka.common.internals.KafkaFutureImpl
import org.apache.kafka.common.message.CreatePartitionsRequestData
import org.apache.kafka.common.message.CreatePartitionsRequestData.CreatePartitionsTopic
import org.apache.kafka.common.message.CreateTopicsRequestData
import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic
import org.apache.kafka.common.message.DeleteTopicsRequestData
import org.apache.kafka.common.metrics.KafkaMetric
import org.apache.kafka.common.protocol.ApiKeys
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.quota.ClientQuotaAlteration
import org.apache.kafka.common.quota.ClientQuotaEntity
import org.apache.kafka.common.requests.AlterClientQuotasRequest
import org.apache.kafka.common.requests.AlterClientQuotasResponse
import org.apache.kafka.common.requests.CreatePartitionsRequest
import org.apache.kafka.common.requests.CreatePartitionsResponse
import org.apache.kafka.common.requests.CreateTopicsRequest
import org.apache.kafka.common.requests.CreateTopicsResponse
import org.apache.kafka.common.requests.DeleteTopicsRequest
import org.apache.kafka.common.requests.DeleteTopicsResponse
import org.apache.kafka.common.security.auth.AuthenticationContext
import org.apache.kafka.common.security.auth.KafkaPrincipal
import org.apache.kafka.common.security.authenticator.DefaultKafkaPrincipalBuilder
import org.apache.kafka.test.{TestUtils => JTestUtils}
import org.junit.jupiter.api.Assertions.assertEquals
import org.junit.jupiter.api.Assertions.assertTrue
import org.junit.jupiter.api.Assertions.fail
import org.junit.jupiter.api.BeforeEach
import org.junit.jupiter.api.Test
import scala.jdk.CollectionConverters._
object ControllerMutationQuotaTest {
// Principal used for all client connections. This is updated by each test.
var principal = KafkaPrincipal.ANONYMOUS
class TestPrincipalBuilder extends DefaultKafkaPrincipalBuilder(null, null) {
override def build(context: AuthenticationContext): KafkaPrincipal = {
principal
}
}
def asPrincipal(newPrincipal: KafkaPrincipal)(f: => Unit): Unit = {
val currentPrincipal = principal
principal = newPrincipal
try f
finally principal = currentPrincipal
}
val ThrottledPrincipal = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "ThrottledPrincipal")
val UnboundedPrincipal = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "UnboundedPrincipal")
val StrictCreateTopicsRequestVersion = ApiKeys.CREATE_TOPICS.latestVersion
val PermissiveCreateTopicsRequestVersion = 5.toShort
val StrictDeleteTopicsRequestVersion = ApiKeys.DELETE_TOPICS.latestVersion
val PermissiveDeleteTopicsRequestVersion = 4.toShort
val StrictCreatePartitionsRequestVersion = ApiKeys.CREATE_PARTITIONS.latestVersion
val PermissiveCreatePartitionsRequestVersion = 2.toShort
val Topic1 = "topic-1"
val Topic2 = "topic-2"
val TopicsWithOnePartition = Map(Topic1 -> 1, Topic2 -> 1)
val TopicsWith30Partitions = Map(Topic1 -> 30, Topic2 -> 30)
val TopicsWith31Partitions = Map(Topic1 -> 31, Topic2 -> 31)
val ControllerQuotaSamples = 10
val ControllerQuotaWindowSizeSeconds = 1
val ControllerMutationRate = 2.0
}
class ControllerMutationQuotaTest extends BaseRequestTest {
import ControllerMutationQuotaTest._
override def brokerCount: Int = 1
override def brokerPropertyOverrides(properties: Properties): Unit = {
properties.put(KafkaConfig.ControlledShutdownEnableProp, "false")
properties.put(KafkaConfig.OffsetsTopicReplicationFactorProp, "1")
properties.put(KafkaConfig.OffsetsTopicPartitionsProp, "1")
properties.put(KafkaConfig.PrincipalBuilderClassProp,
classOf[ControllerMutationQuotaTest.TestPrincipalBuilder].getName)
// Specify number of samples and window size.
properties.put(KafkaConfig.NumControllerQuotaSamplesProp, ControllerQuotaSamples.toString)
properties.put(KafkaConfig.ControllerQuotaWindowSizeSecondsProp, ControllerQuotaWindowSizeSeconds.toString)
}
@BeforeEach
override def setUp(): Unit = {
super.setUp()
// Define a quota for ThrottledPrincipal
defineUserQuota(ThrottledPrincipal.getName, Some(ControllerMutationRate))
waitUserQuota(ThrottledPrincipal.getName, ControllerMutationRate)
}
@Test
def testSetUnsetQuota(): Unit = {
val rate = 1.5
val principal = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "User")
// Default Value
waitUserQuota(principal.getName, Long.MaxValue)
// Define a new quota
defineUserQuota(principal.getName, Some(rate))
// Check it
waitUserQuota(principal.getName, rate)
// Remove it
defineUserQuota(principal.getName, None)
// Back to the default
waitUserQuota(principal.getName, Long.MaxValue)
}
@Test
def testQuotaMetric(): Unit = {
asPrincipal(ThrottledPrincipal) {
// Metric is lazily created
assertTrue(quotaMetric(principal.getName).isEmpty)
// Create a topic to create the metrics
      val (_, errors) = createTopics(Map("topic" -> 1), StrictCreateTopicsRequestVersion)
assertEquals(Set(Errors.NONE), errors.values.toSet)
// Metric must be there with the correct config
waitQuotaMetric(principal.getName, ControllerMutationRate)
// Update quota
defineUserQuota(ThrottledPrincipal.getName, Some(ControllerMutationRate * 2))
waitUserQuota(ThrottledPrincipal.getName, ControllerMutationRate * 2)
// Metric must be there with the updated config
waitQuotaMetric(principal.getName, ControllerMutationRate * 2)
}
}
@Test
def testStrictCreateTopicsRequest(): Unit = {
asPrincipal(ThrottledPrincipal) {
// Create two topics worth of 30 partitions each. As we use a strict quota, we
// expect one to be created and one to be rejected.
// Theoretically, the throttle time should be below or equal to:
// -(-10) / 2 = 5s
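      // (Derivation from the constants above: the token bucket effectively holds ControllerQuotaSamples *
      // ControllerQuotaWindowSizeSeconds * ControllerMutationRate = 10 * 1 * 2 = 20 tokens; the accepted
      // topic consumes 30 of them, leaving roughly -10, hence the 10 / 2 = 5s bound.)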
val (throttleTimeMs1, errors1) = createTopics(TopicsWith30Partitions, StrictCreateTopicsRequestVersion)
assertThrottleTime(5000, throttleTimeMs1)
// Ordering is not guaranteed so we only check the errors
assertEquals(Set(Errors.NONE, Errors.THROTTLING_QUOTA_EXCEEDED), errors1.values.toSet)
// Retry the rejected topic. It should succeed after the throttling delay is passed and the
// throttle time should be zero.
val rejectedTopicName = errors1.filter(_._2 == Errors.THROTTLING_QUOTA_EXCEEDED).keys.head
val rejectedTopicSpec = TopicsWith30Partitions.filter(_._1 == rejectedTopicName)
TestUtils.waitUntilTrue(() => {
val (throttleTimeMs2, errors2) = createTopics(rejectedTopicSpec, StrictCreateTopicsRequestVersion)
throttleTimeMs2 == 0 && errors2 == Map(rejectedTopicName -> Errors.NONE)
}, "Failed to create topics after having been throttled")
}
}
@Test
def testPermissiveCreateTopicsRequest(): Unit = {
asPrincipal(ThrottledPrincipal) {
// Create two topics worth of 30 partitions each. As we use a permissive quota, we
// expect both topics to be created.
// Theoretically, the throttle time should be below or equal to:
// -(-40) / 2 = 20s
val (throttleTimeMs, errors) = createTopics(TopicsWith30Partitions, PermissiveCreateTopicsRequestVersion)
assertThrottleTime(20000, throttleTimeMs)
assertEquals(Map(Topic1 -> Errors.NONE, Topic2 -> Errors.NONE), errors)
}
}
@Test
def testUnboundedCreateTopicsRequest(): Unit = {
asPrincipal(UnboundedPrincipal) {
      // Create two topics worth of 30 partitions each. As we use a user without quota, we
// expect both topics to be created. The throttle time should be equal to 0.
val (throttleTimeMs, errors) = createTopics(TopicsWith30Partitions, StrictCreateTopicsRequestVersion)
assertEquals(0, throttleTimeMs)
assertEquals(Map(Topic1 -> Errors.NONE, Topic2 -> Errors.NONE), errors)
}
}
@Test
def testStrictDeleteTopicsRequest(): Unit = {
asPrincipal(UnboundedPrincipal) {
createTopics(TopicsWith30Partitions, StrictCreateTopicsRequestVersion)
}
asPrincipal(ThrottledPrincipal) {
// Delete two topics worth of 30 partitions each. As we use a strict quota, we
// expect the first topic to be deleted and the second to be rejected.
// Theoretically, the throttle time should be below or equal to:
// -(-10) / 2 = 5s
val (throttleTimeMs1, errors1) = deleteTopics(TopicsWith30Partitions, StrictDeleteTopicsRequestVersion)
assertThrottleTime(5000, throttleTimeMs1)
// Ordering is not guaranteed so we only check the errors
assertEquals(Set(Errors.NONE, Errors.THROTTLING_QUOTA_EXCEEDED), errors1.values.toSet)
// Retry the rejected topic. It should succeed after the throttling delay is passed and the
// throttle time should be zero.
val rejectedTopicName = errors1.filter(_._2 == Errors.THROTTLING_QUOTA_EXCEEDED).keys.head
val rejectedTopicSpec = TopicsWith30Partitions.filter(_._1 == rejectedTopicName)
TestUtils.waitUntilTrue(() => {
val (throttleTimeMs2, errors2) = deleteTopics(rejectedTopicSpec, StrictDeleteTopicsRequestVersion)
throttleTimeMs2 == 0 && errors2 == Map(rejectedTopicName -> Errors.NONE)
}, "Failed to delete topics after having been throttled")
}
}
@Test
def testPermissiveDeleteTopicsRequest(): Unit = {
asPrincipal(UnboundedPrincipal) {
createTopics(TopicsWith30Partitions, StrictCreateTopicsRequestVersion)
}
asPrincipal(ThrottledPrincipal) {
// Delete two topics worth of 30 partitions each. As we use a permissive quota, we
// expect both topics to be deleted.
// Theoretically, the throttle time should be below or equal to:
// -(-40) / 2 = 20s
val (throttleTimeMs, errors) = deleteTopics(TopicsWith30Partitions, PermissiveDeleteTopicsRequestVersion)
assertThrottleTime(20000, throttleTimeMs)
assertEquals(Map(Topic1 -> Errors.NONE, Topic2 -> Errors.NONE), errors)
}
}
@Test
def testUnboundedDeleteTopicsRequest(): Unit = {
asPrincipal(UnboundedPrincipal) {
createTopics(TopicsWith30Partitions, StrictCreateTopicsRequestVersion)
      // Delete two topics worth of 30 partitions each. As we use a user without quota, we
// expect both topics to be deleted. The throttle time should be equal to 0.
val (throttleTimeMs, errors) = deleteTopics(TopicsWith30Partitions, StrictDeleteTopicsRequestVersion)
assertEquals(0, throttleTimeMs)
assertEquals(Map(Topic1 -> Errors.NONE, Topic2 -> Errors.NONE), errors)
}
}
@Test
def testStrictCreatePartitionsRequest(): Unit = {
asPrincipal(UnboundedPrincipal) {
      createTopics(TopicsWithOnePartition, StrictCreateTopicsRequestVersion)
}
asPrincipal(ThrottledPrincipal) {
// Add 30 partitions to each topic. As we use a strict quota, we
// expect the first topic to be extended and the second to be rejected.
// Theoretically, the throttle time should be below or equal to:
// -(-10) / 2 = 5s
val (throttleTimeMs1, errors1) = createPartitions(TopicsWith31Partitions, StrictCreatePartitionsRequestVersion)
assertThrottleTime(5000, throttleTimeMs1)
// Ordering is not guaranteed so we only check the errors
assertEquals(Set(Errors.NONE, Errors.THROTTLING_QUOTA_EXCEEDED), errors1.values.toSet)
// Retry the rejected topic. It should succeed after the throttling delay is passed and the
// throttle time should be zero.
val rejectedTopicName = errors1.filter(_._2 == Errors.THROTTLING_QUOTA_EXCEEDED).keys.head
val rejectedTopicSpec = TopicsWith30Partitions.filter(_._1 == rejectedTopicName)
TestUtils.waitUntilTrue(() => {
val (throttleTimeMs2, errors2) = createPartitions(rejectedTopicSpec, StrictCreatePartitionsRequestVersion)
throttleTimeMs2 == 0 && errors2 == Map(rejectedTopicName -> Errors.NONE)
}, "Failed to create partitions after having been throttled")
}
}
@Test
def testPermissiveCreatePartitionsRequest(): Unit = {
asPrincipal(UnboundedPrincipal) {
      createTopics(TopicsWithOnePartition, StrictCreateTopicsRequestVersion)
}
asPrincipal(ThrottledPrincipal) {
      // Add 30 partitions to each topic. As we use a permissive quota, we
      // expect both requests to succeed.
// Theoretically, the throttle time should be below or equal to:
// -(-40) / 2 = 20s
val (throttleTimeMs, errors) = createPartitions(TopicsWith31Partitions, PermissiveCreatePartitionsRequestVersion)
assertThrottleTime(20000, throttleTimeMs)
assertEquals(Map(Topic1 -> Errors.NONE, Topic2 -> Errors.NONE), errors)
}
}
@Test
def testUnboundedCreatePartitionsRequest(): Unit = {
asPrincipal(UnboundedPrincipal) {
      createTopics(TopicsWithOnePartition, StrictCreateTopicsRequestVersion)
      // Add 30 partitions to each topic. As we use a user without quota, we
      // expect both requests to succeed. The throttle time should be equal to 0.
val (throttleTimeMs, errors) = createPartitions(TopicsWith31Partitions, StrictCreatePartitionsRequestVersion)
assertEquals(0, throttleTimeMs)
assertEquals(Map(Topic1 -> Errors.NONE, Topic2 -> Errors.NONE), errors)
}
}
private def assertThrottleTime(max: Int, actual: Int): Unit = {
assertTrue(
(actual >= 0) && (actual <= max),
s"Expected a throttle time between 0 and $max but got $actual")
}
private def createTopics(topics: Map[String, Int], version: Short): (Int, Map[String, Errors]) = {
val data = new CreateTopicsRequestData()
topics.foreach { case (topic, numPartitions) =>
data.topics.add(new CreatableTopic()
.setName(topic).setNumPartitions(numPartitions).setReplicationFactor(1))
}
val request = new CreateTopicsRequest.Builder(data).build(version)
val response = connectAndReceive[CreateTopicsResponse](request)
response.data.throttleTimeMs -> response.data.topics.asScala
.map(topic => topic.name -> Errors.forCode(topic.errorCode)).toMap
}
private def deleteTopics(topics: Map[String, Int], version: Short): (Int, Map[String, Errors]) = {
val data = new DeleteTopicsRequestData()
.setTimeoutMs(60000)
.setTopicNames(topics.keys.toSeq.asJava)
val request = new DeleteTopicsRequest.Builder(data).build(version)
val response = connectAndReceive[DeleteTopicsResponse](request)
response.data.throttleTimeMs -> response.data.responses.asScala
.map(topic => topic.name -> Errors.forCode(topic.errorCode)).toMap
}
private def createPartitions(topics: Map[String, Int], version: Short): (Int, Map[String, Errors]) = {
val data = new CreatePartitionsRequestData().setTimeoutMs(60000)
topics.foreach { case (topic, numPartitions) =>
data.topics.add(new CreatePartitionsTopic()
.setName(topic).setCount(numPartitions).setAssignments(null))
}
val request = new CreatePartitionsRequest.Builder(data).build(version)
val response = connectAndReceive[CreatePartitionsResponse](request)
response.data.throttleTimeMs -> response.data.results.asScala
.map(topic => topic.name -> Errors.forCode(topic.errorCode)).toMap
}
private def defineUserQuota(user: String, quota: Option[Double]): Unit = {
val entity = new ClientQuotaEntity(Map(ClientQuotaEntity.USER -> user).asJava)
val quotas = Map(QuotaConfigs.CONTROLLER_MUTATION_RATE_OVERRIDE_CONFIG -> quota)
try alterClientQuotas(Map(entity -> quotas))(entity).get(10, TimeUnit.SECONDS) catch {
case e: ExecutionException => throw e.getCause
}
}
private def waitUserQuota(user: String, expectedQuota: Double): Unit = {
val quotaManager = servers.head.quotaManagers.controllerMutation
var actualQuota = Double.MinValue
TestUtils.waitUntilTrue(() => {
actualQuota = quotaManager.quota(user, "").bound()
expectedQuota == actualQuota
}, s"Quota of $user is not $expectedQuota but $actualQuota")
}
private def quotaMetric(user: String): Option[KafkaMetric] = {
val metrics = servers.head.metrics
val metricName = metrics.metricName(
"tokens",
QuotaType.ControllerMutation.toString,
"Tracking remaining tokens in the token bucket per user/client-id",
Map(DefaultTags.User -> user, DefaultTags.ClientId -> "").asJava)
Option(servers.head.metrics.metric(metricName))
}
private def waitQuotaMetric(user: String, expectedQuota: Double): Unit = {
TestUtils.retry(JTestUtils.DEFAULT_MAX_WAIT_MS) {
quotaMetric(user) match {
case Some(metric) =>
val config = metric.config()
assertEquals(expectedQuota, config.quota().bound(), 0.1)
assertEquals(ControllerQuotaSamples, config.samples())
assertEquals(ControllerQuotaWindowSizeSeconds * 1000, config.timeWindowMs())
case None =>
fail(s"Quota metric of $user is not defined")
}
}
}
private def alterClientQuotas(request: Map[ClientQuotaEntity, Map[String, Option[Double]]]): Map[ClientQuotaEntity, KafkaFutureImpl[Void]] = {
val entries = request.map { case (entity, alter) =>
val ops = alter.map { case (key, value) =>
new ClientQuotaAlteration.Op(key, value.map(Double.box).orNull)
}.asJavaCollection
new ClientQuotaAlteration(entity, ops)
}
val response = request.map(e => e._1 -> new KafkaFutureImpl[Void]).asJava
sendAlterClientQuotasRequest(entries).complete(response)
val result = response.asScala
assertEquals(request.size, result.size)
request.foreach(e => assertTrue(result.get(e._1).isDefined))
result.toMap
}
private def sendAlterClientQuotasRequest(entries: Iterable[ClientQuotaAlteration]): AlterClientQuotasResponse = {
val request = new AlterClientQuotasRequest.Builder(entries.asJavaCollection, false).build()
connectAndReceive[AlterClientQuotasResponse](request, destination = controllerSocketServer)
}
}
| guozhangwang/kafka | core/src/test/scala/unit/kafka/server/ControllerMutationQuotaTest.scala | Scala | apache-2.0 | 18,899 |
package org.usagram.clarify.generation.generator
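/**
 * Name and type fragments consumed by the arity-based code generator. A sketch of intent, inferred
 * from the definitions below: each helper produces the text for arity `n` (validator, validity,
 * product and value names), and `Fragment` is assumed to expand the supplied index-to-string
 * function over the indices 1..n.
 */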
object Fragments {
def ComplexValidator(n: Int) = s"ComplexValidator$n"
def Validity(n: Int) = s"Validity$n"
def Product(n: Int) = s"Product$n"
def V(n: Int) = new Fragment(n)("V" + _)
def Indefinite(n: Int) = new Fragment(n)("Indefinite[" + V(_) + "]")
def Definite(n: Int) = new Fragment(n)("Definite[" + V(_) + "]")
def value(n: Int) = new Fragment(n)("value" + _)
def valueArg(n: Int) = new Fragment(n)(n => s"${value(n)}: ${Indefinite(n)}")
def __(n: Int) = new Fragment(n)("_" + _)
def __Arg(n: Int) = new Fragment(n)(n => s"${__(n)}: ${Definite(n)}")
def thisValidity(n: Int) = new Fragment(n)("thisValidity._" + _)
def thatValidity(n: Int) = new Fragment(n)("thatValidity._" + _)
}
| takkkun/clarify | generation/src/main/scala/org/usagram/clarify/generation/generator/Fragments.scala | Scala | mit | 775 |
/*
* Copyright 2015 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rhttpc.akkapersistence.impl
import akka.actor.FSM
import rhttpc.client.subscription.SubscriptionOnResponse
private[akkapersistence] trait StateTransitionHandler[S, D] {
protected def onSubscriptionsOffered(subscriptions: Set[SubscriptionOnResponse]): Unit
protected def onStateTransition(transitionData: TransitionData[S, D]): Unit
protected def onFinishedJobAfterTransition(afterAllData: FinishedJobAfterTransitionData[S, D]): Unit
}
private[rhttpc] trait FSMStateTransitionRegistrar[S, D] { self: FSM[S, D] with StateTransitionHandler[S, D] with FSMAfterAllListenerHolder[S, D] =>
protected def incOwnLastSequenceNr(): Long
onTransition {
case (_, to) =>
onStateTransition(TransitionData[S, D](to, nextStateData, incOwnLastSequenceNr(), useCurrentAfterAllListener()))
}
}
private[rhttpc] case class TransitionData[S, D](state: S, data: D, sequenceNumber: Long, afterAllListener: Option[RecipientWithMsg]) {
def toFinishedJobData(subscriptions: Set[SubscriptionOnResponse]): FinishedJobAfterTransitionData[S, D] = {
FinishedJobAfterTransitionData(state, data, subscriptions, sequenceNumber, afterAllListener)
}
}
private[rhttpc] case class FinishedJobAfterTransitionData[S, D](state: S, data: D, subscriptions: Set[SubscriptionOnResponse], sequenceNumber: Long, afterAllListener: Option[RecipientWithMsg]) | arkadius/reliable-http-client | rhttpc-akka-persistence/src/main/scala/rhttpc/akkapersistence/impl/StateTransitionHandler.scala | Scala | apache-2.0 | 1,969 |
package de.alog.util
import akka.actor._
import com.mongodb.casbah.Imports._
import com.typesafe.config.Config
class LogDatabaseImpl(config: Config) extends Extension {
lazy val mongoDb:MongoDB = createMongoDbConn
private def createMongoDbConn = {
import com.mongodb.casbah.commons.conversions.scala._
RegisterJodaTimeConversionHelpers()
val host = config.getString("mongo.database_host")
val port = config.getInt("mongo.database_port")
val dbn = config.getString("mongo.database_name")
val db = MongoClient(
host=host,
port=port
)(dbn)
db("logEntries").ensureIndex("timestamp")
db("logEntries").ensureIndex(MongoDBObject("message" -> "text"))
db("logMeta").ensureIndex("key")
db("logMeta").ensureIndex("type")
db
}
}
object LogDatabaseExtension extends ExtensionId[LogDatabaseImpl] with ExtensionIdProvider {
def createExtension(system: ExtendedActorSystem): LogDatabaseImpl = new LogDatabaseImpl(system.settings.config)
def lookup = LogDatabaseExtension
}
trait LogDatabase {
self: Actor =>
def mongoDb = LogDatabaseExtension(context.system).mongoDb
def logEntriesDb = mongoDb("logEntries")
} | eweinell/alog | alog/src/main/scala/de/alog/util/LogDatabase.scala | Scala | apache-2.0 | 1,200 |
package fif
import algebra.Semigroup
object TestHelpers {
implicit val sg = new Semigroup[Int] {
override def combine(a: Int, b: Int) = a + b
}
}
| malcolmgreaves/abstract_data | data-tc-extra/src/test/scala/fif/TestHelpers.scala | Scala | apache-2.0 | 158 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.utils.tf.loaders
import java.nio.ByteOrder
import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.dllib.nn.ops.{RandomUniform => RandomUniformOps}
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.dllib.utils.tf.Context
import org.tensorflow.framework.{DataType, NodeDef}
import scala.reflect.ClassTag
class RandomUniform extends TensorflowOpsLoader {
override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
val seed = if (nodeDef.getAttrMap.containsKey("seed")) {
Some(nodeDef.getAttrMap.get("seed").getI().toInt)
} else {
None
}
nodeDef.getAttrMap.get("dtype").getType match {
case DataType.DT_FLOAT =>
val min = 0
val max = 1
RandomUniformOps[T, Float](min, max, seed)
case DataType.DT_DOUBLE =>
val min = 0
val max = 1
RandomUniformOps[T, Double](min, max, seed)
case _ =>
throw new IllegalArgumentException("Not support data type")
}
}
}
| intel-analytics/BigDL | scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/utils/tf/loaders/RandomUniform.scala | Scala | apache-2.0 | 1,766 |
package org.flowpaint.brush
import scala.xml.Node
import org.flowpaint.filters.{StrokeListener, StrokeFilter, PathProcessor}
import org.flowpaint.ink.Ink
import javax.swing.JComponent
import org.flowpaint.pixelprocessor.{PixelProcessor, ScanlineCalculator}
import org.flowpaint.property.{DataEditor, GradientSliderEditor, DataImpl, Data}
import org.flowpaint.ui.editors.Editor
import org.flowpaint.ui.slider.InkSliderUi
import org.flowpaint.ui.{BrushSliderUi, ParameterUi}
import org.flowpaint.util.{DataSample, ListenableList, Tome}
import org.flowpaint.util.ConfigurationMetadata
import java.util.HashSet
import scala.collection.JavaConversions._
/**
* Contains deserialization code for brushes.
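 *
 * A sketch of the XML shape `fromXML` expects, mirroring `toXML` further down (element and
 * attribute names come from the code, the values are illustrative only):
 *
 * <brush id="ink-1" description="Ink">
 *   <settings>...</settings>
 *   <pathProcessors><object .../></pathProcessors>
 *   <pixelProcessors><object .../></pixelProcessors>
 *   <editors><object .../></editors>
 * </brush>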
*/
object Brush {
def fromXML(node : Node) : Brush = {
val name = (node \\ "@id").text
val settings = Data.fromXML( (node \\ "settings").first )
val pixelProcessorMetadatas = (node \\ "pixelProcessors" \\ "object") map
{(n : Node) => ConfigurationMetadata.fromXML( n, classOf[PixelProcessor] )}
val pathProcessorMetadatas = (node \\ "pathProcessors" \\ "object") map
{(n : Node) => ConfigurationMetadata.fromXML( n, classOf[PathProcessor] )}
val editorMetadatas = (node \\ "editors" \\ "object") map
{(n : Node) => ConfigurationMetadata.fromXML( n, classOf[Editor] )}
val brush = new Brush(name, settings, pixelProcessorMetadatas.toList, pathProcessorMetadatas.toList, editorMetadatas.toList )
brush
}
}
/**
* Contains all settings for a certain brush tool.
*
* @author Hans Haggstrom
*/
class Brush(val identifier: String,
initialSettings : Data,
pixelProcessorMetadatas: List[ConfigurationMetadata[PixelProcessor]],
pathProcessorMetadatas: List[ConfigurationMetadata[PathProcessor]],
initialEditors: List[ConfigurationMetadata[Editor]]) extends Tome {
val settings = new DataImpl( initialSettings )
val pixelProcessors = new ListenableList[ConfigurationMetadata[PixelProcessor]](pixelProcessorMetadatas, notifyListenersOnChildListChange)
val strokeProcessors = new ListenableList[ConfigurationMetadata[PathProcessor]](pathProcessorMetadatas, notifyListenersOnChildListChange)
val editors = new ListenableList[ConfigurationMetadata[Editor]](initialEditors, notifyListenersOnChildListChange)
private val listeners = new HashSet[ChangeListener]()
private var scanlineCalculator : ScanlineCalculator = null
type ChangeListener = (Brush) => Unit
private def notifyListenersOnChildListChange = (l: Any) => notifyListeners()
private def notifyListeners() {
scanlineCalculator = null; // Reset
listeners foreach {listener => listener(this)}
}
settings.addListener((data: Data, prop: String) => notifyListeners())
def getScanlineCalculator = {
if (scanlineCalculator == null) {
scanlineCalculator = new ScanlineCalculator()
scanlineCalculator.init( createPixelProcessors(), settings )
}
scanlineCalculator
}
def name = identifier
def createPixelProcessors() : List[PixelProcessor] = {
pixelProcessors.elements.map( _.createInstance() )
}
def createPathProcessors() : List[PathProcessor] = {
strokeProcessors.elements.map( _.createInstance() )
}
def createEditors() : List[Editor] = {
val list = editors.elements.map( _.createInstance() )
list foreach ( _.setEditedData( settings ) )
list
}
// Listener support
def addChangeListener(listener: ChangeListener) {listeners.add(listener)}
def removeChangeListener(listener: ChangeListener) {listeners.remove(listener)}
/**
* Create a copy of this brush, that can be edited without affecting this Brush.
* NOTE: Assumes pixel processors and stroke processors and editors are immutable.
*/
def createCopy(): Brush = new Brush(identifier, settings, pixelProcessors.elements, strokeProcessors.elements, editors.elements)
override def hashCode = {
var code = identifier.hashCode
code ^= 133 + settings.hashCode
code ^= 234 + pixelProcessors.hashCode
code ^= 435 + strokeProcessors.hashCode
code ^= 978 + editors.hashCode
code
}
def toXML() = <brush id={identifier} description={name}>
<settings>{settings.toXML()}</settings>
<pathProcessors>{strokeProcessors.elements map (_.toXML()) }</pathProcessors>
<pixelProcessors>{pixelProcessors.elements map (_.toXML()) }</pixelProcessors>
<editors>{editors.elements map (_.toXML()) }</editors>
</brush>
}
| zzorn/flowpaint | src/main/scala/org/flowpaint/brush/Brush.scala | Scala | gpl-2.0 | 4,714 |
package com.wavesplatform.generator
import java.net.{InetSocketAddress, URL}
import java.time.LocalDateTime
import java.time.temporal.ChronoUnit
import cats.Show
import cats.effect.concurrent.Ref
import cats.syntax.flatMap._
import com.wavesplatform.generator.Worker.{EmptyState, Settings, SkipState, State}
import com.wavesplatform.network.client.NetworkSender
import com.wavesplatform.transaction.Transaction
import com.wavesplatform.utils.ScorexLogging
import io.netty.channel.Channel
import monix.eval.Task
import monix.execution.Scheduler
import org.asynchttpclient.AsyncHttpClient
import play.api.libs.json.Json
import scala.compat.java8.FutureConverters
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}
class Worker(
settings: Settings,
transactionSource: Iterator[Transaction],
networkSender: NetworkSender,
node: InetSocketAddress,
nodeRestAddress: URL,
canContinue: () => Boolean,
initial: Seq[Transaction],
richAccountAddresses: Seq[String],
tailInitial: Seq[Transaction] = Seq()
)(implicit httpClient: AsyncHttpClient, ec: ExecutionContext)
extends ScorexLogging {
def run(): Future[Unit] =
task.runAsyncLogErr(Scheduler(ec))
private[this] val task =
for {
state <- Ref.of[Task, State](EmptyState(settings.warmUp))
initState <- settings.initialWarmUp match {
case Some(warmUp) => Ref.of[Task, State](EmptyState(warmUp))
case None => Ref.of[Task, State](SkipState(settings.utxLimit))
}
channel <- withReconnect(getChannel)
_ <- logInfo("INITIAL PHASE")
channel <- withReconnect(writeInitial(channel, initState))
_ <- logInfo("GENERAL PHASE")
channel <- withReconnect(pullAndWrite(channel, state))
_ <- closeChannel(channel)
} yield ()
private[this] val nodeUTXTransactionsToSendCount: Task[Int] = Task.defer {
import org.asynchttpclient.Dsl._
val request = get(s"$nodeRestAddress/transactions/unconfirmed/size").build()
Task
.fromFuture(FutureConverters.toScala(httpClient.executeRequest(request).toCompletableFuture))
.map(r => math.max(settings.utxLimit - (Json.parse(r.getResponseBody) \\ "size").as[Int], 0))
}
private[this] val balanceOfRichAccount: Task[Map[String, Long]] =
Task
.defer {
import org.asynchttpclient.Dsl._
val results = richAccountAddresses.map { address =>
val request = get(s"$nodeRestAddress/addresses/balance/$address").build()
Task
.fromFuture(FutureConverters.toScala(httpClient.executeRequest(request).toCompletableFuture))
.map(r => address -> (Json.parse(r.getResponseBody) \\ "balance").as[Long])
}
Task.parSequence(results).map(_.toMap)
}
.onErrorFallbackTo(Task.now(Map()))
private[this] val retrieveBalances: Task[Unit] =
if (!canContinue())
Task.unit
else
for {
balances <- balanceOfRichAccount
_ <- if (balances.nonEmpty) logInfo(s"Balances: ${balances.mkString("(", ", ", ")")}") else Task.unit
} yield ()
private[this] def writeInitial(channel: Channel, state: Ref[Task, State], txs: Seq[Transaction] = initial): Task[Channel] =
if (!canContinue())
Task.now(channel)
else
for {
validChannel <- validateChannel(channel)
_ <- logInfo(s"Sending initial transactions to $validChannel")
cntToSend <- calcAndSaveCntToSend(state)
_ <- Task.deferFuture(networkSender.send(validChannel, txs.take(cntToSend): _*))
r <- if (cntToSend >= txs.size) sleepOrWaitEmptyUtx(settings.tailInitialDelay) *> writeTailInitial(validChannel, state)
else sleep(settings.delay) *> Task.defer(writeInitial(channel, state, txs.drop(cntToSend)))
} yield r
private[this] def sleepOrWaitEmptyUtx(strategy: Either[FiniteDuration, FiniteDuration]): Task[Unit] =
strategy match {
case Left(duration) => sleep(duration)
case Right(duration) =>
for {
_ <- sleep(duration)
_ <- nodeUTXTransactionsToSendCount >>= (cnt => if (cnt == settings.utxLimit) Task.unit else Task.defer(sleepOrWaitEmptyUtx(strategy)))
} yield ()
}
private[this] def writeTailInitial(channel: Channel, state: Ref[Task, State], txs: Seq[Transaction] = tailInitial): Task[Channel] =
if (!canContinue())
Task.now(channel)
else
for {
validChannel <- validateChannel(channel)
_ <- logInfo(s"Sending tail initial transactions to $validChannel")
cntToSend <- calcAndSaveCntToSend(state)
_ <- Task.deferFuture(networkSender.send(validChannel, txs.take(cntToSend): _*))
r <- if (cntToSend >= txs.size) sleepOrWaitEmptyUtx(settings.initialDelay) *> Task.now(validChannel)
else sleep(settings.delay) *> Task.defer(writeTailInitial(validChannel, state, txs.drop(cntToSend)))
} yield r
private[this] def pullAndWrite(channel: Channel, state: Ref[Task, State], cnt: Int = 0): Task[Channel] =
if (!canContinue())
Task.now(channel)
else
for {
_ <- if (cnt % 10 == 0) retrieveBalances.executeAsync else Task.unit
validChannel <- validateChannel(channel)
cntToSend <- calcAndSaveCntToSend(state)
_ <- logInfo(s"Sending $cntToSend transactions to $validChannel")
txs <- Task(transactionSource.take(cntToSend).to(LazyList))
_ <- txs.headOption.fold(Task.unit)(tx => logInfo(s"Head transaction id: ${tx.id()}"))
_ <- Task.deferFuture(networkSender.send(validChannel, txs: _*))
_ <- sleep(settings.delay)
r <- Task.defer(pullAndWrite(validChannel, state, (cnt + 1) % 10))
} yield r
private[this] def calcAndSaveCntToSend(stateRef: Ref[Task, State]): Task[Int] =
for {
utxCnt <- nodeUTXTransactionsToSendCount
state <- stateRef.get
nextState = state.next(utxCnt)
_ <- logTrace(s"Prev state: $state, new state: $nextState, tx number to utx limit: $utxCnt")
_ <- stateRef.set(nextState)
} yield nextState.cnt
private[this] def withReconnect[A](baseTask: Task[A]): Task[A] =
baseTask.onErrorHandleWith {
case error if settings.autoReconnect && canContinue() =>
logError(s"[$node] An error during sending transactions, reconnect", error) *>
sleep(settings.reconnectDelay) *>
Task.defer(withReconnect(baseTask))
case error =>
logError("Stopping because autoReconnect is disabled", error) *>
Task.raiseError(error)
}
private[this] def getChannel: Task[Channel] = Task.deferFuture(networkSender.connect(node))
private[this] def closeChannel(channel: Channel): Task[Unit] = Task(channel.close())
private[this] def validateChannel(channel: Channel): Task[Channel] = if (channel.isOpen) Task.now(channel) else getChannel
private[this] def logError(msg: => String, err: Throwable): Task[Unit] = Task(log.error(msg, err))
private[this] def logInfo(msg: => String): Task[Unit] = Task(log.info(msg))
private[this] def logTrace(msg: => String): Task[Unit] = Task(log.trace(msg))
private[this] def sleep(delay: FiniteDuration): Task[Unit] = logInfo(s"Sleeping for $delay") *> Task.sleep(delay)
}
object Worker {
case class Settings(
utxLimit: Int,
delay: FiniteDuration,
tailInitialDelay: Either[FiniteDuration, FiniteDuration],
initialDelay: Either[FiniteDuration, FiniteDuration],
workingTime: FiniteDuration,
autoReconnect: Boolean,
reconnectDelay: FiniteDuration,
warmUp: WarmUp,
initialWarmUp: Option[WarmUp]
)
case class WarmUp(
start: Int,
end: Int,
step: Int,
duration: Option[FiniteDuration],
once: Boolean
)
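  // A rough illustration of the ramp implemented in State.next below (numbers are an example only):
  // with WarmUp(start = 10, end = 100, step = 10, duration = None, once = false) the worker sends
  // 10, 20, 30, ... transactions per iteration, always capped by the free UTX slots, and stays
  // capped at `end`; with once = true (or once `duration` elapses) the cap is dropped and the worker
  // simply fills the available UTX slots.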
sealed trait State {
def cnt: Int
def next(utxToSendCnt: Int): State =
this match {
case SkipState(_) => SkipState(utxToSendCnt)
case EmptyState(warmUp) =>
WorkState(warmUp.start, false, warmUp.duration.map(d => LocalDateTime.now.plus(d.toMillis, ChronoUnit.MILLIS)), warmUp)
case s @ WorkState(cnt, raised, endAfter, warmUp) =>
if (raised) s.copy(cnt = utxToSendCnt)
else {
endAfter match {
case Some(ldt) if ldt.isBefore(LocalDateTime.now) => s.copy(cnt = utxToSendCnt, raised = true)
case _ =>
val mayBeNextCnt = math.min(cnt + warmUp.step, warmUp.end)
val nextCnt = math.min(mayBeNextCnt, utxToSendCnt)
val nextRaised = nextCnt == warmUp.end && warmUp.once
WorkState(nextCnt, nextRaised, endAfter, warmUp)
}
}
}
}
final case class EmptyState(warmUp: WarmUp) extends State {
val cnt: Int = 0
override def toString: String = "EmptyState"
}
final case class WorkState(cnt: Int, raised: Boolean, endAfter: Option[LocalDateTime], warmUp: WarmUp) extends State {
require(cnt >= 0)
override def toString: String = s"State(cnt=$cnt, raised=$raised, endAfter=$endAfter)"
}
final case class SkipState(cnt: Int) extends State {
override def toString: String = "SkipState"
}
implicit val toPrintable: Show[Settings] = { x =>
import x._
s"""initial delay: $initialDelay
|delay between iterations: $delay
|auto reconnect: ${if (autoReconnect) "enabled" else "disabled"}
|reconnect delay: $reconnectDelay
|warm-up: (start=${warmUp.start}, end=${warmUp.end}, step=${warmUp.step}, once=${warmUp.once})
|""".stripMargin
}
}
| wavesplatform/Waves | node-generator/src/main/scala/com/wavesplatform/generator/Worker.scala | Scala | mit | 9,801 |
package com.teambytes.awsleader
import akka.actor._
import akka.cluster.ClusterEvent.{MemberRemoved, MemberUp, CurrentClusterState, ClusterDomainEvent}
import akka.cluster.{MemberStatus, Cluster, Member}
import com.teambytes.awsleader.LeaderElectionActor.{Data, State}
private[awsleader] class LeaderElectionActor(minMembers: Int, leaderProp: () => Props) extends Actor with FSM[State, Data] with ActorLogging {
import LeaderElectionActor._
startWith(NoQuorum, Data(None, Set()))
private val cluster = Cluster(context.system)
override def preStart() = {
log.info("LeaderElectionActor: Starting.")
cluster.subscribe(self, classOf[ClusterDomainEvent])
}
override def postStop() = {
cluster.unsubscribe(self)
log.info("LeaderElectionActor: Stopped.")
}
when(NoQuorum) {
case e@Event(s:CurrentClusterState, d: Data) => stayOrGoToQuorum(d.copy(clusterMembers = s.members.filter(_.status == MemberStatus.Up)))
case e@Event(MemberUp(member), d: Data) => stayOrGoToQuorum(d.copy(clusterMembers = d.clusterMembers + member))
case e@Event(MemberRemoved(member, previousStatus), d: Data) => stayOrGoToQuorum(d.copy(clusterMembers = d.clusterMembers - member))
}
when(Quorum) {
case e@Event(s:CurrentClusterState, d: Data) => stayOrGoToNoQuorum(d.copy(clusterMembers = s.members.filter(_.status == MemberStatus.Up)))
case e@Event(MemberUp(member), d: Data) => stayOrGoToNoQuorum(d.copy(clusterMembers = d.clusterMembers + member))
case e@Event(MemberRemoved(member, previousStatus), d: Data) => stayOrGoToNoQuorum(d.copy(clusterMembers = d.clusterMembers - member))
}
whenUnhandled {
case e@Event(c:ClusterDomainEvent, d: Data) => stay using d
}
private def stayOrGoToQuorum(newData: Data) =
if (newData.numberOfMembers() >= minMembers){
log.info("LeaderElectionActor: Quorum has been achieved. Current members: {}", newData.clusterMembers)
goto(Quorum) using newData.copy(target = Some(context.actorOf(leaderProp(), "leader")))
} else {
log.info("LeaderElectionActor: Quorum has not been reached. Current members: {}", newData.clusterMembers)
stay using newData
}
private def stayOrGoToNoQuorum(newData: Data) =
if (newData.numberOfMembers() < minMembers) {
log.info("LeaderElectionActor: Quorum has been lost. Current members: {}", newData.clusterMembers)
newData.target.foreach(_ ! PoisonPill)
goto(NoQuorum) using newData.copy(target = None)
} else {
log.info("LeaderElectionActor: Still have quorum. Current members: {}", newData.clusterMembers)
stay using newData
}
}
object LeaderElectionActor {
def props(handler: LeaderActionsHandler, minMembers: Int) =
Props(classOf[LeaderElectionActor], minMembers, () => LeaderActor.props(handler))
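  // Typical wiring (illustrative, the actor name is an assumption): started once per cluster node,
  // the FSM below creates the "leader" child when quorum is reached and stops it when quorum is lost.
  //   system.actorOf(LeaderElectionActor.props(handler, minMembers = 3), "leader-election")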
// states
private[awsleader] sealed trait State
private[awsleader] case object NoQuorum extends State
private[awsleader] case object Quorum extends State
private[awsleader] case class Data(target: Option[ActorRef], clusterMembers: Set[Member]){
def numberOfMembers() = clusterMembers.size
}
}
| grahamar/aws-leader-election | src/main/scala/com/teambytes/awsleader/LeaderElectionActor.scala | Scala | apache-2.0 | 3,115 |
package io.neons.collector.application.guice.application.akka.actor
import akka.actor.{Actor, IndirectActorProducer}
import com.google.inject.name.Names
import com.google.inject.{Injector, Key}
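/**
 * Bridges Guice and Akka: whenever Akka needs a new instance, the producer looks up the [[Actor]]
 * binding annotated with the given name in the injector. A minimal usage sketch (the actor name is
 * an assumption): `system.actorOf(Props(classOf[GuiceActorProducer], injector, "collectorActor"))`.
 */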
class GuiceActorProducer(injector: Injector, actorName: String) extends IndirectActorProducer {
override def produce(): Actor = injector.getBinding(Key.get(classOf[Actor], Names.named(actorName))).getProvider.get()
override def actorClass = classOf[Actor]
}
| NeonsIo/collector | src/main/scala/io/neons/collector/application/guice/application/akka/actor/GuiceActorProducer.scala | Scala | mit | 458 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package akka.http.play
import akka.http.impl.engine.ws._
import akka.http.scaladsl.model.HttpResponse
import akka.http.scaladsl.model.ws.UpgradeToWebSocket
import akka.stream.scaladsl._
import akka.stream.stage._
import akka.stream.{ Attributes, FlowShape, Inlet, Outlet }
import akka.util.ByteString
import play.api.http.websocket._
import play.api.libs.streams.AkkaStreams
import play.core.server.common.WebSocketFlowHandler
import play.core.server.common.WebSocketFlowHandler.{ MessageType, RawMessage }
object WebSocketHandler {
/**
* Handle a WebSocket
*/
def handleWebSocket(upgrade: UpgradeToWebSocket, flow: Flow[Message, Message, _], bufferLimit: Int): HttpResponse = upgrade match {
case lowLevel: UpgradeToWebSocketLowLevel =>
lowLevel.handleFrames(messageFlowToFrameFlow(flow, bufferLimit))
case other => throw new IllegalArgumentException("UpgradeToWebsocket is not an Akka HTTP UpgradeToWebsocketLowLevel")
}
/**
* Convert a flow of messages to a flow of frame events.
*
* This implements the WebSocket control logic, including handling ping frames and closing the connection in a spec
* compliant manner.
*/
def messageFlowToFrameFlow(flow: Flow[Message, Message, _], bufferLimit: Int): Flow[FrameEvent, FrameEvent, _] = {
// Each of the stages here transforms frames to an Either[Message, ?], where Message is a close message indicating
// some sort of protocol failure. The handleProtocolFailures function then ensures that these messages skip the
// flow that we are wrapping, are sent to the client and the close procedure is implemented.
Flow[FrameEvent]
.via(aggregateFrames(bufferLimit))
.via(handleProtocolFailures(WebSocketFlowHandler.webSocketProtocol(bufferLimit).join(flow)))
.map(messageToFrameEvent)
}
/**
* Akka HTTP potentially splits frames into multiple frame events.
*
* This stage aggregates them so each frame is a full frame.
*
* @param bufferLimit The maximum size of frame data that should be buffered.
*/
private def aggregateFrames(bufferLimit: Int): GraphStage[FlowShape[FrameEvent, Either[Message, RawMessage]]] = {
new GraphStage[FlowShape[FrameEvent, Either[Message, RawMessage]]] {
val in = Inlet[FrameEvent]("WebSocketHandler.aggregateFrames.in")
val out = Outlet[Either[Message, RawMessage]]("WebSocketHandler.aggregateFrames.out")
override val shape = FlowShape.of(in, out)
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler {
var currentFrameData: ByteString = null
var currentFrameHeader: FrameHeader = null
override def onPush(): Unit = {
val elem = grab(in)
elem match {
// FrameData error handling first
case unexpectedData: FrameData if currentFrameHeader == null =>
// Technically impossible, this indicates a bug in Akka HTTP,
// since it has sent the start of a frame before finishing
// the previous frame.
push(out, close(Protocol.CloseCodes.UnexpectedCondition, "Server error"))
case FrameData(data, _) if currentFrameData.size + data.size > bufferLimit =>
push(out, close(Protocol.CloseCodes.TooBig))
// FrameData handling
case FrameData(data, false) =>
currentFrameData ++= data
pull(in)
case FrameData(data, true) =>
val message = frameToRawMessage(currentFrameHeader, currentFrameData ++ data)
currentFrameHeader = null
currentFrameData = null
push(out, Right(message))
// Frame start error handling
case FrameStart(header, data) if currentFrameHeader != null =>
// Technically impossible, this indicates a bug in Akka HTTP,
// since it has sent the start of a frame before finishing
// the previous frame.
push(out, close(Protocol.CloseCodes.UnexpectedCondition, "Server error"))
// Frame start protocol errors
case FrameStart(header, _) if header.mask.isEmpty =>
push(out, close(Protocol.CloseCodes.ProtocolError, "Unmasked client frame"))
// Frame start
case fs @ FrameStart(header, data) if fs.lastPart =>
push(out, Right(frameToRawMessage(header, data)))
case FrameStart(header, data) =>
currentFrameHeader = header
currentFrameData = data
pull(in)
}
}
override def onPull(): Unit = pull(in)
setHandlers(in, out, this)
}
}
}
private def frameToRawMessage(header: FrameHeader, data: ByteString) = {
val unmasked = FrameEventParser.mask(data, header.mask)
RawMessage(
frameOpCodeToMessageType(header.opcode),
unmasked, header.fin)
}
/**
* Converts frames to Play messages.
*/
private def frameOpCodeToMessageType(opcode: Protocol.Opcode): MessageType.Type = opcode match {
case Protocol.Opcode.Binary =>
MessageType.Binary
case Protocol.Opcode.Text =>
MessageType.Text
case Protocol.Opcode.Close =>
MessageType.Close
case Protocol.Opcode.Ping =>
MessageType.Ping
case Protocol.Opcode.Pong =>
MessageType.Pong
case Protocol.Opcode.Continuation =>
MessageType.Continuation
}
/**
* Converts Play messages to Akka HTTP frame events.
*/
private def messageToFrameEvent(message: Message): FrameEvent = {
def frameEvent(opcode: Protocol.Opcode, data: ByteString) =
FrameEvent.fullFrame(opcode, None, data, fin = true)
message match {
case TextMessage(data) => frameEvent(Protocol.Opcode.Text, ByteString(data))
case BinaryMessage(data) => frameEvent(Protocol.Opcode.Binary, data)
case PingMessage(data) => frameEvent(Protocol.Opcode.Ping, data)
case PongMessage(data) => frameEvent(Protocol.Opcode.Pong, data)
case CloseMessage(Some(statusCode), reason) => FrameEvent.closeFrame(statusCode, reason)
case CloseMessage(None, _) => frameEvent(Protocol.Opcode.Close, ByteString.empty)
}
}
/**
* Handles the protocol failures by gracefully closing the connection.
*/
private def handleProtocolFailures: Flow[WebSocketFlowHandler.RawMessage, Message, _] => Flow[Either[Message, RawMessage], Message, _] = {
AkkaStreams.bypassWith(Flow[Either[Message, RawMessage]].via(
new GraphStage[FlowShape[Either[Message, RawMessage], Either[RawMessage, Message]]] {
val in = Inlet[Either[Message, RawMessage]]("WebSocketHandler.handleProtocolFailures.in")
val out = Outlet[Either[RawMessage, Message]]("WebSocketHandler.handleProtocolFailures.out")
override val shape = FlowShape.of(in, out)
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler with OutHandler {
var closing = false
override def onPush(): Unit = {
val elem = grab(in)
elem match {
case _ if closing =>
completeStage()
case Right(message) =>
push(out, Left(message))
case Left(close) =>
closing = true
push(out, Right(close))
}
}
override def onPull(): Unit = pull(in)
setHandlers(in, out, this)
}
}), Merge(2, eagerComplete = true))
}
private case class Frame(header: FrameHeader, data: ByteString) {
def unmaskedData = FrameEventParser.mask(data, header.mask)
}
private def close(status: Int, message: String = "") = {
Left(new CloseMessage(Some(status), message))
}
}
| wsargent/playframework | framework/src/play-akka-http-server/src/main/scala/akka/http/play/WebSocketHandler.scala | Scala | apache-2.0 | 7,934 |
package io.github.benwhitehead.finch
import com.twitter.finagle.Service
import com.twitter.finagle.httpx.Method
import com.twitter.finagle.httpx.path.{->, /, Root}
import com.twitter.util.Future
import io.finch._
import io.finch.request.RequiredBody
import io.finch.response._
import io.github.benwhitehead.finch.request.DelegateService
/**
* @author Ben Whitehead
*/
object Echo extends HttpEndpoint {
def service(echo: String) = new Service[HttpRequest, HttpResponse] {
def apply(request: HttpRequest): Future[HttpResponse] = {
Ok(echo).toFuture
}
}
def route = {
case Method.Get -> Root / "echo" / echo => service(echo)
}
}
object JsonBlob extends HttpEndpoint {
lazy val logger = org.slf4j.LoggerFactory.getLogger(getClass.getName)
import io.finch.jackson._
import io.github.benwhitehead.finch.JacksonWrapper.mapper
lazy val service = DelegateService[HttpRequest, Map[String, Int]] {
(1 to 100).map { case i => s"$i" -> i }.toMap
}
lazy val handlePost = new Service[HttpRequest, HttpResponse] {
lazy val reader = for {
body <- RequiredBody[Map[String, Int]]
} yield body
def apply(request: HttpRequest): Future[HttpResponse] = {
reader(request) flatMap { case body =>
logger.info("body = {}", body)
Ok().toFuture
}
}
}
lazy val exceptionService = DelegateService {
throw new RuntimeException("unhandled exception")
}
def route = {
case Method.Get -> Root / "json" => service ! TurnIntoHttp[Map[String, Int]]
case Method.Post -> Root / "json" => handlePost
case Method.Get -> Root / "json" / "exception" => exceptionService
}
}
object TestingServer extends SimpleFinchServer {
override lazy val defaultHttpPort = 19990
override lazy val config = Config(port = 17070)
override lazy val serverName = "test-server"
def endpoint = {
Echo orElse JsonBlob
}
}
| samklr/finch-server | src/test/scala/io/github/benwhitehead/finch/TestingServer.scala | Scala | apache-2.0 | 1,903 |
package org.shelmet.heap.model
import org.shelmet.heap.util.Misc
import org.shelmet.heap.HeapId
import org.shelmet.heap.shared.InstanceId
import scala.collection.SortedSet
/**
* Represents an object that's allocated out of the Java heap. It occupies
* memory in the VM. It can be a
* JavaClass, a JavaObjectArray, a JavaValueArray or a JavaObject.
*/
abstract class JavaHeapObject(val heapId : HeapId,val objIdent : Option[InstanceId],snapshotV : Snapshot) extends Ordered[JavaHeapObject] {
var hardRefersSet : SortedSet[HeapId] = SortedSet.empty
var softRefersSet : SortedSet[HeapId] = SortedSet.empty
def referersSet : SortedSet[HeapId] =
if(softRefersSet.isEmpty)
hardRefersSet
else if(hardRefersSet.isEmpty)
softRefersSet
else
softRefersSet ++ hardRefersSet
var retainedCalculated = false
var retaining : Long = 0
def retainedSize = size + retaining
var minDepthToRoot = -1
var maxDepthToRoot = -1
def addDepth(depth : Int) {
if(minDepthToRoot == -1 || depth < minDepthToRoot)
minDepthToRoot = depth
if(maxDepthToRoot == -1 || depth > maxDepthToRoot)
maxDepthToRoot = depth
}
override def compare(that: JavaHeapObject): Int = heapId.compareTo(that.heapId)
def referers : SortedSet[JavaHeapObject] = referersSet.map(Snapshot.instance.findHeapObject(_).get)
def noRefers = softRefersSet.size + hardRefersSet.size
private[model] def addReferenceFrom(other: JavaHeapObject) {
if(other.refersOnlyWeaklyTo(this))
softRefersSet += other.heapId
else
hardRefersSet += other.heapId
}
def getClazz: JavaClass
override def equals(other : Any) = other match {
case j : JavaHeapObject => (j eq this) || j.heapId == this.heapId
case _ => false
}
/**
* Do any initialization this thing needs after its data is read in.
*/
def resolve(snapshot: Snapshot)
/**
* @return the id of this thing as hex string
*/
def getIdString: String = Misc.toHex(heapId.id)
override def toString: String = {
getClazz.displayName + (objIdent match {
case Some(ident) => " #" + ident.id
case None => "@" + getIdString
})
}
override val hashCode : Int = heapId.hashCode()
/**
* @return the StackTrace of the point of allocation of this object,
* or null if unknown
*/
def getAllocatedFrom: Option[StackTrace] = Snapshot.instance.getSiteTrace(this)
/**
* Tell the visitor about all of the objects we refer to
*/
def visitReferencedObjects(visit : JavaHeapObject => Unit,includeStatics : Boolean = true) {
visit(getClazz)
}
private[model] def addReferenceFromRoot(r: Root) {
Snapshot.instance.addReferenceFromRoot(r, this)
}
/**
* Return the set of root references to this object.
*/
def getRootReferences: Set[Root] = Snapshot.instance.getRoots(this)
/**
* Given other, which the caller promises is in referers, determines if
* the reference is only a weak reference.
*/
def refersOnlyWeaklyTo(other: JavaHeapObject): Boolean = false
/**
* Describe the reference that this thing has to target. This will only
* be called if target is in the array returned by getChildrenForRootset.
*/
def describeReferenceTo(target: JavaHeapObject): List[String] =
if(getClazz == target)
List("instance")
else
List.empty
/**
* @return the size of this object, in bytes, including VM overhead
*/
def size: Int
} | rorygraves/shelmet | src/main/scala/org/shelmet/heap/model/JavaHeapObject.scala | Scala | gpl-2.0 | 3,533 |
/*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package swave.core.util
import scala.annotation.tailrec
import scala.collection.mutable
final class RichArrayBuffer[A](val underlying: mutable.ArrayBuffer[A]) extends AnyVal {
  def inplaceSortBy[B](f: A ⇒ B)(implicit ord: Ordering[B]): Unit = {
val buf = underlying.asInstanceOf[mutable.ArrayBuffer[AnyRef]]
val array = buf.toArray
java.util.Arrays.sort(array, ord.on(f).asInstanceOf[Ordering[AnyRef]])
buf.clear()
buf ++= array
()
}
  def removeWhere(f: A ⇒ Boolean): Unit = {
@tailrec def rec(ix: Int): Unit =
if (ix >= 0) {
if (f(underlying(ix))) underlying.remove(ix)
rec(ix - 1)
}
rec(underlying.size - 1)
}
def removeIfPresent(elem: A): Unit =
underlying.indexOf(elem) match {
      case -1 ⇒
      case ix ⇒ { underlying.remove(ix); () }
}
}
| sirthias/swave | core/src/main/scala/swave/core/util/RichArrayBuffer.scala | Scala | mpl-2.0 | 1,049 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package play.api
import java.io.File
import java.net.URI
import java.net.URL
import java.util.Properties
import java.time.Period
import java.time.temporal.TemporalAmount
import com.typesafe.config._
import play.twirl.api.utils.StringEscapeUtils
import play.utils.PlayIO
import scala.collection.JavaConverters._
import scala.concurrent.duration.Duration
import scala.concurrent.duration.FiniteDuration
import scala.util.control.NonFatal
/**
* This object provides a set of operations to create `Configuration` values.
*
* For example, to load a `Configuration` in a running application:
* {{{
* val config = Configuration.load()
* val foo = config.getString("foo").getOrElse("boo")
* }}}
*
* The underlying implementation is provided by https://github.com/typesafehub/config.
*/
object Configuration {
def load(
classLoader: ClassLoader,
properties: Properties,
directSettings: Map[String, AnyRef],
allowMissingApplicationConf: Boolean
): Configuration = {
try {
// Iterating through the system properties is prone to ConcurrentModificationExceptions
// (such as in unit tests), which is why Typesafe config maintains a cache for it.
// So, if the passed in properties *are* the system properties, don't parse it ourselves.
val userDefinedProperties = if (properties eq System.getProperties) {
ConfigFactory.empty()
} else {
ConfigFactory.parseProperties(properties)
}
// Inject our direct settings into the config.
val directConfig: Config = ConfigFactory.parseMap(directSettings.asJava)
// Resolve application.conf
val applicationConfig: Config = {
def setting(key: String): Option[String] =
directSettings.get(key).orElse(Option(properties.getProperty(key))).map(_.toString)
// The additional config.resource/config.file logic exists because
// ConfigFactory.defaultApplication will blow up if those are defined but the file is missing
// despite "setAllowMissing" (see DefaultConfigLoadingStrategy).
// In DevMode this is relevant for config.resource as reloader.currentApplicationClassLoader
// is null at the start of 'run', so the application classpath isn't available, which means
// the resource will be missing. For consistency (and historic behaviour) do config.file too.
{
setting("config.resource").map(resource => ConfigFactory.parseResources(classLoader, resource))
}.orElse {
setting("config.file").map(fileName => ConfigFactory.parseFileAnySyntax(new File(fileName)))
}
.getOrElse {
val parseOptions = ConfigParseOptions.defaults
.setClassLoader(classLoader)
.setAllowMissing(allowMissingApplicationConf)
ConfigFactory.defaultApplication(parseOptions)
}
}
// Resolve another .conf file so that we can override values in Akka's
// reference.conf, but still make it possible for users to override
// Play's values in their application.conf.
val playOverridesConfig: Config =
ConfigFactory.parseResources(classLoader, "play/reference-overrides.conf")
// Combine all the config together into one big config
val combinedConfig: Config = Seq(
userDefinedProperties,
directConfig,
applicationConfig,
playOverridesConfig,
).reduceLeft(_.withFallback(_))
// Resolve settings. Among other things, the `play.server.dir` setting defined in directConfig will
// be substituted into the default settings in referenceConfig.
val resolvedConfig = ConfigFactory.load(classLoader, combinedConfig)
Configuration(resolvedConfig)
} catch {
case e: ConfigException => throw configError(e.getMessage, Option(e.origin), Some(e))
}
}
/**
* Load a new Configuration from the Environment.
*/
def load(environment: Environment, devSettings: Map[String, AnyRef]): Configuration = {
val allowMissingApplicationConf = environment.mode == Mode.Test
load(environment.classLoader, System.getProperties, devSettings, allowMissingApplicationConf)
}
/**
* Load a new Configuration from the Environment.
*/
def load(environment: Environment): Configuration = load(environment, Map.empty)
/**
* Returns an empty Configuration object.
*/
def empty = Configuration(ConfigFactory.empty())
/**
* Returns the reference configuration object.
*/
def reference = Configuration(ConfigFactory.defaultReference())
/**
* Create a new Configuration from the data passed as a Map.
*/
def from(data: Map[String, Any]): Configuration = {
def toJava(data: Any): Any = data match {
case map: Map[_, _] => map.mapValues(toJava).toMap.asJava
case iterable: Iterable[_] => iterable.map(toJava).asJava
case v => v
}
Configuration(ConfigFactory.parseMap(toJava(data).asInstanceOf[java.util.Map[String, AnyRef]]))
}
/**
* Create a new Configuration from the given key-value pairs.
*/
def apply(data: (String, Any)*): Configuration = from(data.toMap)
private[api] def configError(
message: String,
origin: Option[ConfigOrigin] = None,
e: Option[Throwable] = None
): PlayException = {
/*
      The stable values here keep us from putting a reference to a ConfigOrigin inside the anonymous ExceptionSource.
This is necessary to keep the Exception serializable, because ConfigOrigin is not serializable.
*/
val originLine = origin.map(_.lineNumber: java.lang.Integer).orNull
val originSourceName = origin.map(_.filename).orNull
val originUrlOpt = origin.flatMap(o => Option(o.url))
new PlayException.ExceptionSource("Configuration error", message, e.orNull) {
def line = originLine
def position = null
def input = originUrlOpt.map(PlayIO.readUrlAsString).orNull
def sourceName = originSourceName
override def toString = "Configuration error: " + getMessage
}
}
private[Configuration] val logger = Logger(getClass)
}
/**
* A full configuration set.
*
* The underlying implementation is provided by https://github.com/typesafehub/config.
*
* @param underlying the underlying Config implementation
*/
case class Configuration(underlying: Config) {
import Configuration.logger
private[play] def reportDeprecation(path: String, deprecated: String): Unit = {
val origin = underlying.getValue(deprecated).origin
logger.warn(s"${origin.description}: $deprecated is deprecated, use $path instead")
}
/**
* Merge two configurations. The second configuration overrides the first configuration.
* This is the opposite direction of `Config`'s `withFallback` method.
*/
@deprecated("Use withFallback instead", since = "2.8.0")
def ++(other: Configuration): Configuration = {
Configuration(other.underlying.withFallback(underlying))
}
/**
* Merge two configurations. The second configuration will act as the fallback for the first
* configuration.
*/
def withFallback(other: Configuration): Configuration = {
Configuration(underlying.withFallback(other.underlying))
}
/**
* Reads a value from the underlying implementation.
* If the value is not set this will return None, otherwise returns Some.
*
* Does not check neither for incorrect type nor null value, but catches and wraps the error.
*/
private def readValue[T](path: String, v: => T): Option[T] = {
try {
if (underlying.hasPathOrNull(path)) Some(v) else None
} catch {
case NonFatal(e) => throw reportError(path, e.getMessage, Some(e))
}
}
/**
* Check if the given path exists.
*/
def has(path: String): Boolean = underlying.hasPath(path)
/**
* Get the config at the given path.
*/
def get[A](path: String)(implicit loader: ConfigLoader[A]): A = {
loader.load(underlying, path)
}
/**
* Get the config at the given path and validate against a set of valid values.
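   *
   * For example (the key and the allowed values are illustrative only):
   * {{{
   * configuration.getAndValidate[String]("engine.mode", Set("dev", "prod"))
   * }}}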
*/
def getAndValidate[A](path: String, values: Set[A])(implicit loader: ConfigLoader[A]): A = {
val value = get(path)
if (!values(value)) {
throw reportError(path, s"Incorrect value, one of (${values.mkString(", ")}) was expected.")
}
value
}
/**
* Get a value that may either not exist or be null. Note that this is not generally considered idiomatic Config
* usage. Instead you should define all config keys in a reference.conf file.
*/
def getOptional[A](path: String)(implicit loader: ConfigLoader[A]): Option[A] = {
try {
if (underlying.hasPath(path)) Some(get[A](path)) else None
} catch {
case NonFatal(e) => throw reportError(path, e.getMessage, Some(e))
}
}
/**
* Get a prototyped sequence of objects.
*
   * Each object in the sequence will fall back to the object loaded from prototype.\\$path.
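   *
   * For example (illustrative configuration, key names are assumptions):
   * {{{
   * prototype.db = { pool-size = 10 }
   * db = [ { url = "jdbc:h2:mem:a" }, { url = "jdbc:h2:mem:b", pool-size = 20 } ]
   * }}}
   * Here `getPrototypedSeq("db")` yields two configurations; the first inherits `pool-size = 10`
   * from the prototype, the second overrides it with 20.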
*/
def getPrototypedSeq(path: String, prototypePath: String = "prototype.$path"): Seq[Configuration] = {
val prototype = underlying.getConfig(prototypePath.replace("$path", path))
get[Seq[Config]](path).map { config =>
Configuration(config.withFallback(prototype))
}
}
/**
* Get a prototyped map of objects.
*
   * Each value in the map will fall back to the object loaded from prototype.\\$path.
*/
def getPrototypedMap(path: String, prototypePath: String = "prototype.$path"): Map[String, Configuration] = {
val prototype = if (prototypePath.isEmpty) {
underlying
} else {
underlying.getConfig(prototypePath.replace("$path", path))
}
get[Map[String, Config]](path).map {
case (key, config) => key -> Configuration(config.withFallback(prototype))
}
}
/**
* Get a deprecated configuration item.
*
* If the deprecated configuration item is defined, it will be returned, and a warning will be logged.
*
* Otherwise, the configuration from path will be looked up.
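*
* For example (an illustrative sketch; both keys are hypothetical):
* {{{
* val timeout = configuration.getDeprecated[Duration]("engine.timeout", "engine.timeoutMs")
* }}}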
*/
def getDeprecated[A: ConfigLoader](path: String, deprecatedPaths: String*): A = {
deprecatedPaths
.collectFirst {
case deprecated if underlying.hasPath(deprecated) =>
reportDeprecation(path, deprecated)
get[A](deprecated)
}
.getOrElse {
get[A](path)
}
}
/**
* Get a deprecated configuration.
*
* If the deprecated configuration is defined, it will be returned, falling back to the new configuration, and a
* warning will be logged.
*
* Otherwise, the configuration from path will be looked up and used as is.
*/
def getDeprecatedWithFallback(path: String, deprecated: String, parent: String = ""): Configuration = {
val config = get[Config](path)
val merged = if (underlying.hasPath(deprecated)) {
reportDeprecation(path, deprecated)
get[Config](deprecated).withFallback(config)
} else config
Configuration(merged)
}
/**
* Retrieves a configuration value as `Milliseconds`.
*
* For example:
* {{{
* val configuration = Configuration.load()
* val timeout = configuration.getMillis("engine.timeout")
* }}}
*
* The configuration must be provided as:
*
* {{{
* engine.timeout = 1 second
* }}}
*/
def getMillis(path: String): Long = get[Duration](path).toMillis
/**
* Retrieves a configuration value as `Nanoseconds`.
*
* For example:
* {{{
* val configuration = Configuration.load()
* val timeout = configuration.getNanos("engine.timeout")
* }}}
*
* The configuration must be provided as:
*
* {{{
* engine.timeout = 1 second
* }}}
*/
def getNanos(path: String): Long = get[Duration](path).toNanos
/**
* Returns available keys.
*
* For example:
* {{{
* val configuration = Configuration.load()
* val keys = configuration.keys
* }}}
*
* @return the set of keys available in this configuration
*/
def keys: Set[String] = underlying.entrySet.asScala.map(_.getKey).toSet
/**
* Returns sub-keys.
*
* For example:
* {{{
* val configuration = Configuration.load()
* val subKeys = configuration.subKeys
* }}}
*
* @return the set of direct sub-keys available in this configuration
*/
def subKeys: Set[String] = underlying.root().keySet().asScala.toSet
/**
* Returns every path as a set of key to value pairs, by recursively iterating through the
* config objects.
*/
def entrySet: Set[(String, ConfigValue)] = underlying.entrySet().asScala.map(e => e.getKey -> e.getValue).toSet
/**
* Creates a configuration error for a specific configuration key.
*
* For example:
* {{{
* val configuration = Configuration.load()
* throw configuration.reportError("engine.connectionUrl", "Cannot connect!")
* }}}
*
* @param path the configuration key, related to this error
* @param message the error message
* @param e the related exception
* @return a configuration exception
*/
def reportError(path: String, message: String, e: Option[Throwable] = None): PlayException = {
val origin = Option(if (underlying.hasPath(path)) underlying.getValue(path).origin else underlying.root.origin)
Configuration.configError(message, origin, e)
}
/**
* Creates a configuration error for this configuration.
*
* For example:
* {{{
* val configuration = Configuration.load()
* throw configuration.globalError("Missing configuration key: [yop.url]")
* }}}
*
* @param message the error message
* @param e the related exception
* @return a configuration exception
*/
def globalError(message: String, e: Option[Throwable] = None): PlayException = {
Configuration.configError(message, Option(underlying.root.origin), e)
}
}
/**
* A config loader
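*
* A custom loader can be derived from an existing one, for example (an illustrative sketch;
* `AppName` is a hypothetical type):
* {{{
* case class AppName(value: String)
* implicit val appNameLoader: ConfigLoader[AppName] = ConfigLoader.stringLoader.map(AppName(_))
* }}}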
*/
trait ConfigLoader[A] { self =>
def load(config: Config, path: String = ""): A
def map[B](f: A => B): ConfigLoader[B] = (config, path) => f(self.load(config, path))
}
object ConfigLoader {
def apply[A](f: Config => String => A): ConfigLoader[A] = f(_)(_)
implicit val stringLoader: ConfigLoader[String] = ConfigLoader(_.getString)
implicit val seqStringLoader: ConfigLoader[Seq[String]] = ConfigLoader(_.getStringList).map(_.asScala.toSeq)
implicit val intLoader: ConfigLoader[Int] = ConfigLoader(_.getInt)
implicit val seqIntLoader: ConfigLoader[Seq[Int]] = ConfigLoader(_.getIntList).map(_.asScala.map(_.toInt).toSeq)
implicit val booleanLoader: ConfigLoader[Boolean] = ConfigLoader(_.getBoolean)
implicit val seqBooleanLoader: ConfigLoader[Seq[Boolean]] =
ConfigLoader(_.getBooleanList).map(_.asScala.map(_.booleanValue).toSeq)
implicit val finiteDurationLoader: ConfigLoader[FiniteDuration] =
ConfigLoader(_.getDuration).map(javaDurationToScala)
implicit val seqFiniteDurationLoader: ConfigLoader[Seq[FiniteDuration]] =
ConfigLoader(_.getDurationList).map(_.asScala.map(javaDurationToScala).toSeq)
implicit val durationLoader: ConfigLoader[Duration] = ConfigLoader { config => path =>
if (config.getIsNull(path)) Duration.Inf
else if (config.getString(path) == "infinite") Duration.Inf
else finiteDurationLoader.load(config, path)
}
// Note: this does not support null values but is added for convenience
implicit val seqDurationLoader: ConfigLoader[Seq[Duration]] =
seqFiniteDurationLoader.map(identity[Seq[Duration]])
implicit val periodLoader: ConfigLoader[Period] = ConfigLoader(_.getPeriod)
implicit val temporalLoader: ConfigLoader[TemporalAmount] = ConfigLoader(_.getTemporal)
implicit val doubleLoader: ConfigLoader[Double] = ConfigLoader(_.getDouble)
implicit val seqDoubleLoader: ConfigLoader[Seq[Double]] =
ConfigLoader(_.getDoubleList).map(_.asScala.map(_.doubleValue).toSeq)
implicit val numberLoader: ConfigLoader[Number] = ConfigLoader(_.getNumber)
implicit val seqNumberLoader: ConfigLoader[Seq[Number]] = ConfigLoader(_.getNumberList).map(_.asScala.toSeq)
implicit val longLoader: ConfigLoader[Long] = ConfigLoader(_.getLong)
implicit val seqLongLoader: ConfigLoader[Seq[Long]] =
ConfigLoader(_.getLongList).map(_.asScala.map(_.longValue).toSeq)
implicit val bytesLoader: ConfigLoader[ConfigMemorySize] = ConfigLoader(_.getMemorySize)
implicit val seqBytesLoader: ConfigLoader[Seq[ConfigMemorySize]] =
ConfigLoader(_.getMemorySizeList).map(_.asScala.toSeq)
implicit val configLoader: ConfigLoader[Config] = ConfigLoader(_.getConfig)
implicit val configListLoader: ConfigLoader[ConfigList] = ConfigLoader(_.getList)
implicit val configObjectLoader: ConfigLoader[ConfigObject] = ConfigLoader(_.getObject)
implicit val seqConfigLoader: ConfigLoader[Seq[Config]] = ConfigLoader(_.getConfigList).map(_.asScala.toSeq)
implicit val configurationLoader: ConfigLoader[Configuration] = configLoader.map(Configuration(_))
implicit val seqConfigurationLoader: ConfigLoader[Seq[Configuration]] = seqConfigLoader.map(_.map(Configuration(_)))
implicit val urlLoader: ConfigLoader[URL] = ConfigLoader(_.getString).map(new URL(_))
implicit val uriLoader: ConfigLoader[URI] = ConfigLoader(_.getString).map(new URI(_))
private def javaDurationToScala(javaDuration: java.time.Duration): FiniteDuration =
Duration.fromNanos(javaDuration.toNanos)
/**
* Loads a value, interpreting a null value as None and any other value as Some(value).
*/
implicit def optionLoader[A](implicit valueLoader: ConfigLoader[A]): ConfigLoader[Option[A]] =
(config, path) => if (config.getIsNull(path)) None else Some(valueLoader.load(config, path))
implicit def mapLoader[A](implicit valueLoader: ConfigLoader[A]): ConfigLoader[Map[String, A]] =
(config, path) => {
val obj = config.getObject(path)
val conf = obj.toConfig
obj
.keySet()
.iterator()
.asScala
.map { key =>
// quote and escape the key in case it contains dots or special characters
val path = "\\"" + StringEscapeUtils.escapeEcmaScript(key) + "\\""
key -> valueLoader.load(conf, path)
}
.toMap
}
}
| benmccann/playframework | core/play/src/main/scala/play/api/Configuration.scala | Scala | apache-2.0 | 18,291 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.adaptive
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.MapOutputStatistics
import org.apache.spark.internal.Logging
import org.apache.spark.sql.execution.{CoalescedPartitionSpec, PartialReducerPartitionSpec, ShufflePartitionSpec}
object ShufflePartitionsUtil extends Logging {
final val SMALL_PARTITION_FACTOR = 0.2
final val MERGED_PARTITION_FACTOR = 1.2
/**
* Coalesce the partitions from multiple shuffles, either in their original states, or applied
* with skew handling partition specs. If called on partitions containing skew partition specs,
* this method will keep the skew partition specs intact and only coalesce the partitions outside
* the skew sections.
*
* This method will return an empty result if the shuffles have been coalesced already, or if
* they do not have the same number of partitions, or if the coalesced result is the same as the
* input partition layout.
*
* @return A sequence of sequences of [[ShufflePartitionSpec]]s, with each inner sequence as the
* new partition specs for its corresponding shuffle after coalescing. If Nil is returned,
* then no coalescing is applied.
*/
def coalescePartitions(
mapOutputStatistics: Seq[Option[MapOutputStatistics]],
inputPartitionSpecs: Seq[Option[Seq[ShufflePartitionSpec]]],
advisoryTargetSize: Long,
minNumPartitions: Int): Seq[Seq[ShufflePartitionSpec]] = {
assert(mapOutputStatistics.length == inputPartitionSpecs.length)
if (mapOutputStatistics.isEmpty) {
return Seq.empty
}
// If `minNumPartitions` is very large, it is possible that we need to use a value less than
// `advisoryTargetSize` as the target size of a coalesced task.
val totalPostShuffleInputSize = mapOutputStatistics.flatMap(_.map(_.bytesByPartitionId.sum)).sum
// The max at here is to make sure that when we have an empty table, we only have a single
// coalesced partition.
// There is no particular reason that we pick 16. We just need a number to prevent
// `maxTargetSize` from being set to 0.
val maxTargetSize = math.max(
math.ceil(totalPostShuffleInputSize / minNumPartitions.toDouble).toLong, 16)
val targetSize = math.min(maxTargetSize, advisoryTargetSize)
val shuffleIds = mapOutputStatistics.flatMap(_.map(_.shuffleId)).mkString(", ")
logInfo(s"For shuffle($shuffleIds), advisory target size: $advisoryTargetSize, " +
s"actual target size $targetSize.")
// If `inputPartitionSpecs` are all empty, it means skew join optimization is not applied.
if (inputPartitionSpecs.forall(_.isEmpty)) {
coalescePartitionsWithoutSkew(mapOutputStatistics, targetSize)
} else {
coalescePartitionsWithSkew(mapOutputStatistics, inputPartitionSpecs, targetSize)
}
}
private def coalescePartitionsWithoutSkew(
mapOutputStatistics: Seq[Option[MapOutputStatistics]],
targetSize: Long): Seq[Seq[ShufflePartitionSpec]] = {
// `ShuffleQueryStageExec#mapStats` returns None when the input RDD has 0 partitions,
// we should skip it when calculating the `partitionStartIndices`.
val validMetrics = mapOutputStatistics.flatten
val numShuffles = mapOutputStatistics.length
// If all input RDDs have 0 partition, we create an empty partition for every shuffle reader.
if (validMetrics.isEmpty) {
return Seq.fill(numShuffles)(Seq(CoalescedPartitionSpec(0, 0)))
}
// We may have different pre-shuffle partition numbers, don't reduce shuffle partition number
// in that case. For example when we union fully aggregated data (data is arranged to a single
// partition) and a result of a SortMergeJoin (multiple partitions).
if (validMetrics.map(_.bytesByPartitionId.length).distinct.length > 1) {
return Seq.empty
}
val numPartitions = validMetrics.head.bytesByPartitionId.length
val newPartitionSpecs = coalescePartitions(0, numPartitions, validMetrics, targetSize)
if (newPartitionSpecs.length < numPartitions) {
Seq.fill(numShuffles)(newPartitionSpecs)
} else {
Seq.empty
}
}
private def coalescePartitionsWithSkew(
mapOutputStatistics: Seq[Option[MapOutputStatistics]],
inputPartitionSpecs: Seq[Option[Seq[ShufflePartitionSpec]]],
targetSize: Long): Seq[Seq[ShufflePartitionSpec]] = {
// Do not coalesce if any of the map output stats are missing or if not all shuffles have
// partition specs, which should not happen in practice.
if (!mapOutputStatistics.forall(_.isDefined) || !inputPartitionSpecs.forall(_.isDefined)) {
logWarning("Could not apply partition coalescing because of missing MapOutputStatistics " +
"or shuffle partition specs.")
return Seq.empty
}
val validMetrics = mapOutputStatistics.map(_.get)
// Extract the start indices of each partition spec. Give invalid index -1 to unexpected
// partition specs. When we reach here, it means skew join optimization has been applied.
val partitionIndicesSeq = inputPartitionSpecs.map(_.get.map {
case CoalescedPartitionSpec(start, end) if start + 1 == end => start
case PartialReducerPartitionSpec(reducerId, _, _, _) => reducerId
case _ => -1 // invalid
})
// There should be no unexpected partition specs and the start indices should be identical
// across all different shuffles.
assert(partitionIndicesSeq.distinct.length == 1 && partitionIndicesSeq.head.forall(_ >= 0),
s"Invalid shuffle partition specs: $inputPartitionSpecs")
// The indices may look like [0, 1, 2, 2, 2, 3, 4, 4, 5], and the repeated `2` and `4` mean
// skewed partitions.
val partitionIndices = partitionIndicesSeq.head
// The first index must be 0.
assert(partitionIndices.head == 0)
val newPartitionSpecsSeq = Seq.fill(mapOutputStatistics.length)(
ArrayBuffer.empty[ShufflePartitionSpec])
val numPartitions = partitionIndices.length
var i = 1
var start = 0
while (i < numPartitions) {
if (partitionIndices(i - 1) == partitionIndices(i)) {
// a skew section detected, starting from partition(i - 1).
val repeatValue = partitionIndices(i)
// coalesce any partitions before partition(i - 1) and after the end of the latest skew section.
if (i - 1 > start) {
val partitionSpecs = coalescePartitions(
partitionIndices(start), repeatValue, validMetrics, targetSize)
newPartitionSpecsSeq.foreach(_ ++= partitionSpecs)
}
// find the end of this skew section, skipping partition(i - 1) and partition(i).
var repeatIndex = i + 1
while (repeatIndex < numPartitions && partitionIndices(repeatIndex) == repeatValue) {
repeatIndex += 1
}
// copy the partition specs in the skew section to the new partition specs.
newPartitionSpecsSeq.zip(inputPartitionSpecs).foreach { case (newSpecs, oldSpecs) =>
newSpecs ++= oldSpecs.get.slice(i - 1, repeatIndex)
}
// start from after the skew section
start = repeatIndex
i = repeatIndex
} else {
// Indices outside of the skew section should be larger than the previous one by 1.
assert(partitionIndices(i - 1) + 1 == partitionIndices(i))
// no skew section detected, advance to the next index.
i += 1
}
}
// coalesce any partitions after the end of last skew section.
if (numPartitions > start) {
val partitionSpecs = coalescePartitions(
partitionIndices(start), partitionIndices.last + 1, validMetrics, targetSize)
newPartitionSpecsSeq.foreach(_ ++= partitionSpecs)
}
// only return coalesced result if any coalescing has happened.
if (newPartitionSpecsSeq.head.length < numPartitions) {
newPartitionSpecsSeq.map(_.toSeq)
} else {
Seq.empty
}
}
/**
* Coalesce the partitions of [start, end) from multiple shuffles. This method assumes that all
* the shuffles have the same number of partitions, and the partitions of same index will be read
* together by one task.
*
* The strategy used to determine the number of coalesced partitions is described as follows.
* To determine the number of coalesced partitions, we have a target size for a coalesced
* partition. Once we have size statistics of all shuffle partitions, we will do
* a pass of those statistics and pack shuffle partitions with continuous indices to a single
* coalesced partition until adding another shuffle partition would cause the size of a
* coalesced partition to be greater than the target size.
*
* For example, we have two shuffles with the following partition size statistics:
* - shuffle 1 (5 partitions): [100 MiB, 20 MiB, 100 MiB, 10 MiB, 30 MiB]
* - shuffle 2 (5 partitions): [10 MiB, 10 MiB, 70 MiB, 5 MiB, 5 MiB]
* Assuming the target size is 128 MiB, we will have 4 coalesced partitions, which are:
* - coalesced partition 0: shuffle partition 0 (size 110 MiB)
* - coalesced partition 1: shuffle partition 1 (size 30 MiB)
* - coalesced partition 2: shuffle partition 2 (size 170 MiB)
* - coalesced partition 3: shuffle partition 3 and 4 (size 50 MiB)
*
* @return A sequence of [[CoalescedPartitionSpec]]s. For example, if partitions [0, 1, 2, 3, 4]
* split at indices [0, 2, 3], the returned partition specs will be:
* CoalescedPartitionSpec(0, 2), CoalescedPartitionSpec(2, 3) and
* CoalescedPartitionSpec(3, 5).
*/
private def coalescePartitions(
start: Int,
end: Int,
mapOutputStatistics: Seq[MapOutputStatistics],
targetSize: Long): Seq[CoalescedPartitionSpec] = {
val partitionSpecs = ArrayBuffer.empty[CoalescedPartitionSpec]
var coalescedSize = 0L
var i = start
var latestSplitPoint = i
def createPartitionSpec(forceCreate: Boolean = false): Unit = {
// Skip empty inputs, as it is a waste to launch an empty task.
if (coalescedSize > 0 || forceCreate) {
partitionSpecs += CoalescedPartitionSpec(latestSplitPoint, i)
}
}
while (i < end) {
// We calculate the total size of i-th shuffle partitions from all shuffles.
var totalSizeOfCurrentPartition = 0L
var j = 0
while (j < mapOutputStatistics.length) {
totalSizeOfCurrentPartition += mapOutputStatistics(j).bytesByPartitionId(i)
j += 1
}
// If including the `totalSizeOfCurrentPartition` would exceed the target size, then start a
// new coalesced partition.
if (i > latestSplitPoint && coalescedSize + totalSizeOfCurrentPartition > targetSize) {
createPartitionSpec()
latestSplitPoint = i
// reset postShuffleInputSize.
coalescedSize = totalSizeOfCurrentPartition
} else {
coalescedSize += totalSizeOfCurrentPartition
}
i += 1
}
// Create at least one partition if all partitions are empty.
createPartitionSpec(partitionSpecs.isEmpty)
partitionSpecs.toSeq
}
/**
* Given a list of sizes, return an array of indices to split the list into multiple partitions,
* so that the size sum of each partition is close to the target size. Each index indicates the
* start of a partition.
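*
* For example (an illustrative sketch):
* {{{
* // with targetSize = 100, partitions of sizes [60], [60, 10, 10] and [70] are produced
* splitSizeListByTargetSize(Seq(60L, 60L, 10L, 10L, 70L), 100L) // returns Array(0, 1, 4)
* }}}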
*/
def splitSizeListByTargetSize(sizes: Seq[Long], targetSize: Long): Array[Int] = {
val partitionStartIndices = ArrayBuffer[Int]()
partitionStartIndices += 0
var i = 0
var currentPartitionSize = 0L
var lastPartitionSize = -1L
def tryMergePartitions() = {
// When we are going to start a new partition, it's possible that the current partition or
// the previous partition is very small and it's better to merge the current partition into
// the previous partition.
val shouldMergePartitions = lastPartitionSize > -1 &&
((currentPartitionSize + lastPartitionSize) < targetSize * MERGED_PARTITION_FACTOR ||
(currentPartitionSize < targetSize * SMALL_PARTITION_FACTOR ||
lastPartitionSize < targetSize * SMALL_PARTITION_FACTOR))
if (shouldMergePartitions) {
// We decide to merge the current partition into the previous one, so the start index of
// the current partition should be removed.
partitionStartIndices.remove(partitionStartIndices.length - 1)
lastPartitionSize += currentPartitionSize
} else {
lastPartitionSize = currentPartitionSize
}
}
while (i < sizes.length) {
// If including the next size in the current partition exceeds the target size, package the
// current partition and start a new partition.
if (i > 0 && currentPartitionSize + sizes(i) > targetSize) {
tryMergePartitions()
partitionStartIndices += i
currentPartitionSize = sizes(i)
} else {
currentPartitionSize += sizes(i)
}
i += 1
}
tryMergePartitions()
partitionStartIndices.toArray
}
}
| cloud-fan/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/adaptive/ShufflePartitionsUtil.scala | Scala | apache-2.0 | 13,867 |
/*
* Copyright 2015 MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.mongodb.scala
import java.util.concurrent.TimeUnit
import scala.concurrent.duration.Duration
import com.mongodb.async.SingleResultCallback
import com.mongodb.async.client.MapReduceIterable
import com.mongodb.client.model.MapReduceAction
import org.mongodb.scala.bson.conversions.Bson
import org.mongodb.scala.internal.ObservableHelper._
import org.mongodb.scala.model.Collation
/**
* Observable for map reduce.
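*
* Usage sketch (illustrative; `collection`, `mapFn` and `reduceFn` are assumed to be defined elsewhere):
* {{{
* collection.mapReduce[Document](mapFn, reduceFn)
* .filter(Filters.equal("status", "A"))
* .limit(100)
* }}}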
*
* @define docsRef http://docs.mongodb.org/manual/reference
*
* @tparam TResult The type of the result.
* @since 1.0
*/
case class MapReduceObservable[TResult](wrapped: MapReduceIterable[TResult]) extends Observable[TResult] {
/**
* Sets the collectionName for the output of the MapReduce
*
* <p>The default action is to replace the collection if it exists; to change this, use [[action]].</p>
*
* @param collectionName the name of the collection that you want the map-reduce operation to write its output.
* @return this
*/
def collectionName(collectionName: String): MapReduceObservable[TResult] = {
wrapped.collectionName(collectionName)
this
}
/**
* Sets the JavaScript function that follows the reduce method and modifies the output.
*
* [[http://docs.mongodb.org/manual/reference/command/mapReduce#mapreduce-finalize-cmd Requirements for the finalize Function]]
* @param finalizeFunction the JavaScript function that follows the reduce method and modifies the output.
* @return this
*/
def finalizeFunction(finalizeFunction: String): MapReduceObservable[TResult] = {
wrapped.finalizeFunction(finalizeFunction)
this
}
/**
* Sets the global variables that are accessible in the map, reduce and finalize functions.
*
* [[http://docs.mongodb.org/manual/reference/command/mapReduce mapReduce]]
* @param scope the global variables that are accessible in the map, reduce and finalize functions.
* @return this
*/
def scope(scope: Bson): MapReduceObservable[TResult] = {
wrapped.scope(scope)
this
}
/**
* Sets the sort criteria to apply to the query.
*
* [[http://docs.mongodb.org/manual/reference/method/cursor.sort/ Sort]]
* @param sort the sort criteria, which may be null.
* @return this
*/
def sort(sort: Bson): MapReduceObservable[TResult] = {
wrapped.sort(sort)
this
}
/**
* Sets the query filter to apply to the query.
*
* [[http://docs.mongodb.org/manual/reference/method/db.collection.find/ Filter]]
* @param filter the filter to apply to the query.
* @return this
*/
def filter(filter: Bson): MapReduceObservable[TResult] = {
wrapped.filter(filter)
this
}
/**
* Sets the limit to apply.
*
* [[http://docs.mongodb.org/manual/reference/method/cursor.limit/#cursor.limit Limit]]
* @param limit the limit, which may be null
* @return this
*/
def limit(limit: Int): MapReduceObservable[TResult] = {
wrapped.limit(limit)
this
}
/**
* Sets the flag that specifies whether to convert intermediate data into BSON format between the execution of the map and reduce
* functions. Defaults to false.
*
* [[http://docs.mongodb.org/manual/reference/command/mapReduce mapReduce]]
* @param jsMode the flag that specifies whether to convert intermediate data into BSON format between the execution of the map and
* reduce functions
* @return this
*/
def jsMode(jsMode: Boolean): MapReduceObservable[TResult] = {
wrapped.jsMode(jsMode)
this
}
/**
* Sets whether to include the timing information in the result information.
*
* @param verbose whether to include the timing information in the result information.
* @return this
*/
def verbose(verbose: Boolean): MapReduceObservable[TResult] = {
wrapped.verbose(verbose)
this
}
/**
* Sets the maximum execution time on the server for this operation.
*
* [[http://docs.mongodb.org/manual/reference/operator/meta/maxTimeMS/ Max Time]]
* @param duration the duration
* @return this
*/
def maxTime(duration: Duration): MapReduceObservable[TResult] = {
wrapped.maxTime(duration.toMillis, TimeUnit.MILLISECONDS)
this
}
/**
* Specify the `MapReduceAction` to be used when writing to a collection.
*
* @param action an [[model.MapReduceAction]] to perform on the collection
* @return this
*/
def action(action: MapReduceAction): MapReduceObservable[TResult] = {
wrapped.action(action)
this
}
/**
* Sets the name of the database to output into.
*
* [[http://docs.mongodb.org/manual/reference/command/mapReduce#output-to-a-collection-with-an-action output with an action]]
* @param databaseName the name of the database to output into.
* @return this
*/
def databaseName(databaseName: String): MapReduceObservable[TResult] = {
wrapped.databaseName(databaseName)
this
}
/**
* Sets if the output database is sharded
*
* [[http://docs.mongodb.org/manual/reference/command/mapReduce#output-to-a-collection-with-an-action output with an action]]
* @param sharded if the output database is sharded
* @return this
*/
def sharded(sharded: Boolean): MapReduceObservable[TResult] = {
wrapped.sharded(sharded)
this
}
/**
* Sets if the post-processing step will prevent MongoDB from locking the database.
*
* Valid only with the `MapReduceAction.MERGE` or `MapReduceAction.REDUCE` actions.
*
* [[http://docs.mongodb.org/manual/reference/command/mapReduce/#output-to-a-collection-with-an-action Output with an action]]
* @param nonAtomic if the post-processing step will prevent MongoDB from locking the database.
* @return this
*/
def nonAtomic(nonAtomic: Boolean): MapReduceObservable[TResult] = {
wrapped.nonAtomic(nonAtomic)
this
}
/**
* Sets the bypass document level validation flag.
*
* '''Note''': This only applies when an `\\$out` stage is specified.
*
* [[http://docs.mongodb.org/manual/reference/command/mapReduce#output-to-a-collection-with-an-action output with an action]]
*
* @note Requires MongoDB 3.2 or greater
* @param bypassDocumentValidation If true, allows the write to opt-out of document level validation.
* @return this
* @since 1.1
*/
def bypassDocumentValidation(bypassDocumentValidation: Boolean): MapReduceObservable[TResult] = {
wrapped.bypassDocumentValidation(bypassDocumentValidation)
this
}
/**
* Sets the collation options
*
* @param collation the collation options to use
* @return this
* @since 1.2
* @note A null value represents the server default.
* @note Requires MongoDB 3.4 or greater
*/
def collation(collation: Collation): MapReduceObservable[TResult] = {
wrapped.collation(collation)
this
}
/**
* Aggregates documents to a collection according to the specified map-reduce function with the given options, which must specify a
* non-inline result.
*
* @return a Observable with a single element indicating when the operation has completed
* [[http://docs.mongodb.org/manual/aggregation/ Aggregation]]
*/
def toCollection(): Observable[Completed] = observeCompleted(wrapped.toCollection(_: SingleResultCallback[Void]))
override def subscribe(observer: Observer[_ >: TResult]): Unit = observe(wrapped).subscribe(observer)
}
| jCalamari/mongo-scala-driver | driver/src/main/scala/org/mongodb/scala/MapReduceObservable.scala | Scala | apache-2.0 | 7,976 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api
import java.nio.ByteBuffer
import kafka.api.ApiUtils._
import kafka.common.{TopicAndPartition, OffsetMetadataAndError}
import kafka.utils.Logging
object OffsetFetchResponse extends Logging {
val CurrentVersion: Short = 0
val DefaultClientId = ""
def readFrom(buffer: ByteBuffer): OffsetFetchResponse = {
// Read values from the envelope
val correlationId = buffer.getInt
val clientId = readShortString(buffer)
// Read the OffsetResponse
val topicCount = buffer.getInt
val pairs = (1 to topicCount).flatMap(_ => {
val topic = readShortString(buffer)
val partitionCount = buffer.getInt
(1 to partitionCount).map(_ => {
val partitionId = buffer.getInt
val offset = buffer.getLong
val metadata = readShortString(buffer)
val error = buffer.getShort
(TopicAndPartition(topic, partitionId), OffsetMetadataAndError(offset, metadata, error))
})
})
OffsetFetchResponse(Map(pairs:_*), correlationId, clientId)
}
}
case class OffsetFetchResponse(requestInfo: Map[TopicAndPartition, OffsetMetadataAndError],
override val correlationId: Int = 0,
clientId: String = OffsetFetchResponse.DefaultClientId)
extends RequestOrResponse(correlationId = correlationId) {
lazy val requestInfoGroupedByTopic = requestInfo.groupBy(_._1.topic)
def writeTo(buffer: ByteBuffer) {
// Write envelope
buffer.putInt(correlationId)
writeShortString(buffer, clientId)
// Write OffsetFetchResponse
buffer.putInt(requestInfoGroupedByTopic.size) // number of topics
requestInfoGroupedByTopic.foreach( t1 => { // topic -> Map[TopicAndPartition, OffsetMetadataAndError]
writeShortString(buffer, t1._1) // topic
buffer.putInt(t1._2.size) // number of partitions for this topic
t1._2.foreach( t2 => { // TopicAndPartition -> OffsetMetadataAndError
buffer.putInt(t2._1.partition)
buffer.putLong(t2._2.offset)
writeShortString(buffer, t2._2.metadata)
buffer.putShort(t2._2.error)
})
})
}
override def sizeInBytes =
4 + /* correlationId */
shortStringLength(clientId) +
4 + /* topic count */
requestInfoGroupedByTopic.foldLeft(0)((count, topicAndOffsets) => {
val (topic, offsets) = topicAndOffsets
count +
shortStringLength(topic) + /* topic */
4 + /* number of partitions */
offsets.foldLeft(0)((innerCount, offsetsAndMetadata) => {
innerCount +
4 /* partition */ +
8 /* offset */ +
shortStringLength(offsetsAndMetadata._2.metadata) +
2 /* error */
})
})
}
| akosiaris/kafka | core/src/main/scala/kafka/api/OffsetFetchResponse.scala | Scala | apache-2.0 | 3,513 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s.features
import org.apache.spark.annotation.{DeveloperApi, Unstable}
import org.apache.spark.deploy.k8s.KubernetesDriverConf
/**
* :: DeveloperApi ::
*
* A base interface to help user extend custom feature step in driver side.
* Note: If your custom feature step would be used only in driver or both in driver and executor,
* please use this.
*
* Example of driver feature step:
*
* {{{
* class DriverExampleFeatureStep extends KubernetesDriverCustomFeatureConfigStep {
* private var driverConf: KubernetesDriverConf = _
*
* override def init(conf: KubernetesDriverConf): Unit = {
* driverConf = conf
* }
*
* // Implements methods of `KubernetesFeatureConfigStep`, such as `configurePod`
* override def configurePod(pod: SparkPod): SparkPod = {
* // Apply modifications on the given pod in accordance to this feature.
* }
* }
* }}}
*
* Example of feature step for both driver and executor:
*
* {{{
* class DriverAndExecutorExampleFeatureStep extends KubernetesDriverCustomFeatureConfigStep
* with KubernetesExecutorCustomFeatureConfigStep {
* private var kubernetesConf: KubernetesConf = _
*
* override def init(conf: KubernetesDriverConf): Unit = {
* kubernetesConf = conf
* }
*
* override def init(conf: KubernetesExecutorConf): Unit = {
* kubernetesConf = conf
* }
*
* // Implements methods of `KubernetesFeatureConfigStep`, such as `configurePod`
* override def configurePod(pod: SparkPod): SparkPod = {
* // Apply modifications on the given pod in accordance to this feature.
* }
* }
* }}}
*/
@Unstable
@DeveloperApi
trait KubernetesDriverCustomFeatureConfigStep extends KubernetesFeatureConfigStep {
/**
* Initialize the configuration for the driver user feature step. This is only applicable when the
* user has specified `spark.kubernetes.driver.pod.featureSteps`; `init` will be called after the
* feature step has been loaded.
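*
* For example (an illustrative sketch; the package name is hypothetical):
* {{{
* --conf spark.kubernetes.driver.pod.featureSteps=com.example.DriverExampleFeatureStep
* }}}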
*/
def init(config: KubernetesDriverConf): Unit
}
| ueshin/apache-spark | resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/features/KubernetesDriverCustomFeatureConfigStep.scala | Scala | apache-2.0 | 2,861 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.dataset
import org.apache.calcite.plan.{RelOptCluster, RelOptCost, RelOptPlanner, RelTraitSet}
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.core.AggregateCall
import org.apache.calcite.rel.metadata.RelMetadataQuery
import org.apache.calcite.rel.{RelNode, RelWriter, SingleRel}
import org.apache.flink.api.common.functions.GroupReduceFunction
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.DataSet
import org.apache.flink.api.java.typeutils.RowTypeInfo
import org.apache.flink.table.api.BatchTableEnvironment
import org.apache.flink.table.calcite.FlinkTypeFactory
import org.apache.flink.table.codegen.AggregationCodeGenerator
import org.apache.flink.table.plan.nodes.CommonAggregate
import org.apache.flink.table.runtime.aggregate.{AggregateUtil, DataSetPreAggFunction}
import org.apache.flink.table.runtime.aggregate.AggregateUtil.CalcitePair
import org.apache.flink.types.Row
/**
* Flink RelNode that corresponds to a LogicalAggregate.
*/
class DataSetAggregate(
cluster: RelOptCluster,
traitSet: RelTraitSet,
inputNode: RelNode,
namedAggregates: Seq[CalcitePair[AggregateCall, String]],
rowRelDataType: RelDataType,
inputType: RelDataType,
grouping: Array[Int],
inGroupingSet: Boolean)
extends SingleRel(cluster, traitSet, inputNode) with CommonAggregate with DataSetRel {
override def deriveRowType(): RelDataType = rowRelDataType
override def copy(traitSet: RelTraitSet, inputs: java.util.List[RelNode]): RelNode = {
new DataSetAggregate(
cluster,
traitSet,
inputs.get(0),
namedAggregates,
getRowType,
inputType,
grouping,
inGroupingSet)
}
override def toString: String = {
s"Aggregate(${
if (!grouping.isEmpty) {
s"groupBy: (${groupingToString(inputType, grouping)}), "
} else {
""
}
}select: (${aggregationToString(inputType, grouping, getRowType, namedAggregates, Nil)}))"
}
override def explainTerms(pw: RelWriter): RelWriter = {
super.explainTerms(pw)
.itemIf("groupBy", groupingToString(inputType, grouping), !grouping.isEmpty)
.item("select", aggregationToString(inputType, grouping, getRowType, namedAggregates, Nil))
}
override def computeSelfCost(planner: RelOptPlanner, metadata: RelMetadataQuery): RelOptCost = {
val child = this.getInput
val rowCnt = metadata.getRowCount(child)
val rowSize = this.estimateRowSize(child.getRowType)
val aggCnt = this.namedAggregates.size
planner.getCostFactory.makeCost(rowCnt, rowCnt * aggCnt, rowCnt * rowSize)
}
override def translateToPlan(tableEnv: BatchTableEnvironment): DataSet[Row] = {
val input = inputNode.asInstanceOf[DataSetRel]
val inputDS = input.translateToPlan(tableEnv)
val rowTypeInfo = FlinkTypeFactory.toInternalRowTypeInfo(getRowType).asInstanceOf[RowTypeInfo]
val generator = new AggregationCodeGenerator(
tableEnv.getConfig,
false,
inputDS.getType)
val (
preAgg: Option[DataSetPreAggFunction],
preAggType: Option[TypeInformation[Row]],
finalAgg: GroupReduceFunction[Row, Row]
) = AggregateUtil.createDataSetAggregateFunctions(
generator,
namedAggregates,
input.getRowType,
inputDS.getType.asInstanceOf[RowTypeInfo].getFieldTypes,
rowRelDataType,
grouping,
inGroupingSet)
val aggString = aggregationToString(inputType, grouping, getRowType, namedAggregates, Nil)
if (grouping.length > 0) {
// grouped aggregation
val aggOpName = s"groupBy: (${groupingToString(inputType, grouping)}), " +
s"select: ($aggString)"
if (preAgg.isDefined) {
inputDS
// pre-aggregation
.groupBy(grouping: _*)
.combineGroup(preAgg.get)
.returns(preAggType.get)
.name(aggOpName)
// final aggregation
.groupBy(grouping.indices: _*)
.reduceGroup(finalAgg)
.returns(rowTypeInfo)
.name(aggOpName)
} else {
inputDS
.groupBy(grouping: _*)
.reduceGroup(finalAgg)
.returns(rowTypeInfo)
.name(aggOpName)
}
}
else {
// global aggregation
val aggOpName = s"select:($aggString)"
if (preAgg.isDefined) {
inputDS
// pre-aggregation
.mapPartition(preAgg.get)
.returns(preAggType.get)
.name(aggOpName)
// final aggregation
.reduceGroup(finalAgg)
.returns(rowTypeInfo)
.name(aggOpName)
} else {
inputDS
.reduceGroup(finalAgg)
.returns(rowTypeInfo)
.name(aggOpName)
}
}
}
}
| zohar-mizrahi/flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/dataset/DataSetAggregate.scala | Scala | apache-2.0 | 5,634 |
package debop4s.data.slick
// NOTE: Both SlickExampleDatabase._ and SlickExampleDatabase.driver.simple._ must be imported.
import debop4s.data.slick.SlickExampleDatabase._
import debop4s.data.slick.SlickExampleDatabase.driver.simple._
import scala.util.Try
/**
* SlickExampleDatabaseFunSuite
* @author [email protected] 15. 3. 22.
*/
class SlickExampleDatabaseFunSuite extends AbstractSlickFunSuite {
class CodeT(tag: Tag) extends Table[(Int, String, String)](tag, "implicits_codes") {
def id = column[Int]("id", O.PrimaryKey, O.AutoInc)
def name = column[String]("name", O.NotNull, O.Length(254, varying = true))
def value = column[String]("value", O.NotNull, O.Length(254, varying = true))
def * = (id, name, value)
}
// Provides the same functionality as the Codes value defined below.
object CodeRepository extends TableQuery(new CodeT(_)) {
lazy val byName = this.findBy(_.name)
}
lazy val Codes = TableQuery[CodeT]
lazy val ranges = Range(0, 5000)
override def beforeAll(): Unit = {
super.beforeAll()
withTransaction { implicit session =>
Try { CodeRepository.ddl.drop }
}
withTransaction { implicit session =>
CodeRepository.ddl.create
}
}
override def afterAll(): Unit = {
withTransaction { implicit session =>
Try { CodeRepository.ddl.drop }
}
super.afterAll()
}
private def insertSamples(): Unit = {
// Perform the inserts in batches of 100.
ranges.grouped(100).toSeq.par.foreach { is =>
withTransaction { implicit session =>
CodeRepository.map(c => (c.name, c.value)) ++= is.map(i => (s"name-$i", s"value-$i")).toSet
}
}
}
test("with transaction") {
// Perform the inserts in batches of 100.
ranges.grouped(100).toSeq.par.foreach { is =>
withTransaction { implicit session =>
CodeRepository.map(c => (c.name, c.value)) ++= is.map(i => (s"name-$i", s"value-$i")).toSet
}
}
}
test("with dynamic transaction") {
// Perform the inserts in batches of 100.
ranges.grouped(100).toSeq.par.foreach { is =>
withDynTransaction { implicit session =>
CodeRepository.map(c => (c.name, c.value)) ++= is.map(i => (s"name-$i", s"value-$i")).toSet
}
}
}
test("with rollback") {
// Perform the inserts in batches of 100.
ranges.grouped(100).toSeq.par.foreach { is =>
withRollback { implicit session =>
CodeRepository.map(c => (c.name, c.value)) ++= is.map(i => (s"name-$i", s"value-$i")).toSet
}
}
}
test("with dynamic rollback") {
// Perform the inserts in batches of 100.
ranges.grouped(100).toSeq.par.foreach { is =>
withDynRollback { implicit session =>
CodeRepository.map(c => (c.name, c.value)) ++= is.map(i => (s"name-$i", s"value-$i")).toSet
}
}
}
test("with readonly") {
insertSamples()
ranges.grouped(100).toSeq.par.foreach { is =>
withReadOnly { implicit session =>
val codes = CodeRepository.filter(_.id inSet is.toSet).run.toSet
codes.foreach(x => LOG.debug(x.toString))
}
}
}
test("with dynamic readonly") {
insertSamples()
ranges.grouped(100).toSeq.par.foreach { is =>
withDynReadOnly { implicit session =>
val codes = CodeRepository.filter(_.id inSet is.toSet).run.toSet
codes.foreach(x => LOG.debug(x.toString))
}
}
}
}
| debop/debop4s | debop4s-data-slick/src/test/scala/debop4s/data/slick/SlickExampleDatabaseFunSuite.scala | Scala | apache-2.0 | 3,480 |
/* Copyright 2009-2016 EPFL, Lausanne */
import leon.lang._
import leon.lang.synthesis._
import leon.annotation._
object Complete {
sealed abstract class List
case class Cons(head: BigInt, tail: List) extends List
case object Nil extends List
def size(l: List) : BigInt = (l match {
case Nil => 0
case Cons(_, t) => 1 + size(t)
}) ensuring(res => res >= 0)
def content(l: List): Set[BigInt] = l match {
case Nil => Set.empty[BigInt]
case Cons(i, t) => Set(i) ++ content(t)
}
def isSorted(list : List) : Boolean = list match {
case Nil => true
case Cons(_, Nil) => true
case Cons(x1, Cons(x2, _)) if(x1 > x2) => false
case Cons(_, xs) => isSorted(xs)
}
def delete(in1: List, v: BigInt): List = {
require(isSorted(in1))
in1 match {
case Cons(h,t) =>
if (h < v) {
Cons(h, delete(t, v))
} else if (h == v) {
delete(t, v)
} else {
in1
}
case Nil =>
Nil
}
} ensuring { res => content(res) == content(in1) -- Set(v) && isSorted(res) }
def diff(in1: List, in2: List) = {
require(isSorted(in1) && isSorted(in2))
choose { (out: List) =>
(content(out) == content(in1) -- content(in2)) && isSorted(out)
}
}
}
| regb/leon | src/test/resources/regression/synthesis/SortedList/Diff.scala | Scala | gpl-3.0 | 1,279 |