/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.classification
import java.util.UUID
import scala.concurrent.Future
import scala.concurrent.duration.Duration
import scala.language.existentials
import org.apache.hadoop.fs.Path
import org.json4s.{DefaultFormats, JObject, _}
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import org.apache.spark.SparkContext
import org.apache.spark.annotation.Since
import org.apache.spark.ml._
import org.apache.spark.ml.attribute._
import org.apache.spark.ml.linalg.{Vector, Vectors}
import org.apache.spark.ml.param.{Param, ParamMap, ParamPair, Params}
import org.apache.spark.ml.param.shared.{HasParallelism, HasWeightCol}
import org.apache.spark.ml.util._
import org.apache.spark.ml.util.Instrumentation.instrumented
import org.apache.spark.sql.{DataFrame, Dataset, Row}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
import org.apache.spark.storage.StorageLevel
import org.apache.spark.util.ThreadUtils
private[ml] trait ClassifierTypeTrait {
// scalastyle:off structural.type
type ClassifierType = Classifier[F, E, M] forSome {
type F
type M <: ClassificationModel[F, M]
type E <: Classifier[F, E, M]
}
// scalastyle:on structural.type
}
/**
* Params for [[OneVsRest]].
*/
private[ml] trait OneVsRestParams extends ClassifierParams
with ClassifierTypeTrait with HasWeightCol {
/**
* Param for the base binary classifier to which the multiclass classification is reduced.
* The base classifier input and output columns are ignored in favor of
* the ones specified in [[OneVsRest]].
* @group param
*/
val classifier: Param[ClassifierType] = new Param(this, "classifier", "base binary classifier")
/** @group getParam */
def getClassifier: ClassifierType = $(classifier)
}
private[ml] object OneVsRestParams extends ClassifierTypeTrait {
def validateParams(instance: OneVsRestParams): Unit = {
def checkElement(elem: Params, name: String): Unit = elem match {
case stage: MLWritable => // good
case other =>
throw new UnsupportedOperationException("OneVsRest write will fail" +
s" because it contains $name which does not implement MLWritable." +
s" Non-Writable $name: ${other.uid} of type ${other.getClass}")
}
instance match {
case ovrModel: OneVsRestModel => ovrModel.models.foreach(checkElement(_, "model"))
case _ => // no need to check OneVsRest here
}
checkElement(instance.getClassifier, "classifier")
}
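// Shared persistence helpers: saveImpl writes the OneVsRest params/metadata and saves the
// (MLWritable) base classifier under the "classifier" subdirectory of `path`; OneVsRestModel
// additionally stores each trained model under "model_<i>" (see OneVsRestModelWriter below).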
def saveImpl(
path: String,
instance: OneVsRestParams,
sc: SparkContext,
extraMetadata: Option[JObject] = None): Unit = {
val params = instance.extractParamMap().toSeq
val jsonParams = render(params
.filter { case ParamPair(p, v) => p.name != "classifier" }
.map { case ParamPair(p, v) => p.name -> parse(p.jsonEncode(v)) }
.toList)
DefaultParamsWriter.saveMetadata(instance, path, sc, extraMetadata, Some(jsonParams))
val classifierPath = new Path(path, "classifier").toString
instance.getClassifier.asInstanceOf[MLWritable].save(classifierPath)
}
def loadImpl(
path: String,
sc: SparkContext,
expectedClassName: String): (DefaultParamsReader.Metadata, ClassifierType) = {
val metadata = DefaultParamsReader.loadMetadata(path, sc, expectedClassName)
val classifierPath = new Path(path, "classifier").toString
val estimator = DefaultParamsReader.loadParamsInstance[ClassifierType](classifierPath, sc)
(metadata, estimator)
}
}
/**
* Model produced by [[OneVsRest]].
* This stores the models resulting from training k binary classifiers: one for each class.
* Each example is scored against all k models, and the model with the highest score
* is picked to label the example.
*
* @param labelMetadata Metadata of the label column if it exists, or a Nominal attribute
* representing the number of classes in the training dataset otherwise.
* @param models The binary classification models for the reduction.
* The i-th model is produced by testing the i-th class (taking label 1) vs the rest
* (taking label 0).
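*
* A minimal scoring sketch (illustrative only; assumes a fitted `ovrModel` and a test
* DataFrame `test` containing the default "features" column):
* {{{
*   val scored = ovrModel
*     .setRawPredictionCol("rawPrediction")
*     .setPredictionCol("prediction")
*     .transform(test)
*   // "rawPrediction" holds the k per-class confidence scores as a vector and
*   // "prediction" holds the index of the highest-scoring class as a Double.
* }}}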
*/
@Since("1.4.0")
final class OneVsRestModel private[ml] (
@Since("1.4.0") override val uid: String,
private[ml] val labelMetadata: Metadata,
@Since("1.4.0") val models: Array[_ <: ClassificationModel[_, _]])
extends Model[OneVsRestModel] with OneVsRestParams with MLWritable {
require(models.nonEmpty, "OneVsRestModel requires at least one model for one class")
@Since("2.4.0")
val numClasses: Int = models.length
@Since("2.4.0")
val numFeatures: Int = models.head.numFeatures
/** @group setParam */
@Since("2.1.0")
def setFeaturesCol(value: String): this.type = set(featuresCol, value)
/** @group setParam */
@Since("2.1.0")
def setPredictionCol(value: String): this.type = set(predictionCol, value)
/** @group setParam */
@Since("2.4.0")
def setRawPredictionCol(value: String): this.type = set(rawPredictionCol, value)
@Since("1.4.0")
override def transformSchema(schema: StructType): StructType = {
validateAndTransformSchema(schema, fitting = false, getClassifier.featuresDataType)
}
@Since("2.0.0")
override def transform(dataset: Dataset[_]): DataFrame = {
// Check schema
transformSchema(dataset.schema, logging = true)
// determine the input columns: these need to be passed through
val origCols = dataset.schema.map(f => col(f.name))
// add an accumulator column to store predictions of all the models
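// (the accumulator is a Map from class index to that class's raw positive-class score)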
val accColName = "mbc$acc" + UUID.randomUUID().toString
val initUDF = udf { () => Map[Int, Double]() }
val newDataset = dataset.withColumn(accColName, initUDF())
// persist if underlying dataset is not persistent.
val handlePersistence = !dataset.isStreaming && dataset.storageLevel == StorageLevel.NONE
if (handlePersistence) {
newDataset.persist(StorageLevel.MEMORY_AND_DISK)
}
// update the accumulator column with the result of prediction of models
val aggregatedDataset = models.zipWithIndex.foldLeft[DataFrame](newDataset) {
case (df, (model, index)) =>
val rawPredictionCol = model.getRawPredictionCol
val columns = origCols ++ List(col(rawPredictionCol), col(accColName))
// add temporary column to store intermediate scores and update
val tmpColName = "mbc$tmp" + UUID.randomUUID().toString
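// rawPrediction(1) is the binary model's raw score for the positive class, i.e. for this class index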
val updateUDF = udf { (predictions: Map[Int, Double], prediction: Vector) =>
predictions + ((index, prediction(1)))
}
model.setFeaturesCol($(featuresCol))
val transformedDataset = model.transform(df).select(columns: _*)
val updatedDataset = transformedDataset
.withColumn(tmpColName, updateUDF(col(accColName), col(rawPredictionCol)))
val newColumns = origCols ++ List(col(tmpColName))
// switch out the intermediate column with the accumulator column
updatedDataset.select(newColumns: _*).withColumnRenamed(tmpColName, accColName)
}
if (handlePersistence) {
newDataset.unpersist()
}
if (getRawPredictionCol != "") {
val numClass = models.length
// output the RawPrediction as vector
val rawPredictionUDF = udf { (predictions: Map[Int, Double]) =>
val predArray = Array.fill[Double](numClass)(0.0)
predictions.foreach { case (idx, value) => predArray(idx) = value }
Vectors.dense(predArray)
}
// output the index of the classifier with highest confidence as prediction
val labelUDF = udf { (rawPredictions: Vector) => rawPredictions.argmax.toDouble }
// output confidence as raw prediction, label and label metadata as prediction
aggregatedDataset
.withColumn(getRawPredictionCol, rawPredictionUDF(col(accColName)))
.withColumn(getPredictionCol, labelUDF(col(getRawPredictionCol)), labelMetadata)
.drop(accColName)
} else {
// output the index of the classifier with highest confidence as prediction
val labelUDF = udf { (predictions: Map[Int, Double]) =>
predictions.maxBy(_._2)._1.toDouble
}
// output label and label metadata as prediction
aggregatedDataset
.withColumn(getPredictionCol, labelUDF(col(accColName)), labelMetadata)
.drop(accColName)
}
}
@Since("1.4.1")
override def copy(extra: ParamMap): OneVsRestModel = {
val copied = new OneVsRestModel(
uid, labelMetadata, models.map(_.copy(extra).asInstanceOf[ClassificationModel[_, _]]))
copyValues(copied, extra).setParent(parent)
}
@Since("2.0.0")
override def write: MLWriter = new OneVsRestModel.OneVsRestModelWriter(this)
}
@Since("2.0.0")
object OneVsRestModel extends MLReadable[OneVsRestModel] {
@Since("2.0.0")
override def read: MLReader[OneVsRestModel] = new OneVsRestModelReader
@Since("2.0.0")
override def load(path: String): OneVsRestModel = super.load(path)
/** [[MLWriter]] instance for [[OneVsRestModel]] */
private[OneVsRestModel] class OneVsRestModelWriter(instance: OneVsRestModel) extends MLWriter {
OneVsRestParams.validateParams(instance)
override protected def saveImpl(path: String): Unit = {
val extraJson = ("labelMetadata" -> instance.labelMetadata.json) ~
("numClasses" -> instance.models.length)
OneVsRestParams.saveImpl(path, instance, sc, Some(extraJson))
instance.models.map(_.asInstanceOf[MLWritable]).zipWithIndex.foreach { case (model, idx) =>
val modelPath = new Path(path, s"model_$idx").toString
model.save(modelPath)
}
}
}
private class OneVsRestModelReader extends MLReader[OneVsRestModel] {
/** Checked against metadata when loading model */
private val className = classOf[OneVsRestModel].getName
override def load(path: String): OneVsRestModel = {
implicit val format = DefaultFormats
val (metadata, classifier) = OneVsRestParams.loadImpl(path, sc, className)
val labelMetadata = Metadata.fromJson((metadata.metadata \\ "labelMetadata").extract[String])
val numClasses = (metadata.metadata \\ "numClasses").extract[Int]
val models = Range(0, numClasses).toArray.map { idx =>
val modelPath = new Path(path, s"model_$idx").toString
DefaultParamsReader.loadParamsInstance[ClassificationModel[_, _]](modelPath, sc)
}
val ovrModel = new OneVsRestModel(metadata.uid, labelMetadata, models)
metadata.getAndSetParams(ovrModel)
ovrModel.set("classifier", classifier)
ovrModel
}
}
}
/**
* Reduction of Multiclass Classification to Binary Classification.
* Performs the reduction using the one-against-all strategy.
* For a multiclass classification problem with k classes, k binary models are trained (one per class).
* Each example is scored against all k models and the model with the highest score
* is picked to label the example.
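*
* A minimal training sketch (illustrative only; assumes a labeled DataFrame `train` with the
* default "label" and "features" columns, and uses logistic regression as one possible base
* classifier):
* {{{
*   val ovr = new OneVsRest()
*     .setClassifier(new LogisticRegression().setMaxIter(10))
*   val ovrModel = ovr.fit(train)
* }}}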
*/
@Since("1.4.0")
final class OneVsRest @Since("1.4.0") (
@Since("1.4.0") override val uid: String)
extends Estimator[OneVsRestModel] with OneVsRestParams with HasParallelism with MLWritable {
@Since("1.4.0")
def this() = this(Identifiable.randomUID("oneVsRest"))
/** @group setParam */
@Since("1.4.0")
def setClassifier(value: Classifier[_, _, _]): this.type = {
set(classifier, value.asInstanceOf[ClassifierType])
}
/** @group setParam */
@Since("1.5.0")
def setLabelCol(value: String): this.type = set(labelCol, value)
/** @group setParam */
@Since("1.5.0")
def setFeaturesCol(value: String): this.type = set(featuresCol, value)
/** @group setParam */
@Since("1.5.0")
def setPredictionCol(value: String): this.type = set(predictionCol, value)
/** @group setParam */
@Since("2.4.0")
def setRawPredictionCol(value: String): this.type = set(rawPredictionCol, value)
/**
* The implementation of parallel one vs. rest runs the classification for
* each class in a separate thread.
*
* @group expertSetParam
*/
@Since("2.3.0")
def setParallelism(value: Int): this.type = {
set(parallelism, value)
}
/**
* Sets the value of param [[weightCol]].
*
* This is ignored if weight is not supported by [[classifier]].
* If this is not set or empty, we treat all instance weights as 1.0.
* Default is not set, so all instances have weight one.
*
* @group setParam
*/
@Since("2.3.0")
def setWeightCol(value: String): this.type = set(weightCol, value)
@Since("1.4.0")
override def transformSchema(schema: StructType): StructType = {
validateAndTransformSchema(schema, fitting = true, getClassifier.featuresDataType)
}
@Since("2.0.0")
override def fit(dataset: Dataset[_]): OneVsRestModel = instrumented { instr =>
transformSchema(dataset.schema)
instr.logPipelineStage(this)
instr.logDataset(dataset)
instr.logParams(this, labelCol, featuresCol, predictionCol, parallelism, rawPredictionCol)
instr.logNamedValue("classifier", $(classifier).getClass.getCanonicalName)
// determine number of classes either from metadata if provided, or via computation.
val labelSchema = dataset.schema($(labelCol))
val computeNumClasses: () => Int = () => {
val Row(maxLabelIndex: Double) = dataset.agg(max(col($(labelCol)).cast(DoubleType))).head()
// classes are assumed to be numbered from 0,...,maxLabelIndex
maxLabelIndex.toInt + 1
}
val numClasses = MetadataUtils.getNumClasses(labelSchema).fold(computeNumClasses())(identity)
instr.logNumClasses(numClasses)
val weightColIsUsed = isDefined(weightCol) && $(weightCol).nonEmpty && {
getClassifier match {
case _: HasWeightCol => true
case c =>
instr.logWarning(s"weightCol is ignored, as it is not supported by $c now.")
false
}
}
val multiclassLabeled = if (weightColIsUsed) {
dataset.select($(labelCol), $(featuresCol), $(weightCol))
} else {
dataset.select($(labelCol), $(featuresCol))
}
// persist if underlying dataset is not persistent.
val handlePersistence = dataset.storageLevel == StorageLevel.NONE
if (handlePersistence) {
multiclassLabeled.persist(StorageLevel.MEMORY_AND_DISK)
}
val executionContext = getExecutionContext
// for each class, create a binary label column and train one classifier on it (one future per class).
val modelFutures = Range(0, numClasses).map { index =>
// generate new label metadata for the binary problem.
val newLabelMeta = BinaryAttribute.defaultAttr.withName("label").toMetadata()
val labelColName = "mc2b$" + index
val trainingDataset = multiclassLabeled.withColumn(
labelColName, when(col($(labelCol)) === index.toDouble, 1.0).otherwise(0.0), newLabelMeta)
val classifier = getClassifier
val paramMap = new ParamMap()
paramMap.put(classifier.labelCol -> labelColName)
paramMap.put(classifier.featuresCol -> getFeaturesCol)
paramMap.put(classifier.predictionCol -> getPredictionCol)
Future {
if (weightColIsUsed) {
val classifier_ = classifier.asInstanceOf[ClassifierType with HasWeightCol]
paramMap.put(classifier_.weightCol -> getWeightCol)
classifier_.fit(trainingDataset, paramMap)
} else {
classifier.fit(trainingDataset, paramMap)
}
}(executionContext)
}
val models = modelFutures
.map(ThreadUtils.awaitResult(_, Duration.Inf)).toArray[ClassificationModel[_, _]]
instr.logNumFeatures(models.head.numFeatures)
if (handlePersistence) {
multiclassLabeled.unpersist()
}
// extract label metadata from label column if present, or create a nominal attribute
// to output the number of labels
val labelAttribute = Attribute.fromStructField(labelSchema) match {
case _: NumericAttribute | UnresolvedAttribute =>
NominalAttribute.defaultAttr.withName("label").withNumValues(numClasses)
case attr: Attribute => attr
}
val model = new OneVsRestModel(uid, labelAttribute.toMetadata(), models).setParent(this)
copyValues(model)
}
@Since("1.4.1")
override def copy(extra: ParamMap): OneVsRest = {
val copied = defaultCopy(extra).asInstanceOf[OneVsRest]
if (isDefined(classifier)) {
copied.setClassifier($(classifier).copy(extra))
}
copied
}
@Since("2.0.0")
override def write: MLWriter = new OneVsRest.OneVsRestWriter(this)
}
@Since("2.0.0")
object OneVsRest extends MLReadable[OneVsRest] {
@Since("2.0.0")
override def read: MLReader[OneVsRest] = new OneVsRestReader
@Since("2.0.0")
override def load(path: String): OneVsRest = super.load(path)
/** [[MLWriter]] instance for [[OneVsRest]] */
private[OneVsRest] class OneVsRestWriter(instance: OneVsRest) extends MLWriter {
OneVsRestParams.validateParams(instance)
override protected def saveImpl(path: String): Unit = {
OneVsRestParams.saveImpl(path, instance, sc)
}
}
private class OneVsRestReader extends MLReader[OneVsRest] {
/** Checked against metadata when loading model */
private val className = classOf[OneVsRest].getName
override def load(path: String): OneVsRest = {
val (metadata, classifier) = OneVsRestParams.loadImpl(path, sc, className)
val ovr = new OneVsRest(metadata.uid)
metadata.getAndSetParams(ovr)
ovr.setClassifier(classifier)
}
}
}
// ---- end of file: mllib/src/main/scala/org/apache/spark/ml/classification/OneVsRest.scala (repo: michalsenkyr/spark, Scala, Apache-2.0) ----
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.admin
import java.util
import java.util.Properties
import kafka.admin.ConfigCommand.ConfigCommandOptions
import kafka.api.ApiVersion
import kafka.cluster.{Broker, EndPoint}
import kafka.server.{ConfigEntityName, ConfigType, KafkaConfig}
import kafka.utils.{Exit, Logging}
import kafka.zk.{AdminZkClient, BrokerInfo, KafkaZkClient, ZooKeeperTestHarness}
import org.apache.kafka.clients.admin._
import org.apache.kafka.common.Node
import org.apache.kafka.common.config.{ConfigException, ConfigResource}
import org.apache.kafka.common.errors.InvalidConfigurationException
import org.apache.kafka.common.internals.KafkaFutureImpl
import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.quota.{ClientQuotaAlteration, ClientQuotaEntity, ClientQuotaFilter, ClientQuotaFilterComponent}
import org.apache.kafka.common.security.auth.SecurityProtocol
import org.apache.kafka.common.security.scram.internals.ScramCredentialUtils
import org.apache.kafka.common.utils.Sanitizer
import org.apache.kafka.test.TestUtils
import org.easymock.EasyMock
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.Test
import scala.collection.{Seq, mutable}
import scala.jdk.CollectionConverters._
class ConfigCommandTest extends ZooKeeperTestHarness with Logging {
@Test
def shouldExitWithNonZeroStatusOnArgError(): Unit = {
assertNonZeroStatusExit(Array("--blah"))
}
@Test
def shouldExitWithNonZeroStatusOnUpdatingUnallowedConfigViaZk(): Unit = {
assertNonZeroStatusExit(Array(
"--zookeeper", zkConnect,
"--entity-name", "1",
"--entity-type", "brokers",
"--alter",
"--add-config", "security.inter.broker.protocol=PLAINTEXT"))
}
@Test
def shouldExitWithNonZeroStatusOnZkCommandWithTopicsEntity(): Unit = {
assertNonZeroStatusExit(Array(
"--zookeeper", zkConnect,
"--entity-type", "topics",
"--describe"))
}
@Test
def shouldExitWithNonZeroStatusOnZkCommandWithClientsEntity(): Unit = {
assertNonZeroStatusExit(Array(
"--zookeeper", zkConnect,
"--entity-type", "clients",
"--describe"))
}
@Test
def shouldExitWithNonZeroStatusOnZkCommandWithIpsEntity(): Unit = {
assertNonZeroStatusExit(Array(
"--zookeeper", zkConnect,
"--entity-type", "ips",
"--describe"))
}
@Test
def shouldExitWithNonZeroStatusOnZkCommandAlterUserQuota(): Unit = {
assertNonZeroStatusExit(Array(
"--zookeeper", zkConnect,
"--entity-type", "users",
"--entity-name", "admin",
"--alter", "--add-config", "consumer_byte_rate=20000"))
}
@Test
def shouldExitWithNonZeroStatusAlterUserQuotaWithoutEntityName(): Unit = {
assertNonZeroStatusExit(Array(
"--bootstrap-server", "localhost:9092",
"--entity-type", "users",
"--alter", "--add-config", "consumer_byte_rate=20000"))
}
@Test
def shouldExitWithNonZeroStatusOnBrokerCommandError(): Unit = {
assertNonZeroStatusExit(Array(
"--bootstrap-server", "invalid host",
"--entity-type", "brokers",
"--entity-name", "1",
"--describe"))
}
@Test
def shouldExitWithNonZeroStatusOnBrokerCommandWithZkTlsConfigFile(): Unit = {
assertNonZeroStatusExit(Array(
"--bootstrap-server", "invalid host",
"--entity-type", "users",
"--zk-tls-config-file", "zk_tls_config.properties",
"--describe"))
}
private def assertNonZeroStatusExit(args: Array[String]): Unit = {
var exitStatus: Option[Int] = None
Exit.setExitProcedure { (status, _) =>
exitStatus = Some(status)
throw new RuntimeException
}
try {
ConfigCommand.main(args)
} catch {
case e: RuntimeException =>
} finally {
Exit.resetExitProcedure()
}
assertEquals(Some(1), exitStatus)
}
@Test
def shouldFailParseArgumentsForClientsEntityTypeUsingZookeeper(): Unit = {
assertThrows(classOf[IllegalArgumentException], () => testArgumentParse("clients", zkConfig = true))
}
@Test
def shouldParseArgumentsForClientsEntityType(): Unit = {
testArgumentParse("clients", zkConfig = false)
}
@Test
def shouldParseArgumentsForUsersEntityTypeUsingZookeeper(): Unit = {
testArgumentParse("users", zkConfig = true)
}
@Test
def shouldParseArgumentsForUsersEntityType(): Unit = {
testArgumentParse("users", zkConfig = false)
}
@Test
def shouldFailParseArgumentsForTopicsEntityTypeUsingZookeeper(): Unit = {
assertThrows(classOf[IllegalArgumentException], () => testArgumentParse("topics", zkConfig = true))
}
@Test
def shouldParseArgumentsForTopicsEntityType(): Unit = {
testArgumentParse("topics", zkConfig = false)
}
@Test
def shouldParseArgumentsForBrokersEntityTypeUsingZookeeper(): Unit = {
testArgumentParse("brokers", zkConfig = true)
}
@Test
def shouldParseArgumentsForBrokersEntityType(): Unit = {
testArgumentParse("brokers", zkConfig = false)
}
@Test
def shouldParseArgumentsForBrokerLoggersEntityType(): Unit = {
testArgumentParse("broker-loggers", zkConfig = false)
}
@Test
def shouldFailParseArgumentsForIpEntityTypeUsingZookeeper(): Unit = {
assertThrows(classOf[IllegalArgumentException], () => testArgumentParse("ips", zkConfig = true))
}
@Test
def shouldParseArgumentsForIpEntityType(): Unit = {
testArgumentParse("ips", zkConfig = false)
}
def testArgumentParse(entityType: String, zkConfig: Boolean): Unit = {
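// Each plural entity type maps to a short flag, e.g. "topics" -> "--topic", "ips" -> "--ip".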
val shortFlag: String = s"--${entityType.dropRight(1)}"
val connectOpts = if (zkConfig)
("--zookeeper", zkConnect)
else
("--bootstrap-server", "localhost:9092")
// Should parse correctly
var createOpts = new ConfigCommandOptions(Array(connectOpts._1, connectOpts._2,
"--entity-name", "1",
"--entity-type", entityType,
"--describe"))
createOpts.checkArgs()
createOpts = new ConfigCommandOptions(Array(connectOpts._1, connectOpts._2,
shortFlag, "1",
"--describe"))
createOpts.checkArgs()
// For --alter and added config
createOpts = new ConfigCommandOptions(Array(connectOpts._1, connectOpts._2,
"--entity-name", "1",
"--entity-type", entityType,
"--alter",
"--add-config", "a=b,c=d"))
createOpts.checkArgs()
createOpts = new ConfigCommandOptions(Array(connectOpts._1, connectOpts._2,
"--entity-name", "1",
"--entity-type", entityType,
"--alter",
"--add-config-file", "/tmp/new.properties"))
createOpts.checkArgs()
createOpts = new ConfigCommandOptions(Array(connectOpts._1, connectOpts._2,
shortFlag, "1",
"--alter",
"--add-config", "a=b,c=d"))
createOpts.checkArgs()
createOpts = new ConfigCommandOptions(Array(connectOpts._1, connectOpts._2,
shortFlag, "1",
"--alter",
"--add-config-file", "/tmp/new.properties"))
createOpts.checkArgs()
// For alter and deleted config
createOpts = new ConfigCommandOptions(Array(connectOpts._1, connectOpts._2,
"--entity-name", "1",
"--entity-type", entityType,
"--alter",
"--delete-config", "a,b,c"))
createOpts.checkArgs()
createOpts = new ConfigCommandOptions(Array(connectOpts._1, connectOpts._2,
shortFlag, "1",
"--alter",
"--delete-config", "a,b,c"))
createOpts.checkArgs()
// For alter and both added, deleted config
createOpts = new ConfigCommandOptions(Array(connectOpts._1, connectOpts._2,
"--entity-name", "1",
"--entity-type", entityType,
"--alter",
"--add-config", "a=b,c=d",
"--delete-config", "a"))
createOpts.checkArgs()
createOpts = new ConfigCommandOptions(Array(connectOpts._1, connectOpts._2,
shortFlag, "1",
"--alter",
"--add-config", "a=b,c=d",
"--delete-config", "a"))
createOpts.checkArgs()
val addedProps = ConfigCommand.parseConfigsToBeAdded(createOpts)
assertEquals(2, addedProps.size())
assertEquals("b", addedProps.getProperty("a"))
assertEquals("d", addedProps.getProperty("c"))
val deletedProps = ConfigCommand.parseConfigsToBeDeleted(createOpts)
assertEquals(1, deletedProps.size)
assertEquals("a", deletedProps.head)
createOpts = new ConfigCommandOptions(Array(connectOpts._1, connectOpts._2,
"--entity-name", "1",
"--entity-type", entityType,
"--alter",
"--add-config", "a=b,c=,d=e,f="))
createOpts.checkArgs()
createOpts = new ConfigCommandOptions(Array(connectOpts._1, connectOpts._2,
shortFlag, "1",
"--alter",
"--add-config", "a=b,c=,d=e,f="))
createOpts.checkArgs()
val addedProps2 = ConfigCommand.parseConfigsToBeAdded(createOpts)
assertEquals(4, addedProps2.size())
assertEquals("b", addedProps2.getProperty("a"))
assertEquals("e", addedProps2.getProperty("d"))
assertTrue(addedProps2.getProperty("c").isEmpty)
assertTrue(addedProps2.getProperty("f").isEmpty)
}
@Test
def shouldFailIfAddAndAddFile(): Unit = {
// Should not parse correctly
val createOpts = new ConfigCommandOptions(Array("--bootstrap-server", "localhost:9092",
"--entity-name", "1",
"--entity-type", "brokers",
"--alter",
"--add-config", "a=b,c=d",
"--add-config-file", "/tmp/new.properties"
))
assertThrows(classOf[IllegalArgumentException], () => createOpts.checkArgs())
}
@Test
def testParseConfigsToBeAddedForAddConfigFile(): Unit = {
val fileContents =
"""a=b
|c = d
|json = {"key": "val"}
|nested = [[1, 2], [3, 4]]
|""".stripMargin
val file = TestUtils.tempFile(fileContents)
val addConfigFileArgs = Array("--add-config-file", file.getPath)
val createOpts = new ConfigCommandOptions(Array("--bootstrap-server", "localhost:9092",
"--entity-name", "1",
"--entity-type", "brokers",
"--alter")
++ addConfigFileArgs)
createOpts.checkArgs()
val addedProps = ConfigCommand.parseConfigsToBeAdded(createOpts)
assertEquals(4, addedProps.size())
assertEquals("b", addedProps.getProperty("a"))
assertEquals("d", addedProps.getProperty("c"))
assertEquals("{\\"key\\": \\"val\\"}", addedProps.getProperty("json"))
assertEquals("[[1, 2], [3, 4]]", addedProps.getProperty("nested"))
}
def doTestOptionEntityTypeNames(zkConfig: Boolean): Unit = {
val connectOpts = if (zkConfig)
("--zookeeper", zkConnect)
else
("--bootstrap-server", "localhost:9092")
def testExpectedEntityTypeNames(expectedTypes: List[String], expectedNames: List[String], args: String*): Unit = {
val createOpts = new ConfigCommandOptions(Array(connectOpts._1, connectOpts._2, "--describe") ++ args)
createOpts.checkArgs()
assertEquals(createOpts.entityTypes, expectedTypes)
assertEquals(createOpts.entityNames, expectedNames)
}
// zookeeper config only supports the "users" and "brokers" entity types
if (!zkConfig) {
testExpectedEntityTypeNames(List(ConfigType.Topic), List("A"), "--entity-type", "topics", "--entity-name", "A")
testExpectedEntityTypeNames(List(ConfigType.Ip), List("1.2.3.4"), "--entity-name", "1.2.3.4", "--entity-type", "ips")
testExpectedEntityTypeNames(List(ConfigType.User, ConfigType.Client), List("A", ""),
"--entity-type", "users", "--entity-type", "clients", "--entity-name", "A", "--entity-default")
testExpectedEntityTypeNames(List(ConfigType.User, ConfigType.Client), List("", "B"),
"--entity-default", "--entity-name", "B", "--entity-type", "users", "--entity-type", "clients")
testExpectedEntityTypeNames(List(ConfigType.Topic), List("A"), "--topic", "A")
testExpectedEntityTypeNames(List(ConfigType.Ip), List("1.2.3.4"), "--ip", "1.2.3.4")
testExpectedEntityTypeNames(List(ConfigType.Client, ConfigType.User), List("B", "A"), "--client", "B", "--user", "A")
testExpectedEntityTypeNames(List(ConfigType.Client, ConfigType.User), List("B", ""), "--client", "B", "--user-defaults")
testExpectedEntityTypeNames(List(ConfigType.Client, ConfigType.User), List("A"),
"--entity-type", "clients", "--entity-type", "users", "--entity-name", "A")
testExpectedEntityTypeNames(List(ConfigType.Topic), List.empty, "--entity-type", "topics")
testExpectedEntityTypeNames(List(ConfigType.Ip), List.empty, "--entity-type", "ips")
}
testExpectedEntityTypeNames(List(ConfigType.Broker), List("0"), "--entity-name", "0", "--entity-type", "brokers")
testExpectedEntityTypeNames(List(ConfigType.Broker), List("0"), "--broker", "0")
testExpectedEntityTypeNames(List(ConfigType.User), List.empty, "--entity-type", "users")
testExpectedEntityTypeNames(List(ConfigType.Broker), List.empty, "--entity-type", "brokers")
}
@Test
def testOptionEntityTypeNamesUsingZookeeper(): Unit = {
doTestOptionEntityTypeNames(zkConfig = true)
}
@Test
def testOptionEntityTypeNames(): Unit = {
doTestOptionEntityTypeNames(zkConfig = false)
}
@Test
def shouldFailIfUnrecognisedEntityTypeUsingZookeeper(): Unit = {
val createOpts = new ConfigCommandOptions(Array("--zookeeper", zkConnect,
"--entity-name", "client", "--entity-type", "not-recognised", "--alter", "--add-config", "a=b,c=d"))
assertThrows(classOf[IllegalArgumentException], () => ConfigCommand.alterConfigWithZk(null, createOpts, new DummyAdminZkClient(zkClient)))
}
@Test
def shouldFailIfUnrecognisedEntityType(): Unit = {
val createOpts = new ConfigCommandOptions(Array("--bootstrap-server", "localhost:9092",
"--entity-name", "client", "--entity-type", "not-recognised", "--alter", "--add-config", "a=b,c=d"))
assertThrows(classOf[IllegalArgumentException], () => ConfigCommand.alterConfig(new DummyAdminClient(new Node(1, "localhost", 9092)), createOpts))
}
@Test
def shouldFailIfBrokerEntityTypeIsNotAnIntegerUsingZookeeper(): Unit = {
val createOpts = new ConfigCommandOptions(Array("--zookeeper", zkConnect,
"--entity-name", "A", "--entity-type", "brokers", "--alter", "--add-config", "a=b,c=d"))
assertThrows(classOf[IllegalArgumentException], () => ConfigCommand.alterConfigWithZk(null, createOpts, new DummyAdminZkClient(zkClient)))
}
@Test
def shouldFailIfBrokerEntityTypeIsNotAnInteger(): Unit = {
val createOpts = new ConfigCommandOptions(Array("--bootstrap-server", "localhost:9092",
"--entity-name", "A", "--entity-type", "brokers", "--alter", "--add-config", "a=b,c=d"))
assertThrows(classOf[IllegalArgumentException], () => ConfigCommand.alterConfig(new DummyAdminClient(new Node(1, "localhost", 9092)), createOpts))
}
@Test
def shouldFailIfShortBrokerEntityTypeIsNotAnIntegerUsingZookeeper(): Unit = {
val createOpts = new ConfigCommandOptions(Array("--zookeeper", zkConnect,
"--broker", "A", "--alter", "--add-config", "a=b,c=d"))
assertThrows(classOf[IllegalArgumentException], () => ConfigCommand.alterConfigWithZk(null, createOpts, new DummyAdminZkClient(zkClient)))
}
@Test
def shouldFailIfShortBrokerEntityTypeIsNotAnInteger(): Unit = {
val createOpts = new ConfigCommandOptions(Array("--bootstrap-server", "localhost:9092",
"--broker", "A", "--alter", "--add-config", "a=b,c=d"))
assertThrows(classOf[IllegalArgumentException], () => ConfigCommand.alterConfig(new DummyAdminClient(new Node(1, "localhost", 9092)), createOpts))
}
@Test
def shouldFailIfMixedEntityTypeFlagsUsingZookeeper(): Unit = {
val createOpts = new ConfigCommandOptions(Array("--zookeeper", zkConnect,
"--entity-name", "A", "--entity-type", "users", "--client", "B", "--describe"))
assertThrows(classOf[IllegalArgumentException], () => createOpts.checkArgs())
}
@Test
def shouldFailIfMixedEntityTypeFlags(): Unit = {
val createOpts = new ConfigCommandOptions(Array("--bootstrap-server", "localhost:9092",
"--entity-name", "A", "--entity-type", "users", "--client", "B", "--describe"))
assertThrows(classOf[IllegalArgumentException], () => createOpts.checkArgs())
}
@Test
def shouldFailIfInvalidHost(): Unit = {
val createOpts = new ConfigCommandOptions(Array("--bootstrap-server", "localhost:9092",
"--entity-name", "A,B", "--entity-type", "ips", "--describe"))
assertThrows(classOf[IllegalArgumentException], () => createOpts.checkArgs())
}
@Test
def shouldFailIfInvalidHostUsingZookeeper(): Unit = {
val createOpts = new ConfigCommandOptions(Array("--zookeeper", zkConnect,
"--entity-name", "A,B", "--entity-type", "ips", "--describe"))
assertThrows(classOf[IllegalArgumentException], () => createOpts.checkArgs())
}
@Test
def shouldFailIfUnresolvableHost(): Unit = {
val createOpts = new ConfigCommandOptions(Array("--bootstrap-server", "localhost:9092",
"--entity-name", "admin", "--entity-type", "ips", "--describe"))
assertThrows(classOf[IllegalArgumentException], () => createOpts.checkArgs())
}
@Test
def shouldFailIfUnresolvableHostUsingZookeeper(): Unit = {
val createOpts = new ConfigCommandOptions(Array("--zookeeper", zkConnect,
"--entity-name", "admin", "--entity-type", "ips", "--describe"))
assertThrows(classOf[IllegalArgumentException], () => createOpts.checkArgs())
}
@Test
def shouldAddClientConfigUsingZookeeper(): Unit = {
val createOpts = new ConfigCommandOptions(Array("--zookeeper", zkConnect,
"--entity-name", "my-client-id",
"--entity-type", "clients",
"--alter",
"--add-config", "a=b,c=d"))
class TestAdminZkClient(zkClient: KafkaZkClient) extends AdminZkClient(zkClient) {
override def changeClientIdConfig(clientId: String, configChange: Properties): Unit = {
assertEquals("my-client-id", clientId)
assertEquals("b", configChange.get("a"))
assertEquals("d", configChange.get("c"))
}
}
ConfigCommand.alterConfigWithZk(null, createOpts, new TestAdminZkClient(zkClient))
}
@Test
def shouldAddIpConfigsUsingZookeeper(): Unit = {
val createOpts = new ConfigCommandOptions(Array("--zookeeper", zkConnect,
"--entity-name", "1.2.3.4",
"--entity-type", "ips",
"--alter",
"--add-config", "a=b,c=d"))
class TestAdminZkClient(zkClient: KafkaZkClient) extends AdminZkClient(zkClient) {
override def changeIpConfig(ip: String, configChange: Properties): Unit = {
assertEquals("1.2.3.4", ip)
assertEquals("b", configChange.get("a"))
assertEquals("d", configChange.get("c"))
}
}
ConfigCommand.alterConfigWithZk(null, createOpts, new TestAdminZkClient(zkClient))
}
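// Builds the command-line entity arguments plus the matching ClientQuotaEntity entry for a
// user/client/ip entity; Some(null) denotes the default entity (--entity-default) and None
// omits the entity entirely.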
private def toValues(entityName: Option[String], entityType: String): (Array[String], Map[String, String]) = {
val command = entityType match {
case ClientQuotaEntity.USER => "users"
case ClientQuotaEntity.CLIENT_ID => "clients"
case ClientQuotaEntity.IP => "ips"
}
entityName match {
case Some(null) =>
(Array("--entity-type", command, "--entity-default"), Map(entityType -> null))
case Some(name) =>
(Array("--entity-type", command, "--entity-name", name), Map(entityType -> name))
case None => (Array.empty, Map.empty)
}
}
private def verifyAlterCommandFails(expectedErrorMessage: String, alterOpts: Seq[String]): Unit = {
val mockAdminClient: Admin = EasyMock.createStrictMock(classOf[Admin])
val opts = new ConfigCommandOptions(Array("--bootstrap-server", "localhost:9092",
"--alter") ++ alterOpts)
val e = assertThrows(classOf[IllegalArgumentException], () => ConfigCommand.alterConfig(mockAdminClient, opts))
assertTrue(e.getMessage.contains(expectedErrorMessage), s"Unexpected exception: $e")
}
@Test
def shouldNotAlterNonQuotaIpConfigsUsingBootstrapServer(): Unit = {
// when using --bootstrap-server, it should be illegal to alter anything that is not a connection quota
// for ip entities
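// (altering connection quota keys such as connection_creation_rate is the supported path,
// as exercised in testAlterIpConfig below)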
val ipEntityOpts = List("--entity-type", "ips", "--entity-name", "127.0.0.1")
val invalidProp = "some_config"
verifyAlterCommandFails(invalidProp, ipEntityOpts ++ List("--add-config", "connection_creation_rate=10000,some_config=10"))
verifyAlterCommandFails(invalidProp, ipEntityOpts ++ List("--add-config", "some_config=10"))
verifyAlterCommandFails(invalidProp, ipEntityOpts ++ List("--delete-config", "connection_creation_rate=10000,some_config=10"))
verifyAlterCommandFails(invalidProp, ipEntityOpts ++ List("--delete-config", "some_config=10"))
}
private def verifyDescribeQuotas(describeArgs: List[String], expectedFilter: ClientQuotaFilter): Unit = {
val describeOpts = new ConfigCommandOptions(Array("--bootstrap-server", "localhost:9092",
"--describe") ++ describeArgs)
val describeFuture = new KafkaFutureImpl[util.Map[ClientQuotaEntity, util.Map[String, java.lang.Double]]]
describeFuture.complete(Map.empty[ClientQuotaEntity, util.Map[String, java.lang.Double]].asJava)
val describeResult: DescribeClientQuotasResult = EasyMock.createNiceMock(classOf[DescribeClientQuotasResult])
EasyMock.expect(describeResult.entities()).andReturn(describeFuture)
var describedConfigs = false
val node = new Node(1, "localhost", 9092)
val mockAdminClient = new MockAdminClient(util.Collections.singletonList(node), node) {
override def describeClientQuotas(filter: ClientQuotaFilter, options: DescribeClientQuotasOptions): DescribeClientQuotasResult = {
assertTrue(filter.strict)
assertEquals(expectedFilter.components().asScala.toSet, filter.components.asScala.toSet)
describedConfigs = true
describeResult
}
}
EasyMock.replay(describeResult)
ConfigCommand.describeConfig(mockAdminClient, describeOpts)
assertTrue(describedConfigs)
}
@Test
def testDescribeIpConfigs(): Unit = {
val entityType = ClientQuotaEntity.IP
val knownHost = "1.2.3.4"
val defaultIpFilter = ClientQuotaFilter.containsOnly(List(ClientQuotaFilterComponent.ofDefaultEntity(entityType)).asJava)
val singleIpFilter = ClientQuotaFilter.containsOnly(List(ClientQuotaFilterComponent.ofEntity(entityType, knownHost)).asJava)
val allIpsFilter = ClientQuotaFilter.containsOnly(List(ClientQuotaFilterComponent.ofEntityType(entityType)).asJava)
verifyDescribeQuotas(List("--entity-default", "--entity-type", "ips"), defaultIpFilter)
verifyDescribeQuotas(List("--ip-defaults"), defaultIpFilter)
verifyDescribeQuotas(List("--entity-type", "ips", "--entity-name", knownHost), singleIpFilter)
verifyDescribeQuotas(List("--ip", knownHost), singleIpFilter)
verifyDescribeQuotas(List("--entity-type", "ips"), allIpsFilter)
}
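// Drives a full --alter round trip against a MockAdminClient: the command first describes the
// current quotas for the expected entity and then submits the expected ClientQuotaAlteration ops.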
def verifyAlterQuotas(alterOpts: Seq[String], expectedAlterEntity: ClientQuotaEntity,
expectedProps: Map[String, java.lang.Double], expectedAlterOps: Set[ClientQuotaAlteration.Op]): Unit = {
val createOpts = new ConfigCommandOptions(Array("--bootstrap-server", "localhost:9092",
"--alter") ++ alterOpts)
var describedConfigs = false
val describeFuture = new KafkaFutureImpl[util.Map[ClientQuotaEntity, util.Map[String, java.lang.Double]]]
describeFuture.complete(Map(expectedAlterEntity -> expectedProps.asJava).asJava)
val describeResult: DescribeClientQuotasResult = EasyMock.createNiceMock(classOf[DescribeClientQuotasResult])
EasyMock.expect(describeResult.entities()).andReturn(describeFuture)
val expectedFilterComponents = expectedAlterEntity.entries.asScala.map { case (entityType, entityName) =>
if (entityName == null)
ClientQuotaFilterComponent.ofDefaultEntity(entityType)
else
ClientQuotaFilterComponent.ofEntity(entityType, entityName)
}.toSet
var alteredConfigs = false
val alterFuture = new KafkaFutureImpl[Void]
alterFuture.complete(null)
val alterResult: AlterClientQuotasResult = EasyMock.createNiceMock(classOf[AlterClientQuotasResult])
EasyMock.expect(alterResult.all()).andReturn(alterFuture)
val node = new Node(1, "localhost", 9092)
val mockAdminClient = new MockAdminClient(util.Collections.singletonList(node), node) {
override def describeClientQuotas(filter: ClientQuotaFilter, options: DescribeClientQuotasOptions): DescribeClientQuotasResult = {
assertTrue(filter.strict)
assertEquals(expectedFilterComponents, filter.components().asScala.toSet)
describedConfigs = true
describeResult
}
override def alterClientQuotas(entries: util.Collection[ClientQuotaAlteration], options: AlterClientQuotasOptions): AlterClientQuotasResult = {
assertFalse(options.validateOnly)
assertEquals(1, entries.size)
val alteration = entries.asScala.head
assertEquals(expectedAlterEntity, alteration.entity)
val ops = alteration.ops.asScala
assertEquals(expectedAlterOps, ops.toSet)
alteredConfigs = true
alterResult
}
}
EasyMock.replay(alterResult, describeResult)
ConfigCommand.alterConfig(mockAdminClient, createOpts)
assertTrue(describedConfigs)
assertTrue(alteredConfigs)
}
@Test
def testAlterIpConfig(): Unit = {
val (singleIpArgs, singleIpEntry) = toValues(Some("1.2.3.4"), ClientQuotaEntity.IP)
val singleIpEntity = new ClientQuotaEntity(singleIpEntry.asJava)
val (defaultIpArgs, defaultIpEntry) = toValues(Some(null), ClientQuotaEntity.IP)
val defaultIpEntity = new ClientQuotaEntity(defaultIpEntry.asJava)
val deleteArgs = List("--delete-config", "connection_creation_rate")
val deleteAlterationOps = Set(new ClientQuotaAlteration.Op("connection_creation_rate", null))
val propsToDelete = Map("connection_creation_rate" -> Double.box(50.0))
val addArgs = List("--add-config", "connection_creation_rate=100")
val addAlterationOps = Set(new ClientQuotaAlteration.Op("connection_creation_rate", 100.0))
verifyAlterQuotas(singleIpArgs ++ deleteArgs, singleIpEntity, propsToDelete, deleteAlterationOps)
verifyAlterQuotas(singleIpArgs ++ addArgs, singleIpEntity, Map.empty, addAlterationOps)
verifyAlterQuotas(defaultIpArgs ++ deleteArgs, defaultIpEntity, propsToDelete, deleteAlterationOps)
verifyAlterQuotas(defaultIpArgs ++ addArgs, defaultIpEntity, Map.empty, addAlterationOps)
}
@Test
def shouldAddClientConfig(): Unit = {
val alterArgs = List("--add-config", "consumer_byte_rate=20000,producer_byte_rate=10000",
"--delete-config", "request_percentage")
val propsToDelete = Map("request_percentage" -> Double.box(50.0))
val alterationOps = Set(
new ClientQuotaAlteration.Op("consumer_byte_rate", Double.box(20000)),
new ClientQuotaAlteration.Op("producer_byte_rate", Double.box(10000)),
new ClientQuotaAlteration.Op("request_percentage", null)
)
def verifyAlterUserClientQuotas(userOpt: Option[String], clientOpt: Option[String]): Unit = {
val (userArgs, userEntry) = toValues(userOpt, ClientQuotaEntity.USER)
val (clientArgs, clientEntry) = toValues(clientOpt, ClientQuotaEntity.CLIENT_ID)
val commandArgs = alterArgs ++ userArgs ++ clientArgs
val clientQuotaEntity = new ClientQuotaEntity((userEntry ++ clientEntry).asJava)
verifyAlterQuotas(commandArgs, clientQuotaEntity, propsToDelete, alterationOps)
}
verifyAlterUserClientQuotas(Some("test-user-1"), Some("test-client-1"))
verifyAlterUserClientQuotas(Some("test-user-2"), Some(null))
verifyAlterUserClientQuotas(Some("test-user-3"), None)
verifyAlterUserClientQuotas(Some(null), Some("test-client-2"))
verifyAlterUserClientQuotas(Some(null), Some(null))
verifyAlterUserClientQuotas(Some(null), None)
verifyAlterUserClientQuotas(None, Some("test-client-3"))
verifyAlterUserClientQuotas(None, Some(null))
}
private val userEntityOpts = List("--entity-type", "users", "--entity-name", "admin")
private val clientEntityOpts = List("--entity-type", "clients", "--entity-name", "admin")
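// SCRAM credentials are keyed by mechanism name, with the credential settings given in
// brackets (see the literals below); quotas use plain key=value pairs.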
private val addScramOpts = List("--add-config", "SCRAM-SHA-256=[iterations=8192,password=foo-secret]")
private val deleteScramOpts = List("--delete-config", "SCRAM-SHA-256")
@Test
def shouldNotAlterNonQuotaNonScramUserOrClientConfigUsingBootstrapServer(): Unit = {
// when using --bootstrap-server, it should be illegal to alter anything that is not a quota and not a SCRAM credential
// for both user and client entities
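// (quota keys such as consumer_byte_rate/producer_byte_rate and SCRAM mechanisms such as
// SCRAM-SHA-256 are the configs these entities do accept)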
val invalidProp = "some_config"
verifyAlterCommandFails(invalidProp, userEntityOpts ++
List("-add-config", "consumer_byte_rate=20000,producer_byte_rate=10000,some_config=10"))
verifyAlterCommandFails(invalidProp, userEntityOpts ++
List("--add-config", "consumer_byte_rate=20000,producer_byte_rate=10000,some_config=10"))
verifyAlterCommandFails(invalidProp, clientEntityOpts ++ List("--add-config", "some_config=10"))
verifyAlterCommandFails(invalidProp, userEntityOpts ++ List("--delete-config", "consumer_byte_rate,some_config"))
verifyAlterCommandFails(invalidProp, userEntityOpts ++ List("--delete-config", "SCRAM-SHA-256,some_config"))
verifyAlterCommandFails(invalidProp, clientEntityOpts ++ List("--delete-config", "some_config"))
}
@Test
def shouldNotAlterScramClientConfigUsingBootstrapServer(): Unit = {
// when using --bootstrap-server, it should be illegal to alter SCRAM credentials for client entities
verifyAlterCommandFails("SCRAM-SHA-256", clientEntityOpts ++ addScramOpts)
verifyAlterCommandFails("SCRAM-SHA-256", clientEntityOpts ++ deleteScramOpts)
}
@Test
def shouldNotCreateUserScramCredentialConfigWithUnderMinimumIterationsUsingBootstrapServer(): Unit = {
// when using --bootstrap-server, it should be illegal to create a SCRAM credential for a user
// with an iterations value less than the minimum
verifyAlterCommandFails("SCRAM-SHA-256", userEntityOpts ++ List("--add-config", "SCRAM-SHA-256=[iterations=100,password=foo-secret]"))
}
@Test
def shouldNotAlterUserScramCredentialAndClientQuotaConfigsSimultaneouslyUsingBootstrapServer(): Unit = {
// when using --bootstrap-server, it should be illegal to alter both SCRAM credentials and quotas for user entities
val expectedErrorMessage = "SCRAM-SHA-256"
val secondUserEntityOpts = List("--entity-type", "users", "--entity-name", "admin1")
val addQuotaOpts = List("--add-config", "consumer_byte_rate=20000")
val deleteQuotaOpts = List("--delete-config", "consumer_byte_rate")
verifyAlterCommandFails(expectedErrorMessage, userEntityOpts ++ addScramOpts ++ userEntityOpts ++ deleteQuotaOpts)
verifyAlterCommandFails(expectedErrorMessage, userEntityOpts ++ addScramOpts ++ secondUserEntityOpts ++ deleteQuotaOpts)
verifyAlterCommandFails(expectedErrorMessage, userEntityOpts ++ deleteScramOpts ++ userEntityOpts ++ addQuotaOpts)
verifyAlterCommandFails(expectedErrorMessage, userEntityOpts ++ deleteScramOpts ++ secondUserEntityOpts ++ addQuotaOpts)
// change order of quota/SCRAM commands, verify alter still fails
verifyAlterCommandFails(expectedErrorMessage, userEntityOpts ++ deleteQuotaOpts ++ userEntityOpts ++ addScramOpts)
verifyAlterCommandFails(expectedErrorMessage, secondUserEntityOpts ++ deleteQuotaOpts ++ userEntityOpts ++ addScramOpts)
verifyAlterCommandFails(expectedErrorMessage, userEntityOpts ++ addQuotaOpts ++ userEntityOpts ++ deleteScramOpts)
verifyAlterCommandFails(expectedErrorMessage, secondUserEntityOpts ++ addQuotaOpts ++ userEntityOpts ++ deleteScramOpts)
}
@Test
def shouldNotDescribeUserScramCredentialsWithEntityDefaultUsingBootstrapServer(): Unit = {
def verifyUserScramCredentialsNotDescribed(requestOpts: List[String]): Unit = {
// User SCRAM credentials should not be described when specifying
// --describe --entity-type users --entity-default (or --user-defaults) with --bootstrap-server
val describeFuture = new KafkaFutureImpl[util.Map[ClientQuotaEntity, util.Map[String, java.lang.Double]]]
describeFuture.complete(Map((new ClientQuotaEntity(Map("" -> "").asJava) -> Map(("request_percentage" -> Double.box(50.0))).asJava)).asJava)
val describeClientQuotasResult: DescribeClientQuotasResult = EasyMock.createNiceMock(classOf[DescribeClientQuotasResult])
EasyMock.expect(describeClientQuotasResult.entities()).andReturn(describeFuture)
EasyMock.replay(describeClientQuotasResult)
val node = new Node(1, "localhost", 9092)
val mockAdminClient = new MockAdminClient(util.Collections.singletonList(node), node) {
override def describeClientQuotas(filter: ClientQuotaFilter, options: DescribeClientQuotasOptions): DescribeClientQuotasResult = {
describeClientQuotasResult
}
override def describeUserScramCredentials(users: util.List[String], options: DescribeUserScramCredentialsOptions): DescribeUserScramCredentialsResult = {
throw new IllegalStateException("Incorrectly described SCRAM credentials when specifying --entity-default with --bootstrap-server")
}
}
val opts = new ConfigCommandOptions(Array("--bootstrap-server", "localhost:9092", "--describe") ++ requestOpts)
ConfigCommand.describeConfig(mockAdminClient, opts) // fails if describeUserScramCredentials() is invoked
}
val expectedMsg = "The use of --entity-default or --user-defaults is not allowed with User SCRAM Credentials using --bootstrap-server."
val defaultUserOpt = List("--user-defaults")
val verboseDefaultUserOpts = List("--entity-type", "users", "--entity-default")
verifyAlterCommandFails(expectedMsg, verboseDefaultUserOpts ++ addScramOpts)
verifyAlterCommandFails(expectedMsg, verboseDefaultUserOpts ++ deleteScramOpts)
verifyUserScramCredentialsNotDescribed(verboseDefaultUserOpts)
verifyAlterCommandFails(expectedMsg, defaultUserOpt ++ addScramOpts)
verifyAlterCommandFails(expectedMsg, defaultUserOpt ++ deleteScramOpts)
verifyUserScramCredentialsNotDescribed(defaultUserOpt)
}
@Test
def shouldAddTopicConfigUsingZookeeper(): Unit = {
val createOpts = new ConfigCommandOptions(Array("--zookeeper", zkConnect,
"--entity-name", "my-topic",
"--entity-type", "topics",
"--alter",
"--add-config", "a=b,c=d"))
class TestAdminZkClient(zkClient: KafkaZkClient) extends AdminZkClient(zkClient) {
override def changeTopicConfig(topic: String, configChange: Properties): Unit = {
assertEquals("my-topic", topic)
assertEquals("b", configChange.get("a"))
assertEquals("d", configChange.get("c"))
}
}
ConfigCommand.alterConfigWithZk(null, createOpts, new TestAdminZkClient(zkClient))
}
@Test
def shouldAlterTopicConfig(): Unit = {
doShouldAlterTopicConfig(false)
}
@Test
def shouldAlterTopicConfigFile(): Unit = {
doShouldAlterTopicConfig(true)
}
def doShouldAlterTopicConfig(file: Boolean): Unit = {
var filePath = ""
val addedConfigs = Seq("delete.retention.ms=1000000", "min.insync.replicas=2")
if (file) {
val file = TestUtils.tempFile(addedConfigs.mkString("\\n"))
filePath = file.getPath
}
val resourceName = "my-topic"
val alterOpts = new ConfigCommandOptions(Array("--bootstrap-server", "localhost:9092",
"--entity-name", resourceName,
"--entity-type", "topics",
"--alter",
if (file) "--add-config-file" else "--add-config",
if (file) filePath else addedConfigs.mkString(","),
"--delete-config", "unclean.leader.election.enable"))
var alteredConfigs = false
def newConfigEntry(name: String, value: String): ConfigEntry =
ConfigTest.newConfigEntry(name, value, ConfigEntry.ConfigSource.DYNAMIC_TOPIC_CONFIG, false, false, List.empty[ConfigEntry.ConfigSynonym].asJava)
val resource = new ConfigResource(ConfigResource.Type.TOPIC, resourceName)
val configEntries = List(newConfigEntry("min.insync.replicas", "1"), newConfigEntry("unclean.leader.election.enable", "1")).asJava
val future = new KafkaFutureImpl[util.Map[ConfigResource, Config]]
future.complete(util.Collections.singletonMap(resource, new Config(configEntries)))
val describeResult: DescribeConfigsResult = EasyMock.createNiceMock(classOf[DescribeConfigsResult])
EasyMock.expect(describeResult.all()).andReturn(future).once()
val alterFuture = new KafkaFutureImpl[Void]
alterFuture.complete(null)
val alterResult: AlterConfigsResult = EasyMock.createNiceMock(classOf[AlterConfigsResult])
EasyMock.expect(alterResult.all()).andReturn(alterFuture)
val node = new Node(1, "localhost", 9092)
val mockAdminClient = new MockAdminClient(util.Collections.singletonList(node), node) {
override def describeConfigs(resources: util.Collection[ConfigResource], options: DescribeConfigsOptions): DescribeConfigsResult = {
assertFalse(options.includeSynonyms(), "Config synonyms requested unnecessarily")
assertEquals(1, resources.size)
val resource = resources.iterator.next
assertEquals(resource.`type`, ConfigResource.Type.TOPIC)
assertEquals(resource.name, resourceName)
describeResult
}
override def incrementalAlterConfigs(configs: util.Map[ConfigResource, util.Collection[AlterConfigOp]], options: AlterConfigsOptions): AlterConfigsResult = {
assertEquals(1, configs.size)
val entry = configs.entrySet.iterator.next
val resource = entry.getKey
val alterConfigOps = entry.getValue
assertEquals(ConfigResource.Type.TOPIC, resource.`type`)
assertEquals(3, alterConfigOps.size)
val expectedConfigOps = Set(
new AlterConfigOp(newConfigEntry("delete.retention.ms", "1000000"), AlterConfigOp.OpType.SET),
new AlterConfigOp(newConfigEntry("min.insync.replicas", "2"), AlterConfigOp.OpType.SET),
new AlterConfigOp(newConfigEntry("unclean.leader.election.enable", ""), AlterConfigOp.OpType.DELETE)
)
assertEquals(expectedConfigOps.size, alterConfigOps.size)
expectedConfigOps.foreach { expectedOp =>
val actual = alterConfigOps.asScala.find(_.configEntry.name == expectedOp.configEntry.name)
assertNotEquals(actual, None)
assertEquals(expectedOp.opType, actual.get.opType)
assertEquals(expectedOp.configEntry.name, actual.get.configEntry.name)
assertEquals(expectedOp.configEntry.value, actual.get.configEntry.value)
}
alteredConfigs = true
alterResult
}
}
EasyMock.replay(alterResult, describeResult)
ConfigCommand.alterConfig(mockAdminClient, alterOpts)
assertTrue(alteredConfigs)
EasyMock.reset(alterResult, describeResult)
}
@Test
def shouldDescribeConfigSynonyms(): Unit = {
val resourceName = "my-topic"
val describeOpts = new ConfigCommandOptions(Array("--bootstrap-server", "localhost:9092",
"--entity-name", resourceName,
"--entity-type", "topics",
"--describe",
"--all"))
val resource = new ConfigResource(ConfigResource.Type.TOPIC, resourceName)
val future = new KafkaFutureImpl[util.Map[ConfigResource, Config]]
future.complete(util.Collections.singletonMap(resource, new Config(util.Collections.emptyList[ConfigEntry])))
val describeResult: DescribeConfigsResult = EasyMock.createNiceMock(classOf[DescribeConfigsResult])
EasyMock.expect(describeResult.all()).andReturn(future).once()
val node = new Node(1, "localhost", 9092)
val mockAdminClient = new MockAdminClient(util.Collections.singletonList(node), node) {
override def describeConfigs(resources: util.Collection[ConfigResource], options: DescribeConfigsOptions): DescribeConfigsResult = {
assertTrue(options.includeSynonyms(), "Synonyms not requested")
assertEquals(Set(resource), resources.asScala.toSet)
describeResult
}
}
EasyMock.replay(describeResult)
ConfigCommand.describeConfig(mockAdminClient, describeOpts)
EasyMock.reset(describeResult)
}
@Test
def shouldNotAllowAddBrokerQuotaConfigWhileBrokerUpUsingZookeeper(): Unit = {
val alterOpts = new ConfigCommandOptions(Array("--zookeeper", zkConnect,
"--entity-name", "1",
"--entity-type", "brokers",
"--alter",
"--add-config", "leader.replication.throttled.rate=10,follower.replication.throttled.rate=20"))
val mockZkClient: KafkaZkClient = EasyMock.createNiceMock(classOf[KafkaZkClient])
val mockBroker: Broker = EasyMock.createNiceMock(classOf[Broker])
EasyMock.expect(mockZkClient.getBroker(1)).andReturn(Option(mockBroker))
EasyMock.replay(mockZkClient)
assertThrows(classOf[IllegalArgumentException],
() => ConfigCommand.alterConfigWithZk(mockZkClient, alterOpts, new DummyAdminZkClient(zkClient)))
}
@Test
def shouldNotAllowDescribeBrokerWhileBrokerUpUsingZookeeper(): Unit = {
val describeOpts = new ConfigCommandOptions(Array("--zookeeper", zkConnect,
"--entity-name", "1",
"--entity-type", "brokers",
"--describe"))
val mockZkClient: KafkaZkClient = EasyMock.createNiceMock(classOf[KafkaZkClient])
val mockBroker: Broker = EasyMock.createNiceMock(classOf[Broker])
EasyMock.expect(mockZkClient.getBroker(1)).andReturn(Option(mockBroker))
EasyMock.replay(mockZkClient)
assertThrows(classOf[IllegalArgumentException],
() => ConfigCommand.describeConfigWithZk(mockZkClient, describeOpts, new DummyAdminZkClient(zkClient)))
}
@Test
def shouldSupportDescribeBrokerBeforeBrokerUpUsingZookeeper(): Unit = {
val describeOpts = new ConfigCommandOptions(Array("--zookeeper", zkConnect,
"--entity-name", "1",
"--entity-type", "brokers",
"--describe"))
class TestAdminZkClient(zkClient: KafkaZkClient) extends AdminZkClient(zkClient) {
override def fetchEntityConfig(rootEntityType: String, sanitizedEntityName: String): Properties = {
assertEquals("brokers", rootEntityType)
assertEquals("1", sanitizedEntityName)
new Properties()
}
}
val mockZkClient: KafkaZkClient = EasyMock.createNiceMock(classOf[KafkaZkClient])
EasyMock.expect(mockZkClient.getBroker(1)).andReturn(None)
EasyMock.replay(mockZkClient)
ConfigCommand.describeConfigWithZk(mockZkClient, describeOpts, new TestAdminZkClient(zkClient))
}
@Test
def shouldAddBrokerLoggerConfig(): Unit = {
val node = new Node(1, "localhost", 9092)
verifyAlterBrokerLoggerConfig(node, "1", "1", List(
new ConfigEntry("kafka.log.LogCleaner", "INFO"),
new ConfigEntry("kafka.server.ReplicaManager", "INFO"),
new ConfigEntry("kafka.server.KafkaApi", "INFO")
))
}
@Test
def testNoSpecifiedEntityOptionWithDescribeBrokersInZKIsAllowed(): Unit = {
val optsList = List("--zookeeper", zkConnect,
"--entity-type", ConfigType.Broker,
"--describe"
)
new ConfigCommandOptions(optsList.toArray).checkArgs()
}
@Test
def testNoSpecifiedEntityOptionWithDescribeBrokersInBootstrapServerIsAllowed(): Unit = {
val optsList = List("--bootstrap-server", "localhost:9092",
"--entity-type", ConfigType.Broker,
"--describe"
)
new ConfigCommandOptions(optsList.toArray).checkArgs()
}
@Test
def testDescribeAllBrokerConfig(): Unit = {
val optsList = List("--bootstrap-server", "localhost:9092",
"--entity-type", ConfigType.Broker,
"--entity-name", "1",
"--describe",
"--all")
new ConfigCommandOptions(optsList.toArray).checkArgs()
}
@Test
def testDescribeAllTopicConfig(): Unit = {
val optsList = List("--bootstrap-server", "localhost:9092",
"--entity-type", ConfigType.Topic,
"--entity-name", "foo",
"--describe",
"--all")
new ConfigCommandOptions(optsList.toArray).checkArgs()
}
@Test
def testDescribeAllBrokerConfigBootstrapServerRequired(): Unit = {
val optsList = List("--zookeeper", zkConnect,
"--entity-type", ConfigType.Broker,
"--entity-name", "1",
"--describe",
"--all")
assertThrows(classOf[IllegalArgumentException], () => new ConfigCommandOptions(optsList.toArray).checkArgs())
}
@Test
def testEntityDefaultOptionWithDescribeBrokerLoggerIsNotAllowed(): Unit = {
val optsList = List("--bootstrap-server", "localhost:9092",
"--entity-type", ConfigCommand.BrokerLoggerConfigType,
"--entity-default",
"--describe"
)
assertThrows(classOf[IllegalArgumentException], () => new ConfigCommandOptions(optsList.toArray).checkArgs())
}
@Test
def testEntityDefaultOptionWithAlterBrokerLoggerIsNotAllowed(): Unit = {
val optsList = List("--bootstrap-server", "localhost:9092",
"--entity-type", ConfigCommand.BrokerLoggerConfigType,
"--entity-default",
"--alter",
"--add-config", "kafka.log.LogCleaner=DEBUG"
)
assertThrows(classOf[IllegalArgumentException], () => new ConfigCommandOptions(optsList.toArray).checkArgs())
}
@Test
def shouldRaiseInvalidConfigurationExceptionWhenAddingInvalidBrokerLoggerConfig(): Unit = {
val node = new Node(1, "localhost", 9092)
// verifyAlterBrokerLoggerConfig tries to alter kafka.log.LogCleaner, kafka.server.ReplicaManager and kafka.server.KafkaApi,
// yet we make DescribeConfigs return only one logger, implying that kafka.server.ReplicaManager and kafka.log.LogCleaner are invalid
assertThrows(classOf[InvalidConfigurationException], () => verifyAlterBrokerLoggerConfig(node, "1", "1", List(
new ConfigEntry("kafka.server.KafkaApi", "INFO")
)))
}
@Test
def shouldAddDefaultBrokerDynamicConfig(): Unit = {
val node = new Node(1, "localhost", 9092)
verifyAlterBrokerConfig(node, "", List("--entity-default"))
}
@Test
def shouldAddBrokerDynamicConfig(): Unit = {
val node = new Node(1, "localhost", 9092)
verifyAlterBrokerConfig(node, "1", List("--entity-name", "1"))
}
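// Helper for the dynamic broker config tests above: issues an --alter with the given entity options
// against a mocked admin client and asserts that the previously described broker configs and the
// newly added ones are merged before alterConfigs is invoked.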
def verifyAlterBrokerConfig(node: Node, resourceName: String, resourceOpts: List[String]): Unit = {
val optsList = List("--bootstrap-server", "localhost:9092",
"--entity-type", "brokers",
"--alter",
"--add-config", "message.max.bytes=10,leader.replication.throttled.rate=10") ++ resourceOpts
val alterOpts = new ConfigCommandOptions(optsList.toArray)
val brokerConfigs = mutable.Map[String, String]("num.io.threads" -> "5")
val resource = new ConfigResource(ConfigResource.Type.BROKER, resourceName)
val configEntries = util.Collections.singletonList(new ConfigEntry("num.io.threads", "5"))
val future = new KafkaFutureImpl[util.Map[ConfigResource, Config]]
future.complete(util.Collections.singletonMap(resource, new Config(configEntries)))
val describeResult: DescribeConfigsResult = EasyMock.createNiceMock(classOf[DescribeConfigsResult])
EasyMock.expect(describeResult.all()).andReturn(future).once()
val alterFuture = new KafkaFutureImpl[Void]
alterFuture.complete(null)
val alterResult: AlterConfigsResult = EasyMock.createNiceMock(classOf[AlterConfigsResult])
EasyMock.expect(alterResult.all()).andReturn(alterFuture)
val mockAdminClient = new MockAdminClient(util.Collections.singletonList(node), node) {
override def describeConfigs(resources: util.Collection[ConfigResource], options: DescribeConfigsOptions): DescribeConfigsResult = {
assertFalse(options.includeSynonyms(), "Config synonyms requested unnecessarily")
assertEquals(1, resources.size)
val resource = resources.iterator.next
assertEquals(ConfigResource.Type.BROKER, resource.`type`)
assertEquals(resourceName, resource.name)
describeResult
}
override def alterConfigs(configs: util.Map[ConfigResource, Config], options: AlterConfigsOptions): AlterConfigsResult = {
assertEquals(1, configs.size)
val entry = configs.entrySet.iterator.next
val resource = entry.getKey
val config = entry.getValue
assertEquals(ConfigResource.Type.BROKER, resource.`type`)
config.entries.forEach { e => brokerConfigs.put(e.name, e.value) }
alterResult
}
}
EasyMock.replay(alterResult, describeResult)
ConfigCommand.alterConfig(mockAdminClient, alterOpts)
assertEquals(Map("message.max.bytes" -> "10", "num.io.threads" -> "5", "leader.replication.throttled.rate" -> "10"),
brokerConfigs.toMap)
EasyMock.reset(alterResult, describeResult)
}
@Test
def shouldDescribeConfigBrokerWithoutEntityName(): Unit = {
val describeOpts = new ConfigCommandOptions(Array("--bootstrap-server", "localhost:9092",
"--entity-type", "brokers",
"--describe"))
val BrokerDefaultEntityName = ""
val resourceCustom = new ConfigResource(ConfigResource.Type.BROKER, "1")
val resourceDefault = new ConfigResource(ConfigResource.Type.BROKER, BrokerDefaultEntityName)
val future = new KafkaFutureImpl[util.Map[ConfigResource, Config]]
val emptyConfig = new Config(util.Collections.emptyList[ConfigEntry])
val resultMap = Map(resourceCustom -> emptyConfig, resourceDefault -> emptyConfig).asJava
future.complete(resultMap)
val describeResult: DescribeConfigsResult = EasyMock.createNiceMock(classOf[DescribeConfigsResult])
// make sure it will be called 2 times: (1) for broker "1" (2) for default broker ""
EasyMock.expect(describeResult.all()).andReturn(future).times(2)
val node = new Node(1, "localhost", 9092)
val mockAdminClient = new MockAdminClient(util.Collections.singletonList(node), node) {
override def describeConfigs(resources: util.Collection[ConfigResource], options: DescribeConfigsOptions): DescribeConfigsResult = {
assertTrue(options.includeSynonyms(), "Synonyms not requested")
val resource = resources.iterator.next
assertEquals(ConfigResource.Type.BROKER, resource.`type`)
assertTrue(resourceCustom.name == resource.name || resourceDefault.name == resource.name)
assertEquals(1, resources.size)
describeResult
}
}
EasyMock.replay(describeResult)
ConfigCommand.describeConfig(mockAdminClient, describeOpts)
EasyMock.verify(describeResult)
EasyMock.reset(describeResult)
}
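// Helper for the broker-logger tests: alters logger levels via --alter with one --add-config and two
// --delete-config entries, and asserts that the corresponding SET/DELETE operations are sent through
// incrementalAlterConfigs.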
private def verifyAlterBrokerLoggerConfig(node: Node, resourceName: String, entityName: String,
describeConfigEntries: List[ConfigEntry]): Unit = {
val optsList = List("--bootstrap-server", "localhost:9092",
"--entity-type", ConfigCommand.BrokerLoggerConfigType,
"--alter",
"--entity-name", entityName,
"--add-config", "kafka.log.LogCleaner=DEBUG",
"--delete-config", "kafka.server.ReplicaManager,kafka.server.KafkaApi")
val alterOpts = new ConfigCommandOptions(optsList.toArray)
var alteredConfigs = false
val resource = new ConfigResource(ConfigResource.Type.BROKER_LOGGER, resourceName)
val future = new KafkaFutureImpl[util.Map[ConfigResource, Config]]
future.complete(util.Collections.singletonMap(resource, new Config(describeConfigEntries.asJava)))
val describeResult: DescribeConfigsResult = EasyMock.createNiceMock(classOf[DescribeConfigsResult])
EasyMock.expect(describeResult.all()).andReturn(future).once()
val alterFuture = new KafkaFutureImpl[Void]
alterFuture.complete(null)
val alterResult: AlterConfigsResult = EasyMock.createNiceMock(classOf[AlterConfigsResult])
EasyMock.expect(alterResult.all()).andReturn(alterFuture)
val mockAdminClient = new MockAdminClient(util.Collections.singletonList(node), node) {
override def describeConfigs(resources: util.Collection[ConfigResource], options: DescribeConfigsOptions): DescribeConfigsResult = {
assertEquals(1, resources.size)
val resource = resources.iterator.next
assertEquals(ConfigResource.Type.BROKER_LOGGER, resource.`type`)
assertEquals(resourceName, resource.name)
describeResult
}
override def incrementalAlterConfigs(configs: util.Map[ConfigResource, util.Collection[AlterConfigOp]], options: AlterConfigsOptions): AlterConfigsResult = {
assertEquals(1, configs.size)
val entry = configs.entrySet.iterator.next
val resource = entry.getKey
val alterConfigOps = entry.getValue
assertEquals(ConfigResource.Type.BROKER_LOGGER, resource.`type`)
assertEquals(3, alterConfigOps.size)
val expectedConfigOps = List(
new AlterConfigOp(new ConfigEntry("kafka.log.LogCleaner", "DEBUG"), AlterConfigOp.OpType.SET),
new AlterConfigOp(new ConfigEntry("kafka.server.ReplicaManager", ""), AlterConfigOp.OpType.DELETE),
new AlterConfigOp(new ConfigEntry("kafka.server.KafkaApi", ""), AlterConfigOp.OpType.DELETE)
)
assertEquals(expectedConfigOps, alterConfigOps.asScala.toList)
alteredConfigs = true
alterResult
}
}
EasyMock.replay(alterResult, describeResult)
ConfigCommand.alterConfig(mockAdminClient, alterOpts)
assertTrue(alteredConfigs)
EasyMock.reset(alterResult, describeResult)
}
@Test
def shouldSupportCommaSeparatedValuesUsingZookeeper(): Unit = {
val createOpts = new ConfigCommandOptions(Array("--zookeeper", zkConnect,
"--entity-name", "my-topic",
"--entity-type", "topics",
"--alter",
"--add-config", "a=b,c=[d,e ,f],g=[h,i]"))
class TestAdminZkClient(zkClient: KafkaZkClient) extends AdminZkClient(zkClient) {
override def changeTopicConfig(topic: String, configChange: Properties): Unit = {
assertEquals("my-topic", topic)
assertEquals("b", configChange.get("a"))
assertEquals("d,e ,f", configChange.get("c"))
assertEquals("h,i", configChange.get("g"))
}
}
ConfigCommand.alterConfigWithZk(null, createOpts, new TestAdminZkClient(zkClient))
}
@Test
def shouldNotUpdateBrokerConfigIfMalformedEntityNameUsingZookeeper(): Unit = {
val createOpts = new ConfigCommandOptions(Array("--zookeeper", zkConnect,
"--entity-name", "1,2,3", //Don't support multiple brokers currently
"--entity-type", "brokers",
"--alter",
"--add-config", "leader.replication.throttled.rate=10"))
assertThrows(classOf[IllegalArgumentException], () => ConfigCommand.alterConfigWithZk(null, createOpts, new DummyAdminZkClient(zkClient)))
}
@Test
def shouldNotUpdateBrokerConfigIfMalformedEntityName(): Unit = {
val createOpts = new ConfigCommandOptions(Array("--bootstrap-server", "localhost:9092",
"--entity-name", "1,2,3", //Don't support multiple brokers currently
"--entity-type", "brokers",
"--alter",
"--add-config", "leader.replication.throttled.rate=10"))
assertThrows(classOf[IllegalArgumentException], () => ConfigCommand.alterConfig(new DummyAdminClient(new Node(1, "localhost", 9092)), createOpts))
}
@Test
def testDynamicBrokerConfigUpdateUsingZooKeeper(): Unit = {
val brokerId = "1"
val adminZkClient = new AdminZkClient(zkClient)
val alterOpts = Array("--zookeeper", zkConnect, "--entity-type", "brokers", "--alter")
def entityOpt(brokerId: Option[String]): Array[String] = {
brokerId.map(id => Array("--entity-name", id)).getOrElse(Array("--entity-default"))
}
def alterConfigWithZk(configs: Map[String, String], brokerId: Option[String],
encoderConfigs: Map[String, String] = Map.empty): Unit = {
val configStr = (configs ++ encoderConfigs).map { case (k, v) => s"$k=$v" }.mkString(",")
val addOpts = new ConfigCommandOptions(alterOpts ++ entityOpt(brokerId) ++ Array("--add-config", configStr))
ConfigCommand.alterConfigWithZk(zkClient, addOpts, adminZkClient)
}
def verifyConfig(configs: Map[String, String], brokerId: Option[String]): Unit = {
val entityConfigs = zkClient.getEntityConfigs("brokers", brokerId.getOrElse(ConfigEntityName.Default))
assertEquals(configs, entityConfigs.asScala)
}
def alterAndVerifyConfig(configs: Map[String, String], brokerId: Option[String]): Unit = {
alterConfigWithZk(configs, brokerId)
verifyConfig(configs, brokerId)
}
def deleteAndVerifyConfig(configNames: Set[String], brokerId: Option[String]): Unit = {
val deleteOpts = new ConfigCommandOptions(alterOpts ++ entityOpt(brokerId) ++
Array("--delete-config", configNames.mkString(",")))
ConfigCommand.alterConfigWithZk(zkClient, deleteOpts, adminZkClient)
verifyConfig(Map.empty, brokerId)
}
// Add config
alterAndVerifyConfig(Map("message.max.size" -> "110000"), Some(brokerId))
alterAndVerifyConfig(Map("message.max.size" -> "120000"), None)
// Change config
alterAndVerifyConfig(Map("message.max.size" -> "130000"), Some(brokerId))
alterAndVerifyConfig(Map("message.max.size" -> "140000"), None)
// Delete config
deleteAndVerifyConfig(Set("message.max.size"), Some(brokerId))
deleteAndVerifyConfig(Set("message.max.size"), None)
// Listener configs: should work only with listener name
alterAndVerifyConfig(Map("listener.name.external.ssl.keystore.location" -> "/tmp/test.jks"), Some(brokerId))
assertThrows(classOf[ConfigException], () => alterConfigWithZk(Map("ssl.keystore.location" -> "/tmp/test.jks"), Some(brokerId)))
// Per-broker config configured at default cluster-level should fail
assertThrows(classOf[ConfigException], () => alterConfigWithZk(Map("listener.name.external.ssl.keystore.location" -> "/tmp/test.jks"), None))
deleteAndVerifyConfig(Set("listener.name.external.ssl.keystore.location"), Some(brokerId))
// Password config update without encoder secret should fail
assertThrows(classOf[IllegalArgumentException], () => alterConfigWithZk(Map("listener.name.external.ssl.keystore.password" -> "secret"), Some(brokerId)))
// Password config update with encoder secret should succeed and encoded password must be stored in ZK
val configs = Map("listener.name.external.ssl.keystore.password" -> "secret", "log.cleaner.threads" -> "2")
val encoderConfigs = Map(KafkaConfig.PasswordEncoderSecretProp -> "encoder-secret")
alterConfigWithZk(configs, Some(brokerId), encoderConfigs)
val brokerConfigs = zkClient.getEntityConfigs("brokers", brokerId)
assertFalse(brokerConfigs.contains(KafkaConfig.PasswordEncoderSecretProp), "Encoder secret stored in ZooKeeper")
assertEquals("2", brokerConfigs.getProperty("log.cleaner.threads")) // not encoded
val encodedPassword = brokerConfigs.getProperty("listener.name.external.ssl.keystore.password")
val passwordEncoder = ConfigCommand.createPasswordEncoder(encoderConfigs)
assertEquals("secret", passwordEncoder.decode(encodedPassword).value)
assertEquals(configs.size, brokerConfigs.size)
// Password config update with overrides for encoder parameters
val configs2 = Map("listener.name.internal.ssl.keystore.password" -> "secret2")
val encoderConfigs2 = Map(KafkaConfig.PasswordEncoderSecretProp -> "encoder-secret",
KafkaConfig.PasswordEncoderCipherAlgorithmProp -> "DES/CBC/PKCS5Padding",
KafkaConfig.PasswordEncoderIterationsProp -> "1024",
KafkaConfig.PasswordEncoderKeyFactoryAlgorithmProp -> "PBKDF2WithHmacSHA1",
KafkaConfig.PasswordEncoderKeyLengthProp -> "64")
alterConfigWithZk(configs2, Some(brokerId), encoderConfigs2)
val brokerConfigs2 = zkClient.getEntityConfigs("brokers", brokerId)
val encodedPassword2 = brokerConfigs2.getProperty("listener.name.internal.ssl.keystore.password")
assertEquals("secret2", ConfigCommand.createPasswordEncoder(encoderConfigs).decode(encodedPassword2).value)
assertEquals("secret2", ConfigCommand.createPasswordEncoder(encoderConfigs2).decode(encodedPassword2).value)
// Password config update at default cluster-level should fail
assertThrows(classOf[ConfigException], () => alterConfigWithZk(configs, None, encoderConfigs))
// Dynamic config updates using ZK should fail if broker is running.
registerBrokerInZk(brokerId.toInt)
assertThrows(classOf[IllegalArgumentException], () => alterConfigWithZk(Map("message.max.size" -> "210000"), Some(brokerId)))
assertThrows(classOf[IllegalArgumentException], () => alterConfigWithZk(Map("message.max.size" -> "220000"), None))
// Dynamic config updates using ZK for a different broker that is not running should succeed
alterAndVerifyConfig(Map("message.max.size" -> "230000"), Some("2"))
}
@Test
def shouldNotUpdateBrokerConfigIfMalformedConfigUsingZookeeper(): Unit = {
val createOpts = new ConfigCommandOptions(Array("--zookeeper", zkConnect,
"--entity-name", "1",
"--entity-type", "brokers",
"--alter",
"--add-config", "a=="))
assertThrows(classOf[IllegalArgumentException], () => ConfigCommand.alterConfigWithZk(null, createOpts, new DummyAdminZkClient(zkClient)))
}
@Test
def shouldNotUpdateBrokerConfigIfMalformedConfig(): Unit = {
val createOpts = new ConfigCommandOptions(Array("--bootstrap-server", "localhost:9092",
"--entity-name", "1",
"--entity-type", "brokers",
"--alter",
"--add-config", "a=="))
assertThrows(classOf[IllegalArgumentException], () => ConfigCommand.alterConfig(new DummyAdminClient(new Node(1, "localhost", 9092)), createOpts))
}
@Test
def shouldNotUpdateBrokerConfigIfMalformedBracketConfigUsingZookeeper(): Unit = {
val createOpts = new ConfigCommandOptions(Array("--zookeeper", zkConnect,
"--entity-name", "1",
"--entity-type", "brokers",
"--alter",
"--add-config", "a=[b,c,d=e"))
assertThrows(classOf[IllegalArgumentException], () => ConfigCommand.alterConfigWithZk(null, createOpts, new DummyAdminZkClient(zkClient)))
}
@Test
def shouldNotUpdateBrokerConfigIfMalformedBracketConfig(): Unit = {
val createOpts = new ConfigCommandOptions(Array("--bootstrap-server", "localhost:9092",
"--entity-name", "1",
"--entity-type", "brokers",
"--alter",
"--add-config", "a=[b,c,d=e"))
assertThrows(classOf[IllegalArgumentException], () => ConfigCommand.alterConfig(new DummyAdminClient(new Node(1, "localhost", 9092)), createOpts))
}
@Test
def shouldNotUpdateConfigIfNonExistingConfigIsDeletedUsingZookeeper(): Unit = {
val createOpts = new ConfigCommandOptions(Array("--zookeeper", zkConnect,
"--entity-name", "my-topic",
"--entity-type", "topics",
"--alter",
"--delete-config", "missing_config1, missing_config2"))
assertThrows(classOf[InvalidConfigurationException], () => ConfigCommand.alterConfigWithZk(null, createOpts, new DummyAdminZkClient(zkClient)))
}
@Test
def shouldNotUpdateConfigIfNonExistingConfigIsDeleted(): Unit = {
val resourceName = "my-topic"
val createOpts = new ConfigCommandOptions(Array("--bootstrap-server", "localhost:9092",
"--entity-name", resourceName,
"--entity-type", "topics",
"--alter",
"--delete-config", "missing_config1, missing_config2"))
val resource = new ConfigResource(ConfigResource.Type.TOPIC, resourceName)
val configEntries = List.empty[ConfigEntry].asJava
val future = new KafkaFutureImpl[util.Map[ConfigResource, Config]]
future.complete(util.Collections.singletonMap(resource, new Config(configEntries)))
val describeResult: DescribeConfigsResult = EasyMock.createNiceMock(classOf[DescribeConfigsResult])
EasyMock.expect(describeResult.all()).andReturn(future).once()
val node = new Node(1, "localhost", 9092)
val mockAdminClient = new MockAdminClient(util.Collections.singletonList(node), node) {
override def describeConfigs(resources: util.Collection[ConfigResource], options: DescribeConfigsOptions): DescribeConfigsResult = {
assertEquals(1, resources.size)
val resource = resources.iterator.next
assertEquals(resource.`type`, ConfigResource.Type.TOPIC)
assertEquals(resource.name, resourceName)
describeResult
}
}
EasyMock.replay(describeResult)
assertThrows(classOf[InvalidConfigurationException], () => ConfigCommand.alterConfig(mockAdminClient, createOpts))
EasyMock.reset(describeResult)
}
@Test
def shouldNotDeleteBrokerConfigWhileBrokerUpUsingZookeeper(): Unit = {
val createOpts = new ConfigCommandOptions(Array("--zookeeper", zkConnect,
"--entity-name", "1",
"--entity-type", "brokers",
"--alter",
"--delete-config", "a,c"))
class TestAdminZkClient(zkClient: KafkaZkClient) extends AdminZkClient(zkClient) {
override def fetchEntityConfig(entityType: String, entityName: String): Properties = {
val properties: Properties = new Properties
properties.put("a", "b")
properties.put("c", "d")
properties.put("e", "f")
properties
}
override def changeBrokerConfig(brokerIds: Seq[Int], configChange: Properties): Unit = {
assertEquals("f", configChange.get("e"))
assertEquals(1, configChange.size())
}
}
val mockZkClient: KafkaZkClient = EasyMock.createNiceMock(classOf[KafkaZkClient])
val mockBroker: Broker = EasyMock.createNiceMock(classOf[Broker])
EasyMock.expect(mockZkClient.getBroker(1)).andReturn(Option(mockBroker))
EasyMock.replay(mockZkClient)
assertThrows(classOf[IllegalArgumentException], () => ConfigCommand.alterConfigWithZk(mockZkClient, createOpts, new TestAdminZkClient(zkClient)))
}
@Test
def testScramCredentials(): Unit = {
def createOpts(user: String, config: String): ConfigCommandOptions = {
new ConfigCommandOptions(Array("--zookeeper", zkConnect,
"--entity-name", user,
"--entity-type", "users",
"--alter",
"--add-config", config))
}
def deleteOpts(user: String, mechanism: String) = new ConfigCommandOptions(Array("--zookeeper", zkConnect,
"--entity-name", user,
"--entity-type", "users",
"--alter",
"--delete-config", mechanism))
val credentials = mutable.Map[String, Properties]()
case class CredentialChange(user: String, mechanisms: Set[String], iterations: Int) extends AdminZkClient(zkClient) {
override def fetchEntityConfig(entityType: String, entityName: String): Properties = {
credentials.getOrElse(entityName, new Properties())
}
override def changeUserOrUserClientIdConfig(sanitizedEntityName: String, configChange: Properties): Unit = {
assertEquals(user, sanitizedEntityName)
assertEquals(mechanisms, configChange.keySet().asScala)
for (mechanism <- mechanisms) {
val value = configChange.getProperty(mechanism)
assertEquals(-1, value.indexOf("password="))
val scramCredential = ScramCredentialUtils.credentialFromString(value)
assertEquals(iterations, scramCredential.iterations)
if (configChange != null)
credentials.put(user, configChange)
}
}
}
val optsA = createOpts("userA", "SCRAM-SHA-256=[iterations=8192,password=abc, def]")
ConfigCommand.alterConfigWithZk(null, optsA, CredentialChange("userA", Set("SCRAM-SHA-256"), 8192))
val optsB = createOpts("userB", "SCRAM-SHA-256=[iterations=4096,password=abc, def],SCRAM-SHA-512=[password=1234=abc]")
ConfigCommand.alterConfigWithZk(null, optsB, CredentialChange("userB", Set("SCRAM-SHA-256", "SCRAM-SHA-512"), 4096))
val del256 = deleteOpts("userB", "SCRAM-SHA-256")
ConfigCommand.alterConfigWithZk(null, del256, CredentialChange("userB", Set("SCRAM-SHA-512"), 4096))
val del512 = deleteOpts("userB", "SCRAM-SHA-512")
ConfigCommand.alterConfigWithZk(null, del512, CredentialChange("userB", Set(), 4096))
}
@Test
def testQuotaConfigEntityUsingZookeeperNotAllowed(): Unit = {
assertThrows(classOf[IllegalArgumentException], () => doTestQuotaConfigEntity(zkConfig = true))
}
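// Exercises quota entity parsing for clients, users and <user, client-id> pairs, using either
// --zookeeper or --bootstrap-server, and checks both the sanitized entity names and the argument
// combinations that must be rejected.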
def doTestQuotaConfigEntity(zkConfig: Boolean): Unit = {
val connectOpts = if (zkConfig)
("--zookeeper", zkConnect)
else
("--bootstrap-server", "localhost:9092")
def createOpts(entityType: String, entityName: Option[String], otherArgs: Array[String]) : ConfigCommandOptions = {
val optArray = Array(connectOpts._1, connectOpts._2, "--entity-type", entityType)
val nameArray = entityName match {
case Some(name) => Array("--entity-name", name)
case None => Array[String]()
}
new ConfigCommandOptions(optArray ++ nameArray ++ otherArgs)
}
def checkEntity(entityType: String, entityName: Option[String], expectedEntityName: String, otherArgs: Array[String]): Unit = {
val opts = createOpts(entityType, entityName, otherArgs)
opts.checkArgs()
val entity = ConfigCommand.parseEntity(opts)
assertEquals(entityType, entity.root.entityType)
assertEquals(expectedEntityName, entity.fullSanitizedName)
}
def checkInvalidArgs(entityType: String, entityName: Option[String], otherArgs: Array[String]): Unit = {
val opts = createOpts(entityType, entityName, otherArgs)
assertThrows(classOf[IllegalArgumentException], () => opts.checkArgs())
}
def checkInvalidEntity(entityType: String, entityName: Option[String], otherArgs: Array[String]): Unit = {
val opts = createOpts(entityType, entityName, otherArgs)
opts.checkArgs()
assertThrows(classOf[IllegalArgumentException], () => ConfigCommand.parseEntity(opts))
}
val describeOpts = Array("--describe")
val alterOpts = Array("--alter", "--add-config", "a=b,c=d")
// <client-id> quota
val clientId = "client-1"
for (opts <- Seq(describeOpts, alterOpts)) {
checkEntity("clients", Some(clientId), clientId, opts)
checkEntity("clients", Some(""), ConfigEntityName.Default, opts)
}
checkEntity("clients", None, "", describeOpts)
checkInvalidArgs("clients", None, alterOpts)
// <user> quota
val principal = "CN=ConfigCommandTest,O=Apache,L=<default>"
val sanitizedPrincipal = Sanitizer.sanitize(principal)
assertEquals(-1, sanitizedPrincipal.indexOf('='))
assertEquals(principal, Sanitizer.desanitize(sanitizedPrincipal))
for (opts <- Seq(describeOpts, alterOpts)) {
checkEntity("users", Some(principal), sanitizedPrincipal, opts)
checkEntity("users", Some(""), ConfigEntityName.Default, opts)
}
checkEntity("users", None, "", describeOpts)
checkInvalidArgs("users", None, alterOpts)
// <user, client-id> quota
val userClient = sanitizedPrincipal + "/clients/" + clientId
def clientIdOpts(name: String) = Array("--entity-type", "clients", "--entity-name", name)
for (opts <- Seq(describeOpts, alterOpts)) {
checkEntity("users", Some(principal), userClient, opts ++ clientIdOpts(clientId))
checkEntity("users", Some(principal), sanitizedPrincipal + "/clients/" + ConfigEntityName.Default, opts ++ clientIdOpts(""))
checkEntity("users", Some(""), ConfigEntityName.Default + "/clients/" + clientId, describeOpts ++ clientIdOpts(clientId))
checkEntity("users", Some(""), ConfigEntityName.Default + "/clients/" + ConfigEntityName.Default, opts ++ clientIdOpts(""))
}
checkEntity("users", Some(principal), sanitizedPrincipal + "/clients", describeOpts ++ Array("--entity-type", "clients"))
// Both user and client-id must be provided for alter
checkInvalidEntity("users", Some(principal), alterOpts ++ Array("--entity-type", "clients"))
checkInvalidEntity("users", None, alterOpts ++ clientIdOpts(clientId))
checkInvalidArgs("users", None, alterOpts ++ Array("--entity-type", "clients"))
}
@Test
def testQuotaConfigEntity(): Unit = {
doTestQuotaConfigEntity(zkConfig = false)
}
@Test
def testUserClientQuotaOptsUsingZookeeperNotAllowed(): Unit = {
assertThrows(classOf[IllegalArgumentException], () => doTestUserClientQuotaOpts(zkConfig = true))
}
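// Verifies that user/client quota entities are parsed and sanitized correctly regardless of the
// order in which the --entity-type/--entity-name options appear.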
def doTestUserClientQuotaOpts(zkConfig: Boolean): Unit = {
val connectOpts = if (zkConfig)
("--zookeeper", zkConnect)
else
("--bootstrap-server", "localhost:9092")
def checkEntity(expectedEntityType: String, expectedEntityName: String, args: String*): Unit = {
val opts = new ConfigCommandOptions(Array(connectOpts._1, connectOpts._2) ++ args)
opts.checkArgs()
val entity = ConfigCommand.parseEntity(opts)
assertEquals(expectedEntityType, entity.root.entityType)
assertEquals(expectedEntityName, entity.fullSanitizedName)
}
// <default> is a valid user principal and client-id (it can be handled with URL-encoding).
checkEntity("users", Sanitizer.sanitize("<default>"),
"--entity-type", "users", "--entity-name", "<default>",
"--alter", "--add-config", "a=b,c=d")
checkEntity("clients", Sanitizer.sanitize("<default>"),
"--entity-type", "clients", "--entity-name", "<default>",
"--alter", "--add-config", "a=b,c=d")
checkEntity("users", Sanitizer.sanitize("CN=user1") + "/clients/client1",
"--entity-type", "users", "--entity-name", "CN=user1", "--entity-type", "clients", "--entity-name", "client1",
"--alter", "--add-config", "a=b,c=d")
checkEntity("users", Sanitizer.sanitize("CN=user1") + "/clients/client1",
"--entity-name", "CN=user1", "--entity-type", "users", "--entity-name", "client1", "--entity-type", "clients",
"--alter", "--add-config", "a=b,c=d")
checkEntity("users", Sanitizer.sanitize("CN=user1") + "/clients/client1",
"--entity-type", "clients", "--entity-name", "client1", "--entity-type", "users", "--entity-name", "CN=user1",
"--alter", "--add-config", "a=b,c=d")
checkEntity("users", Sanitizer.sanitize("CN=user1") + "/clients/client1",
"--entity-name", "client1", "--entity-type", "clients", "--entity-name", "CN=user1", "--entity-type", "users",
"--alter", "--add-config", "a=b,c=d")
checkEntity("users", Sanitizer.sanitize("CN=user1") + "/clients",
"--entity-type", "clients", "--entity-name", "CN=user1", "--entity-type", "users",
"--describe")
checkEntity("users", "/clients",
"--entity-type", "clients", "--entity-type", "users",
"--describe")
checkEntity("users", Sanitizer.sanitize("CN=user1") + "/clients/" + Sanitizer.sanitize("client1?@%"),
"--entity-name", "client1?@%", "--entity-type", "clients", "--entity-name", "CN=user1", "--entity-type", "users",
"--alter", "--add-config", "a=b,c=d")
}
@Test
def testUserClientQuotaOpts(): Unit = {
doTestUserClientQuotaOpts(zkConfig = false)
}
@Test
def testQuotaDescribeEntities(): Unit = {
val zkClient: KafkaZkClient = EasyMock.createNiceMock(classOf[KafkaZkClient])
def checkEntities(opts: Array[String], expectedFetches: Map[String, Seq[String]], expectedEntityNames: Seq[String]): Unit = {
val entity = ConfigCommand.parseEntity(new ConfigCommandOptions(opts :+ "--describe"))
expectedFetches.foreach {
case (name, values) => EasyMock.expect(zkClient.getAllEntitiesWithConfig(name)).andReturn(values)
}
EasyMock.replay(zkClient)
val entities = entity.getAllEntities(zkClient)
assertEquals(expectedEntityNames, entities.map(e => e.fullSanitizedName))
EasyMock.reset(zkClient)
}
val clientId = "a-client"
val principal = "CN=ConfigCommandTest.testQuotaDescribeEntities , O=Apache, L=<default>"
val sanitizedPrincipal = Sanitizer.sanitize(principal)
val userClient = sanitizedPrincipal + "/clients/" + clientId
var opts = Array("--entity-type", "clients", "--entity-name", clientId)
checkEntities(opts, Map.empty, Seq(clientId))
opts = Array("--entity-type", "clients", "--entity-default")
checkEntities(opts, Map.empty, Seq("<default>"))
opts = Array("--entity-type", "clients")
checkEntities(opts, Map("clients" -> Seq(clientId)), Seq(clientId))
opts = Array("--entity-type", "users", "--entity-name", principal)
checkEntities(opts, Map.empty, Seq(sanitizedPrincipal))
opts = Array("--entity-type", "users", "--entity-default")
checkEntities(opts, Map.empty, Seq("<default>"))
opts = Array("--entity-type", "users")
checkEntities(opts, Map("users" -> Seq("<default>", sanitizedPrincipal)), Seq("<default>", sanitizedPrincipal))
opts = Array("--entity-type", "users", "--entity-name", principal, "--entity-type", "clients", "--entity-name", clientId)
checkEntities(opts, Map.empty, Seq(userClient))
opts = Array("--entity-type", "users", "--entity-name", principal, "--entity-type", "clients", "--entity-default")
checkEntities(opts, Map.empty, Seq(sanitizedPrincipal + "/clients/<default>"))
opts = Array("--entity-type", "users", "--entity-name", principal, "--entity-type", "clients")
checkEntities(opts,
Map("users/" + sanitizedPrincipal + "/clients" -> Seq("client-4")),
Seq(sanitizedPrincipal + "/clients/client-4"))
opts = Array("--entity-type", "users", "--entity-default", "--entity-type", "clients")
checkEntities(opts,
Map("users/<default>/clients" -> Seq("client-5")),
Seq("<default>/clients/client-5"))
opts = Array("--entity-type", "users", "--entity-type", "clients")
val userMap = Map("users/" + sanitizedPrincipal + "/clients" -> Seq("client-2"))
val defaultUserMap = Map("users/<default>/clients" -> Seq("client-3"))
checkEntities(opts,
Map("users" -> Seq("<default>", sanitizedPrincipal)) ++ defaultUserMap ++ userMap,
Seq("<default>/clients/client-3", sanitizedPrincipal + "/clients/client-2"))
}
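// Registers a live broker in ZooKeeper so that tests can verify behaviour that depends on whether
// a broker is currently running.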
private def registerBrokerInZk(id: Int): Unit = {
zkClient.createTopLevelPaths()
val securityProtocol = SecurityProtocol.PLAINTEXT
val endpoint = new EndPoint("localhost", 9092, ListenerName.forSecurityProtocol(securityProtocol), securityProtocol)
val brokerInfo = BrokerInfo(Broker(id, Seq(endpoint), rack = None), ApiVersion.latestVersion, jmxPort = 9192)
zkClient.registerBroker(brokerInfo)
}
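// No-op AdminZkClient used by tests that only need the command to parse and validate arguments
// without touching ZooKeeper-backed configs.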
class DummyAdminZkClient(zkClient: KafkaZkClient) extends AdminZkClient(zkClient) {
override def changeBrokerConfig(brokerIds: Seq[Int], configs: Properties): Unit = {}
override def fetchEntityConfig(entityType: String, entityName: String): Properties = {new Properties}
override def changeClientIdConfig(clientId: String, configs: Properties): Unit = {}
override def changeUserOrUserClientIdConfig(sanitizedEntityName: String, configs: Properties): Unit = {}
override def changeTopicConfig(topic: String, configs: Properties): Unit = {}
}
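// Admin client stub whose describe/alter operations all return relaxed EasyMock mocks, for tests
// that exercise argument validation rather than broker interaction.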
class DummyAdminClient(node: Node) extends MockAdminClient(util.Collections.singletonList(node), node) {
override def describeConfigs(resources: util.Collection[ConfigResource], options: DescribeConfigsOptions): DescribeConfigsResult =
EasyMock.createNiceMock(classOf[DescribeConfigsResult])
override def incrementalAlterConfigs(configs: util.Map[ConfigResource, util.Collection[AlterConfigOp]],
options: AlterConfigsOptions): AlterConfigsResult = EasyMock.createNiceMock(classOf[AlterConfigsResult])
override def alterConfigs(configs: util.Map[ConfigResource, Config], options: AlterConfigsOptions): AlterConfigsResult =
EasyMock.createNiceMock(classOf[AlterConfigsResult])
override def describeClientQuotas(filter: ClientQuotaFilter, options: DescribeClientQuotasOptions): DescribeClientQuotasResult =
EasyMock.createNiceMock(classOf[DescribeClientQuotasResult])
override def alterClientQuotas(entries: util.Collection[ClientQuotaAlteration],
options: AlterClientQuotasOptions): AlterClientQuotasResult =
EasyMock.createNiceMock(classOf[AlterClientQuotasResult])
}
}
| guozhangwang/kafka | core/src/test/scala/unit/kafka/admin/ConfigCommandTest.scala | Scala | apache-2.0 | 80,618 |
package org.jetbrains.plugins.scala.project
import com.intellij.util.net.HttpConfigurable
import org.jetbrains.plugins.scala.buildinfo.BuildInfo
import org.jetbrains.plugins.scala.project.Platform.{Dotty, Scala}
import org.jetbrains.sbt.Sbt
import scala.io.Source
import scala.util.Try
import scala.util.matching.Regex
/**
* @author Pavel Fatin
*/
object Versions {
val DefaultScalaVersion: Version = Entity.Scala.defaultVersion
val DefaultDottyVersion: Version = Entity.Dotty.defaultVersion
val DefaultSbtVersion: Version = Entity.Sbt1.defaultVersion
def loadScalaVersions(platform: Platform): Array[String] = platform match {
case Scala => loadVersionsOf(Entity.Scala)
case Dotty => loadVersionsOf(Entity.Dotty)
}
def loadSbtVersions: Array[String] = loadVersionsOf(Entity.Sbt013, Entity.Sbt1)
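// Downloads the version listing for each entity from its repository index page, falls back to the
// hardcoded versions when the download fails, drops versions below the entity's minimum and returns
// the remaining ones sorted in descending order.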
private def loadVersionsOf(entities: Entity*): Array[String] = {
val allVersions = entities.flatMap { entity =>
val loaded = loadVersionsFrom(entity.url, {
case entity.pattern(number) => number
})
loaded
.getOrElse(entity.hardcodedVersions)
.filter(_ >= entity.minVersion)
}
allVersions
.sortWith(_ >= _)
.map(_.presentation)
.toArray
}
private def loadVersionsFrom(url: String, filter: PartialFunction[String, String]): Try[Seq[Version]] = {
loadLinesFrom(url).map { lines => lines.collect(filter).map(Version.apply) }
}
def loadLinesFrom(url: String): Try[Seq[String]] = {
Try(HttpConfigurable.getInstance().openHttpConnection(url)).map { connection =>
try {
Source.fromInputStream(connection.getInputStream).getLines().toVector
} finally {
connection.disconnect()
}
}
}
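// Describes where to look up versions for one tool: the repository index URL, the regex that
// extracts version numbers from it, the minimum supported version and the hardcoded fallback
// versions (the last of which is used as the default).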
private case class Entity(url: String, pattern: Regex, minVersion: Version, hardcodedVersions: Seq[Version]) {
def defaultVersion: Version = hardcodedVersions.last
}
private object Entity {
val Scala = Entity("http://repo1.maven.org/maven2/org/scala-lang/scala-compiler/",
".+>(\\d+\\.\\d+\\.\\d+)/<.*".r,
Version("2.10.0"),
Seq("2.10.7", "2.11.12", BuildInfo.scalaVersion).map(Version.apply))
val Sbt013 = Entity("https://dl.bintray.com/typesafe/ivy-releases/org.scala-sbt/sbt-launch/",
".+>(\\d+\\.\\d+\\.\\d+)/<.*".r,
Version("0.13.5"),
Seq(Sbt.Latest_0_13))
val Sbt1 = Entity("https://dl.bintray.com/sbt/maven-releases/org/scala-sbt/sbt-launch/",
".+>(\\d+\\.\\d+\\.\\d+)/<.*".r,
Version("1.0.0"),
Seq(Sbt.Latest_1_0, Sbt.LatestVersion).distinct)
val Dotty = Entity("https://repo1.maven.org/maven2/ch/epfl/lamp/dotty_0.2/",
""".+>(\d+.\d+.+)/<.*""".r,
Version("0.2.0"),
Seq(Version("0.2.0")))
}
} | jastice/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/project/Versions.scala | Scala | apache-2.0 | 2,739 |
/*
* Copyright (C) 2016 Christopher Batey and Dogan Narinc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scassandra.server.actors
import akka.actor.ActorSystem
import akka.testkit.{ImplicitSender, TestActorRef, TestKit}
import org.scalatest.{FunSuiteLike, Matchers}
import org.scassandra.codec._
class OptionsHandlerTest extends TestKit(ActorSystem("TestSystem")) with ProtocolActorTest with FunSuiteLike
with ImplicitSender with Matchers {
test("Should send supported message on any Options message") {
val message = protocolMessage(Options)
val underTest = TestActorRef(new OptionsHandler)
underTest ! message
expectMsgPF() {
case ProtocolResponse(_, Supported(_)) => true
}
}
}
| mikefero/cpp-driver | gtests/src/integration/scassandra/server/server/src/test/scala/org/scassandra/server/actors/OptionsHandlerTest.scala | Scala | apache-2.0 | 1,244 |
package filodb.core.memstore
import com.typesafe.config.ConfigFactory
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}
import filodb.core._
import filodb.core.binaryrecord2.RecordBuilder
import filodb.memory._
import filodb.memory.format.UnsafeUtils
class PartitionSetSpec extends MemFactoryCleanupTest with ScalaFutures {
import MachineMetricsData._
import TimeSeriesPartitionSpec._
implicit override val patienceConfig = PatienceConfig(timeout = Span(2, Seconds), interval = Span(50, Millis))
val config = ConfigFactory.load("application_test.conf").getConfig("filodb")
val chunkRetentionHours = 72
var part: TimeSeriesPartition = null
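// When a block is reclaimed, read the chunk ID from the block metadata and remove those chunks
// from the test's partition.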
val reclaimer = new ReclaimListener {
def onReclaim(metaAddr: Long, numBytes: Int): Unit = {
assert(numBytes == dataset2.blockMetaSize)
val partID = UnsafeUtils.getInt(metaAddr)
val chunkID = UnsafeUtils.getLong(metaAddr + 4)
part.removeChunksAt(chunkID)
}
}
private val blockStore = new PageAlignedBlockManager(100 * 1024 * 1024,
new MemoryStats(Map("test"-> "test")), reclaimer, 1)
protected val bufferPool = new WriteBufferPool(memFactory, dataset2, TestData.storeConf)
private val ingestBlockHolder = new BlockMemFactory(blockStore, None, dataset2.blockMetaSize, true)
val builder = new RecordBuilder(memFactory, dataset2.ingestionSchema)
val partSet = PartitionSet.empty(dataset2.ingestionSchema, dataset2.comparator)
before {
partSet.clear()
}
val tenRecords = withMap(linearMultiSeries(), extraTags=extraTags).take(10)
addToBuilder(builder, tenRecords)
val ingestRecordAddrs = builder.allContainers.head.allOffsets
// println(s"XXX container base = ${builder.allContainers.head.base}")
// println(s"ingestRecordAddrs=$ingestRecordAddrs")
// println(s"\n---\n${ingestRecordAddrs.foreach(a => println(dataset2.ingestionSchema.stringify(a)))}")
val partKeyBuilder = new RecordBuilder(memFactory, dataset2.partKeySchema)
ingestRecordAddrs.foreach { addr =>
dataset2.comparator.buildPartKeyFromIngest(null, addr, partKeyBuilder)
}
val partKeyAddrs = partKeyBuilder.allContainers.head.allOffsets
// println(s"partKeyAddrs=$partKeyAddrs")
// println(s"\n---\n${partKeyAddrs.foreach(a => println(dataset2.partKeySchema.stringify(a)))}")
it("+=/add should add TSPartitions only if its not already part of the set") {
partSet.size shouldEqual 0
partSet.isEmpty shouldEqual true
val part = makePart(0, dataset2, partKeyAddrs(0), bufferPool)
partSet += part
partSet.size shouldEqual 1
partSet.isEmpty shouldEqual false
partSet(part) shouldEqual true
// Now adding it again should not succeed
partSet.add(part) shouldEqual false
partSet.size shouldEqual 1
partSet.isEmpty shouldEqual false
partSet(part) shouldEqual true
}
it("should get existing TSPartitions with getOrAddWithIngestBR") {
val part = makePart(0, dataset2, partKeyAddrs(0), bufferPool)
partSet += part
partSet.size shouldEqual 1
val got = partSet.getOrAddWithIngestBR(null, ingestRecordAddrs(0), { throw new RuntimeException("error")} )
got shouldEqual part
partSet.size shouldEqual 1
}
it("should add new TSPartition if one doesnt exist with getOrAddWithIngestBR") {
partSet.isEmpty shouldEqual true
partSet.getWithPartKeyBR(null, partKeyAddrs(0)) shouldEqual None
partSet.getWithIngestBR(null, ingestRecordAddrs(0)) shouldEqual null
val part = makePart(0, dataset2, partKeyAddrs(0), bufferPool)
val got = partSet.getOrAddWithIngestBR(null, ingestRecordAddrs(0), part)
partSet.size shouldEqual 1
partSet.isEmpty shouldEqual false
got shouldEqual part
partSet.getWithPartKeyBR(null, partKeyAddrs(0)) shouldEqual Some(part)
partSet.getWithIngestBR(null, ingestRecordAddrs(0)) shouldEqual part
}
it("should not add new TSPartition if function returns null") {
partSet.isEmpty shouldEqual true
partSet.getWithPartKeyBR(null, partKeyAddrs(0)) shouldEqual None
val got = partSet.getOrAddWithIngestBR(null, ingestRecordAddrs(0), null)
got shouldEqual null
partSet.isEmpty shouldEqual true
partSet.getWithPartKeyBR(null, partKeyAddrs(0)) shouldEqual None
}
it("should remove TSPartitions correctly") {
val part = makePart(0, dataset2, partKeyAddrs(0), bufferPool)
partSet += part
partSet.size shouldEqual 1
partSet.remove(part)
partSet.size shouldEqual 0
partSet.isEmpty shouldEqual true
}
} | velvia/FiloDB | core/src/test/scala/filodb.core/memstore/PartitionSetSpec.scala | Scala | apache-2.0 | 4,530 |
/*
* Copyright (c) 2016 Lucas Satabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gnieh.regex
import scala.annotation.tailrec
package object tdfa {
type State = Int
implicit class RichSeq[T](val seq: Seq[T]) extends AnyVal {
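// Left fold that can stop early: f returns the new accumulator and a flag saying whether to continue.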
def foldCut[U](zero: U)(f: (U, T) => (U, Boolean)): U = {
@tailrec
def loop(seq: Seq[T], acc: U): U =
if (seq.isEmpty) {
acc
} else {
val elt = seq.head
val (acc1, cont) = f(acc, elt)
if (cont) {
loop(seq.tail, acc1)
} else {
acc1
}
}
loop(seq, zero)
}
}
implicit class RichSet[T](val set: Set[T]) extends AnyVal {
def foldCut[U](zero: U)(f: (U, T) => (U, Boolean)): U = {
@tailrec
def loop(set: Set[T], acc: U): U =
if (set.isEmpty) {
acc
} else {
val elt = set.head
val (acc1, cont) = f(acc, elt)
if (cont) {
val rest = set - elt
loop(rest, acc1)
} else {
acc1
}
}
loop(set, zero)
}
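// Folds over the set but aborts with None as soon as f returns None for an element.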
def foldIfAll[U](zero: U)(f: (U, T) => Option[U]): Option[U] = {
@tailrec
def loop(set: Set[T], acc: U): Option[U] =
if (set.isEmpty) {
Some(acc)
} else {
val elt = set.head
f(acc, elt) match {
case Some(acc) =>
val rest = set - elt
loop(rest, acc)
case None =>
None
}
}
loop(set, zero)
}
}
}
| gnieh/tekstlib | src/main/scala/gnieh/regex/tdfa/package.scala | Scala | apache-2.0 | 2,069 |
package com.ponkotuy.queries
case class LoginEmail(email: String, password: String)
| ponkotuy/aggregate-exif | app/com/ponkotuy/queries/LoginEmail.scala | Scala | apache-2.0 | 85 |
package io.hydrosphere.mist.master.interfaces.http
import java.lang.management._
import java.time.LocalDateTime
import io.hydrosphere.mist.core.FunctionInfoData
import io.hydrosphere.mist.master.models.{ContextConfig, RunMode}
import mist.api._
import scala.concurrent.duration.Duration
case class HttpJobInfo(
name: String,
execute: Option[Map[String, HttpJobArg]] = None,
serve: Option[Map[String, HttpJobArg]] = None,
isHiveJob: Boolean = false,
isSqlJob: Boolean = false,
isStreamingJob: Boolean = false,
isMLJob: Boolean = false,
isPython: Boolean = false
)
object HttpJobInfo {
def forPython(name: String) = HttpJobInfo(name = name, isPython = true)
def convert(info: FunctionInfoData): HttpJobInfo = {
val argsMap = info.execute
.collect { case u: UserInputArgument => u }
.map { a => a.name -> HttpJobArg.convert(a.t) }
.toMap
val jobInfo = HttpJobInfo(
name = info.name,
isPython = info.lang == FunctionInfoData.PythonLang
)
if (info.isServe)
jobInfo.copy(serve = Some(argsMap))
else jobInfo.copy(execute = Some(argsMap))
}
}
case class HttpJobArg(
`type`: String,
args: Option[Seq[HttpJobArg]],
fields: Option[Map[String, HttpJobArg]] = None
)
object HttpJobArg {
import cats.syntax.option._
def plain(`type`: String): HttpJobArg =
HttpJobArg(`type`, None, None)
def withTypeArgs(`type`: String, args: Seq[HttpJobArg]): HttpJobArg =
HttpJobArg(`type`, args.some)
def complex(`type`: String, fields: Map[String, HttpJobArg]): HttpJobArg =
HttpJobArg(`type`, None, fields.some)
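// Recursively converts a function argument type into its HTTP representation: primitive types map
// to a plain name, containers carry their type arguments and complex objects expose their fields.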
def convert(argType: ArgType): HttpJobArg = {
val t = argType.getClass.getSimpleName.replace("$", "")
argType match {
case x@(MBoolean | MInt | MDouble | MString | MAny) => plain(t)
case x: MMap => withTypeArgs(t, Seq(x.k, x.v).map(convert))
case x: MList => withTypeArgs(t, Seq(convert(x.v)))
case x: MOption => withTypeArgs(t, Seq(convert(x.v)))
case x: MObj => complex(t, x.fields.map({ case (k, v) => k -> convert(v) }).toMap)
}
}
}
case class HttpFunctionInfoV2(
name: String,
lang: String,
execute: Map[String, HttpJobArg] = Map.empty,
tags: Seq[String] = Seq.empty,
path: String,
className: String,
defaultContext: String
)
object HttpFunctionInfoV2 {
def convert(info: FunctionInfoData): HttpFunctionInfoV2 = {
HttpFunctionInfoV2(
name = info.name,
path = info.path,
className = info.className,
tags = info.tags,
defaultContext = info.defaultContext,
execute = info.execute
.map(a => a.name -> HttpJobArg.convert(a.t))
.toMap,
lang = info.lang
)
}
}
case class EndpointCreateRequest(
name: String,
path: String,
className: String,
nameSpace: String
)
case class ContextCreateRequest(
name: String,
sparkConf: Option[Map[String, String]] = None,
downtime: Option[Duration] = None,
maxJobs: Option[Int] = None,
precreated: Option[Boolean] = None,
workerMode: Option[RunMode] = None,
runOptions: Option[String] = None,
streamingDuration: Option[Duration] = None,
maxConnFailures: Option[Int] = None
) {
def toContextWithFallback(other: ContextConfig): ContextConfig =
ContextConfig(
name,
sparkConf.getOrElse(other.sparkConf),
downtime.getOrElse(other.downtime),
maxJobs.getOrElse(other.maxJobs),
precreated.getOrElse(other.precreated),
runOptions.getOrElse(other.runOptions),
workerMode.getOrElse(other.workerMode),
streamingDuration.getOrElse(other.streamingDuration),
maxConnFailures.getOrElse(other.maxConnFailures)
)
}
object ContextCreateRequest {
val AvailableRunMode = Set("shared", "exclusive")
}
case class MistStatus(
mistVersion: String,
sparkVersion: String,
started: LocalDateTime,
gc: Map[String, GCMetrics],
memory: HeapMetrics,
threads: ThreadMetrics,
javaVersion: JavaVersionInfo
)
object MistStatus {
import io.hydrosphere.mist.BuildInfo
import scala.collection.JavaConverters._
val Started = LocalDateTime.now()
val SparkVersion = BuildInfo.sparkVersion
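// Samples the JVM management beans (GC, heap/non-heap memory and threads) and combines them with
// build information into a status snapshot.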
def create: MistStatus = {
val beans = ManagementFactory.getGarbageCollectorMXBeans.asScala
val memoryMXBean = ManagementFactory.getMemoryMXBean
val threadMXBean = ManagementFactory.getThreadMXBean
val gCMetrics = beans.map(gc => s"${gc.getName}" -> GCMetrics.create(gc)).toMap
MistStatus(
BuildInfo.version,
SparkVersion,
Started,
gCMetrics,
HeapMetrics.create(memoryMXBean),
ThreadMetrics.create(threadMXBean),
JavaVersionInfo.create
)
}
}
case class GCMetrics(collectionCount: Long, collectionTimeInSec: Long)
object GCMetrics {
def create(gc: GarbageCollectorMXBean): GCMetrics = GCMetrics(
gc.getCollectionCount, gc.getCollectionTime / 1000
)
}
case class Heap(used: Long, commited: Long, max: Long, init: Long)
object Heap {
val BytesPerMegabyte = 1024 * 1024
def create(memory: MemoryUsage): Heap = {
Heap(
memory.getUsed / BytesPerMegabyte,
memory.getCommitted / BytesPerMegabyte,
memory.getMax / BytesPerMegabyte,
memory.getInit / BytesPerMegabyte
)
}
}
case class HeapMetrics(heap: Heap, nonHeap: Heap)
object HeapMetrics {
def create(memoryMXBean: MemoryMXBean): HeapMetrics = {
HeapMetrics(
Heap.create(memoryMXBean.getHeapMemoryUsage),
Heap.create(memoryMXBean.getNonHeapMemoryUsage)
)
}
}
case class ThreadMetrics(
count: Long,
daemon: Long,
peak: Long,
startedTotal: Long,
deadlocked: Option[Long],
deadlockedMonitor: Option[Long]
)
object ThreadMetrics {
def create(threadMXBean: ThreadMXBean): ThreadMetrics = {
ThreadMetrics(
threadMXBean.getThreadCount.toLong,
threadMXBean.getDaemonThreadCount.toLong,
threadMXBean.getPeakThreadCount.toLong,
threadMXBean.getTotalStartedThreadCount,
Option(threadMXBean.findDeadlockedThreads()).map(_.length.toLong),
Option(threadMXBean.findMonitorDeadlockedThreads()).map(_.length.toLong)
)
}
}
case class JavaVersionInfo(runtimeVersion: String, vmVendor: String)
object JavaVersionInfo {
def create: JavaVersionInfo = {
JavaVersionInfo(
System.getProperty("java.runtime.version", "unknown"),
System.getProperty("java.vm.vendor", "unknown")
)
}
}
| Hydrospheredata/mist | mist/master/src/main/scala/io/hydrosphere/mist/master/interfaces/http/models.scala | Scala | apache-2.0 | 6,389 |
package text.kanji
/**
* @author ynupc
* Created on 2016/07/26
*/
object JISLevel4KanjiCharacter extends KanjiCharacter {
override val kanji: Seq[String] = readKanjiCSV("jis_level_4")
} | ynupc/scalastringcourseday7 | src/main/scala/text/kanji/JISLevel4KanjiCharacter.scala | Scala | apache-2.0 | 202 |
package mr.merc.economics
import mr.merc.politics.{Party, PopulationElectionReport, RegionElectionReport, State, StateElectionReport}
import org.scalatest.funsuite.AnyFunSuite
import Party._
class PoliticalSystemTest extends AnyFunSuite {
private val state = new State("", Culture.LatinHuman, 0, Party.Absolute, 0)
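// Wraps raw per-party votes into a minimal election report with a single region and population.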
private def electionResults(map:Map[Party, Double]) = StateElectionReport(List(RegionElectionReport(null,
List(PopulationElectionReport(null, map)))))
test("findCoalition") {
val politicalSystem = new PoliticalSystem(Conservative, state, 0)
val coalition1 = politicalSystem.findCoalition(Map(Aristocratic -> 0.51, Benevolent -> 0.4, Capitalistic -> 0.09))
assert(coalition1 === Set(Aristocratic))
val coalition2 = politicalSystem.findCoalition(Map(Aristocratic -> 0.2, Benevolent -> 0.4, SocialDemocratic -> 0.4))
assert(coalition2 === Set(Benevolent, Aristocratic))
}
test("applyElectionResults") {
val politicalSystem = new PoliticalSystem(Conservative, state, 0)
politicalSystem.applyElectionResults(electionResults(Map(
Manufactorers -> 300,
Capitalistic -> 400,
Theocratic -> 300
)), 0)
assert(politicalSystem.rulingParty === Capitalistic)
assert(politicalSystem.parliament === Some(ParliamentParties(Map(
Manufactorers -> 0.3, Capitalistic -> 0.4, Theocratic -> 0.3),
Set(Manufactorers, Capitalistic))))
}
test("changeAbsoluteRulingParty") {
val politicalSystem = new PoliticalSystem(Absolute, state, 0)
politicalSystem.changeAbsoluteRulingParty(Benevolent)
assert(politicalSystem.rulingParty === Benevolent)
assert(politicalSystem.parliament === None)
intercept[RuntimeException] {
val ps2 = new PoliticalSystem(Conservative, state, 0)
ps2.changeAbsoluteRulingParty(Benevolent)
}
intercept[RuntimeException] {
politicalSystem.changeAbsoluteRulingParty(Capitalistic)
}
}
test("usurpPower to absolute") {
val politicalSystem = new PoliticalSystem(Capitalistic, state, 0)
politicalSystem.usurpPower(Benevolent, 0)
assert(politicalSystem.parliament === None)
assert(politicalSystem.rulingParty === Benevolent)
val ps2 = new PoliticalSystem(Capitalistic, state, 0)
intercept[RuntimeException] {
ps2.usurpPower(Magocratic, 0)
}
}
test("usurpPower to constitutional") {
val politicalSystem = new PoliticalSystem(Conservative, state, 0)
politicalSystem.usurpPower(Magocratic, 0)
assert(politicalSystem.parliament.isDefined === true)
assert(politicalSystem.parliament.get.coalition === Set(Magocratic))
assert(politicalSystem.parliament.get.parties(Magocratic) > 0.5)
assert(politicalSystem.rulingParty === Magocratic)
intercept[RuntimeException] {
val ps2 = new PoliticalSystem(Conservative, state, 0)
ps2.usurpPower(Absolute, 0)
}
}
test("giveUpPower to constitutional") {
val politicalSystem = new PoliticalSystem(Benevolent, state, 0)
politicalSystem.giveUpPower(Magocratic, 2)
assert(politicalSystem.parliament.isDefined === true)
assert(politicalSystem.parliament.get.coalition === Set(Magocratic))
assert(politicalSystem.parliament.get.parties ===Map(Magocratic -> 1d))
assert(politicalSystem.rulingParty === Magocratic)
intercept[RuntimeException] {
val ps2 = new PoliticalSystem(Benevolent, state, 0)
ps2.giveUpPower(Conservative, 2)
}
}
test("giveUpPower to democracy") {
val politicalSystem = new PoliticalSystem(Magocratic, state, 0)
politicalSystem.giveUpPower(Conservative, 2)
assert(politicalSystem.parliament.isDefined === true)
assert(politicalSystem.parliament.get.coalition === Set(Conservative))
assert(politicalSystem.parliament.get.parties(Conservative) > 0.5)
assert(politicalSystem.rulingParty === Conservative)
intercept[RuntimeException] {
val ps2 = new PoliticalSystem(Magocratic, state, 0)
ps2.giveUpPower(Benevolent,2)
}
}
test("new political system") {
val ps = new PoliticalSystem(Absolute, state, 0)
assert(ps.parliament === None)
assert(ps.rulingParty === Absolute)
val ps2 = new PoliticalSystem(Magocratic, state, 0)
assert(ps2.parliament === Some(ParliamentParties(Map(Magocratic -> 1d), Set(Magocratic))))
}
}
| RenualdMarch/merc | src/test/scala/mr/merc/economics/PoliticalSystemTest.scala | Scala | gpl-3.0 | 4,335 |
// scalastyle:off
package controllers
import javax.inject.Inject
import com.mohiva.play.silhouette.api._
import com.mohiva.play.silhouette.api.Silhouette
import com.mohiva.play.silhouette.api.exceptions.ProviderException
import com.mohiva.play.silhouette.impl.providers.CredentialsProvider
import com.mohiva.play.silhouette.api.repositories.AuthInfoRepository
import com.mohiva.play.silhouette.api.services.AvatarService
import com.mohiva.play.silhouette.api.util.{Credentials, PasswordInfo, PasswordHasher}
import com.mohiva.play.silhouette.impl.authenticators.CookieAuthenticator
import controllers.PasswordChangeController.ChangeInfo
import play.api.data.Form
import play.api.data.Forms._
import play.api.data.validation.Constraints._
import play.api.i18n.{ MessagesApi, Messages }
import play.api.mvc._
import play.api.{Logger}
import play.api.libs.concurrent.Execution.Implicits._
import utils.{MailService, Mailer}
import scala.language.postfixOps
import scala.concurrent.Future
import scala.reflect.ClassTag
import models.{TokenUser, User}
import models.services.{TokenService, UserService}
/**
* A controller to provide password change functionality
*/
class PasswordChangeController @Inject() (
val messagesApi: MessagesApi,
val env: Environment[User, CookieAuthenticator],
userService: UserService,
authInfoRepository: AuthInfoRepository,
credentialsProvider: CredentialsProvider,
avatarService: AvatarService,
passwordHasher: PasswordHasher,
tokenService: TokenService[TokenUser],
mailService: MailService)
extends Silhouette[User, CookieAuthenticator] {
val providerId = CredentialsProvider.ID
val Email = "email"
val passwordValidation = nonEmptyText(minLength = 6)
/*
* PASSWORD RESET - When user has forgotten their password and can't login
*/
val pwResetForm = Form[String] (
Email -> email.verifying( nonEmpty )
)
val passwordsForm = Form( tuple(
"password1" -> passwordValidation,
"password2" -> nonEmptyText,
"token" -> nonEmptyText
) verifying(Messages("passwords.not.equal"), passwords => passwords._2 == passwords._1 ))
private def notFoundDefault (implicit request: RequestHeader) =
Future.successful(NotFound(views.html.auth.invalidToken()))
def startResetPassword = Action.async { implicit request =>
Future.successful(Ok(views.html.auth.startResetPassword(pwResetForm)))
}
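/**
* Handles the reset-password form submission: if the address has stored credentials, a reset token
* is created and a link is mailed; otherwise an "unknown address" notice is mailed. The same
* confirmation page is returned either way, so the endpoint does not reveal whether an account exists.
*/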
def handleStartResetPassword = Action.async { implicit request =>
pwResetForm.bindFromRequest.fold (
errors => Future.successful(BadRequest(views.html.auth.startResetPassword(errors))),
email => {
authInfoRepository.find(LoginInfo(CredentialsProvider.ID,email))(ClassTag(classOf[PasswordInfo])).map {
case Some(user) => {
val token = TokenUser(email)
tokenService.create(token)
Mailer.forgotPassword(email, link = routes.PasswordChangeController.specifyResetPassword(token.id).absoluteURL())(mailService)
}
case None => {
Mailer.forgotPasswordUnknowAddress(email)(mailService)
}
}
Future.successful(Ok(views.html.auth.sentResetPassword(email)))
}
)
}
/**
* Confirms the user's link based on the token and shows them a form to reset the password
*/
def specifyResetPassword (tokenId: String) = Action.async { implicit request =>
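    // Only a live password-reset token (not a sign-up token, not expired) yields the reset form;
    // any other token that is found is consumed and the generic invalid-token page is returned.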
tokenService.retrieve(tokenId).flatMap {
case Some(token) if (!token.isSignUp && !token.isExpired) => {
Future.successful(Ok(views.html.auth.specifyResetPassword(tokenId, passwordsForm)))
}
case Some(token) => {
tokenService.consume(tokenId)
notFoundDefault
}
case None => {
notFoundDefault
}
}
}
/**
* Saves the new password and authenticates the user
*/
def handleResetPassword = Action.async { implicit request =>
passwordsForm.bindFromRequest.fold(
formWithErrors => Future.successful(BadRequest(views.html.auth.specifyResetPassword(formWithErrors.data("token"), formWithErrors))),
passwords => {
val tokenId = passwords._3
tokenService.retrieve(tokenId).flatMap {
case Some(token) if (!token.isSignUp && !token.isExpired) => {
val loginInfo = LoginInfo(CredentialsProvider.ID, token.email)
userService.retrieve(loginInfo).flatMap {
case Some(user) => {
val authInfo = passwordHasher.hash(passwords._1)
authInfoRepository.save(loginInfo, authInfo)
env.authenticatorService.create(user.loginInfo).flatMap { authenticator =>
env.eventBus.publish(LoginEvent(user, request, request2Messages))
tokenService.consume(tokenId)
env.authenticatorService.init(authenticator)
Future.successful(Ok(views.html.auth.confirmResetPassword(user)))
}
}
case None => Future.failed(new RuntimeException("Couldn't find user"))
}
}
case Some(token) => {
tokenService.consume(tokenId)
notFoundDefault
}
case None => {
notFoundDefault
}
}
}
)
}
/*
* CHANGE PASSWORD - Can only be done whilst user is logged in
*/
val changePasswordForm = Form[ChangeInfo](
mapping(
"currentPassword" -> nonEmptyText,
"newPassword" -> tuple(
"password1" -> passwordValidation,
"password2" -> nonEmptyText
).verifying( Messages("passwords.not.equal"), newPassword => newPassword._2 == newPassword._1 )
)
((currentPassword, newPassword) => ChangeInfo(currentPassword, newPassword._1)) //apply
(data => Some((data.currentPassword, (data.newPassword, data.newPassword)))) //unapply
)
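  // Binding sketch with hypothetical values: currentPassword = "old-secret" and both new-password
  // fields set to "new-secret!" bind to ChangeInfo("old-secret", "new-secret!"); if the two new
  // passwords differ, the "passwords.not.equal" form error is raised instead.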
def startChangePassword = SecuredAction.async { implicit request =>
Future.successful(Ok(views.html.auth.changePassword(request.identity,changePasswordForm)))
}
/**
* Saves the new password and authenticates the user
*/
def handleChangePassword = SecuredAction.async { implicit request =>
changePasswordForm.bindFromRequest.fold(
formWithErrors => Future.successful(BadRequest(views.html.auth.changePassword(request.identity,formWithErrors))),
changeInfo => {
val user = request.identity
credentialsProvider.authenticate( Credentials(user.email.getOrElse(""), changeInfo.currentPassword) ).flatMap { loginInfo =>
authInfoRepository.save(loginInfo, passwordHasher.hash(changeInfo.newPassword))
env.authenticatorService.create(user.loginInfo).flatMap { authenticator =>
env.eventBus.publish(LoginEvent(user, request, request2Messages))
env.authenticatorService.init(authenticator)
Future.successful(Ok(views.html.auth.confirmResetPassword(user)))
}
}.recover {
case e: ProviderException =>
BadRequest(views.html.auth.changePassword(request.identity,changePasswordForm.withError("currentPassword","Does not match current password!")))
}
}
)
}
}
object PasswordChangeController {
case class ChangeInfo(currentPassword: String, newPassword: String)
}
// scalastyle:on | glidester/play-silhouette-seed | app/controllers/PasswordChangeController.scala | Scala | apache-2.0 | 7,284 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.util.Properties
import java.util.concurrent.atomic.{AtomicBoolean, AtomicLong}
import scala.annotation.meta.param
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet, Map}
import scala.language.reflectiveCalls
import scala.util.control.NonFatal
import org.scalatest.concurrent.Timeouts
import org.scalatest.time.SpanSugar._
import org.apache.spark._
import org.apache.spark.broadcast.BroadcastManager
import org.apache.spark.rdd.RDD
import org.apache.spark.scheduler.SchedulingMode.SchedulingMode
import org.apache.spark.shuffle.{FetchFailedException, MetadataFetchFailedException}
import org.apache.spark.storage.{BlockId, BlockManagerId, BlockManagerMaster}
import org.apache.spark.util.{AccumulatorContext, AccumulatorV2, CallSite, LongAccumulator, Utils}
class DAGSchedulerEventProcessLoopTester(dagScheduler: DAGScheduler)
extends DAGSchedulerEventProcessLoop(dagScheduler) {
override def post(event: DAGSchedulerEvent): Unit = {
try {
// Forward event to `onReceive` directly to avoid processing event asynchronously.
onReceive(event)
} catch {
case NonFatal(e) => onError(e)
}
}
override def onError(e: Throwable): Unit = {
logError("Error in DAGSchedulerEventLoop: ", e)
dagScheduler.stop()
throw e
}
}
/**
* An RDD for passing to DAGScheduler. These RDDs will use the dependencies and
* preferredLocations (if any) that are passed to them. They are deliberately not executable
* so we can test that DAGScheduler does not try to execute RDDs locally.
*
* Optionally, one can pass in a list of locations to use as preferred locations for each task,
* and a MapOutputTrackerMaster to enable reduce task locality. We pass the tracker separately
* because, in this test suite, it won't be the same as sc.env.mapOutputTracker.
*/
class MyRDD(
sc: SparkContext,
numPartitions: Int,
dependencies: List[Dependency[_]],
locations: Seq[Seq[String]] = Nil,
@(transient @param) tracker: MapOutputTrackerMaster = null)
extends RDD[(Int, Int)](sc, dependencies) with Serializable {
override def compute(split: Partition, context: TaskContext): Iterator[(Int, Int)] =
throw new RuntimeException("should not be reached")
override def getPartitions: Array[Partition] = (0 until numPartitions).map(i => new Partition {
override def index: Int = i
}).toArray
override def getPreferredLocations(partition: Partition): Seq[String] = {
if (locations.isDefinedAt(partition.index)) {
locations(partition.index)
} else if (tracker != null && dependencies.size == 1 &&
dependencies(0).isInstanceOf[ShuffleDependency[_, _, _]]) {
// If we have only one shuffle dependency, use the same code path as ShuffledRDD for locality
val dep = dependencies(0).asInstanceOf[ShuffleDependency[_, _, _]]
tracker.getPreferredLocationsForShuffle(dep, partition.index)
} else {
Nil
}
}
override def toString: String = "DAGSchedulerSuiteRDD " + id
}
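// A minimal usage sketch (names are illustrative, mirroring the tests below): a two-partition
// map-side RDD feeding a reducer through a shuffle dependency.
//   val shuffleMapRdd = new MyRDD(sc, 2, Nil)
//   val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
//   val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)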
class DAGSchedulerSuiteDummyException extends Exception
class DAGSchedulerSuite extends SparkFunSuite with LocalSparkContext with Timeouts {
import DAGSchedulerSuite._
val conf = new SparkConf
/** Set of TaskSets the DAGScheduler has requested executed. */
val taskSets = scala.collection.mutable.Buffer[TaskSet]()
/** Stages for which the DAGScheduler has called TaskScheduler.cancelTasks(). */
val cancelledStages = new HashSet[Int]()
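  // Minimal TaskScheduler stub: it never runs tasks. Submitted TaskSets are captured in `taskSets`
  // and cancelled stage ids in `cancelledStages`, so tests drive task completions by hand through
  // completion events.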
val taskScheduler = new TaskScheduler() {
override def schedulingMode: SchedulingMode = SchedulingMode.FIFO
override def rootPool: Pool = new Pool("", schedulingMode, 0, 0)
override def start() = {}
override def stop() = {}
override def executorHeartbeatReceived(
execId: String,
accumUpdates: Array[(Long, Seq[AccumulatorV2[_, _]])],
blockManagerId: BlockManagerId): Boolean = true
override def submitTasks(taskSet: TaskSet) = {
// normally done by TaskSetManager
taskSet.tasks.foreach(_.epoch = mapOutputTracker.getEpoch)
taskSets += taskSet
}
override def cancelTasks(stageId: Int, interruptThread: Boolean) {
cancelledStages += stageId
}
override def killTaskAttempt(
taskId: Long, interruptThread: Boolean, reason: String): Boolean = false
override def setDAGScheduler(dagScheduler: DAGScheduler) = {}
override def defaultParallelism() = 2
override def executorLost(executorId: String, reason: ExecutorLossReason): Unit = {}
override def workerRemoved(workerId: String, host: String, message: String): Unit = {}
override def applicationAttemptId(): Option[String] = None
}
/** Length of time to wait while draining listener events. */
val WAIT_TIMEOUT_MILLIS = 10000
val sparkListener = new SparkListener() {
val submittedStageInfos = new HashSet[StageInfo]
val successfulStages = new HashSet[Int]
val failedStages = new ArrayBuffer[Int]
val stageByOrderOfExecution = new ArrayBuffer[Int]
val endedTasks = new HashSet[Long]
override def onStageSubmitted(stageSubmitted: SparkListenerStageSubmitted) {
submittedStageInfos += stageSubmitted.stageInfo
}
override def onStageCompleted(stageCompleted: SparkListenerStageCompleted) {
val stageInfo = stageCompleted.stageInfo
stageByOrderOfExecution += stageInfo.stageId
if (stageInfo.failureReason.isEmpty) {
successfulStages += stageInfo.stageId
} else {
failedStages += stageInfo.stageId
}
}
override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = {
endedTasks += taskEnd.taskInfo.taskId
}
}
var mapOutputTracker: MapOutputTrackerMaster = null
var broadcastManager: BroadcastManager = null
var securityMgr: SecurityManager = null
var scheduler: DAGScheduler = null
var dagEventProcessLoopTester: DAGSchedulerEventProcessLoop = null
/**
* Set of cache locations to return from our mock BlockManagerMaster.
* Keys are (rdd ID, partition ID). Anything not present will return an empty
* list of cache locations silently.
*/
val cacheLocations = new HashMap[(Int, Int), Seq[BlockManagerId]]
// stub out BlockManagerMaster.getLocations to use our cacheLocations
val blockManagerMaster = new BlockManagerMaster(null, conf, true) {
override def getLocations(blockIds: Array[BlockId]): IndexedSeq[Seq[BlockManagerId]] = {
blockIds.map {
_.asRDDId.map(id => (id.rddId -> id.splitIndex)).flatMap(key => cacheLocations.get(key)).
getOrElse(Seq())
}.toIndexedSeq
}
override def removeExecutor(execId: String) {
// don't need to propagate to the driver, which we don't have
}
}
/** The list of results that DAGScheduler has collected. */
val results = new HashMap[Int, Any]()
var failure: Exception = _
val jobListener = new JobListener() {
override def taskSucceeded(index: Int, result: Any) = results.put(index, result)
override def jobFailed(exception: Exception) = { failure = exception }
}
/** A simple helper class for creating custom JobListeners */
class SimpleListener extends JobListener {
val results = new HashMap[Int, Any]
var failure: Exception = null
override def taskSucceeded(index: Int, result: Any): Unit = results.put(index, result)
override def jobFailed(exception: Exception): Unit = { failure = exception }
}
override def beforeEach(): Unit = {
super.beforeEach()
init(new SparkConf())
}
private def init(testConf: SparkConf): Unit = {
sc = new SparkContext("local", "DAGSchedulerSuite", testConf)
sparkListener.submittedStageInfos.clear()
sparkListener.successfulStages.clear()
sparkListener.failedStages.clear()
sparkListener.endedTasks.clear()
failure = null
sc.addSparkListener(sparkListener)
taskSets.clear()
cancelledStages.clear()
cacheLocations.clear()
results.clear()
securityMgr = new SecurityManager(conf)
broadcastManager = new BroadcastManager(true, conf, securityMgr)
mapOutputTracker = new MapOutputTrackerMaster(conf, broadcastManager, true) {
override def sendTracker(message: Any): Unit = {
// no-op, just so we can stop this to avoid leaking threads
}
}
scheduler = new DAGScheduler(
sc,
taskScheduler,
sc.listenerBus,
mapOutputTracker,
blockManagerMaster,
sc.env)
dagEventProcessLoopTester = new DAGSchedulerEventProcessLoopTester(scheduler)
}
override def afterEach(): Unit = {
try {
scheduler.stop()
dagEventProcessLoopTester.stop()
mapOutputTracker.stop()
broadcastManager.stop()
} finally {
super.afterEach()
}
}
override def afterAll() {
super.afterAll()
}
/**
* Type of RDD we use for testing. Note that we should never call the real RDD compute methods.
* This is a pair RDD type so it can always be used in ShuffleDependencies.
*/
type PairOfIntsRDD = RDD[(Int, Int)]
/**
* Process the supplied event as if it were the top of the DAGScheduler event queue, expecting
* the scheduler not to exit.
*
* After processing the event, submit waiting stages as is done on most iterations of the
* DAGScheduler event loop.
*/
private def runEvent(event: DAGSchedulerEvent) {
dagEventProcessLoopTester.post(event)
}
/**
* When we submit dummy Jobs, this is the compute function we supply. Except in a local test
* below, we do not expect this function to ever be executed; instead, we will return results
* directly through CompletionEvents.
*/
private val jobComputeFunc = (context: TaskContext, it: Iterator[(_)]) =>
it.next.asInstanceOf[Tuple2[_, _]]._1
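  // If it were ever invoked, it would return the key of the first (key, value) pair in the
  // iterator; the tests instead feed results back through completion events.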
/** Send the given CompletionEvent messages for the tasks in the TaskSet. */
private def complete(taskSet: TaskSet, results: Seq[(TaskEndReason, Any)]) {
assert(taskSet.tasks.size >= results.size)
for ((result, i) <- results.zipWithIndex) {
if (i < taskSet.tasks.size) {
runEvent(makeCompletionEvent(taskSet.tasks(i), result._1, result._2))
}
}
}
private def completeWithAccumulator(
accumId: Long,
taskSet: TaskSet,
results: Seq[(TaskEndReason, Any)]) {
assert(taskSet.tasks.size >= results.size)
for ((result, i) <- results.zipWithIndex) {
if (i < taskSet.tasks.size) {
runEvent(makeCompletionEvent(
taskSet.tasks(i),
result._1,
result._2,
Seq(AccumulatorSuite.createLongAccum("", initValue = 1, id = accumId))))
}
}
}
/** Submits a job to the scheduler and returns the job id. */
private def submit(
rdd: RDD[_],
partitions: Array[Int],
func: (TaskContext, Iterator[_]) => _ = jobComputeFunc,
listener: JobListener = jobListener,
properties: Properties = null): Int = {
val jobId = scheduler.nextJobId.getAndIncrement()
runEvent(JobSubmitted(jobId, rdd, func, partitions, CallSite("", ""), listener, properties))
jobId
}
/** Submits a map stage to the scheduler and returns the job id. */
private def submitMapStage(
shuffleDep: ShuffleDependency[_, _, _],
listener: JobListener = jobListener): Int = {
val jobId = scheduler.nextJobId.getAndIncrement()
runEvent(MapStageSubmitted(jobId, shuffleDep, CallSite("", ""), listener))
jobId
}
/** Sends TaskSetFailed to the scheduler. */
private def failed(taskSet: TaskSet, message: String) {
runEvent(TaskSetFailed(taskSet, message, None))
}
/** Sends JobCancelled to the DAG scheduler. */
private def cancel(jobId: Int) {
runEvent(JobCancelled(jobId, None))
}
test("[SPARK-3353] parent stage should have lower stage id") {
sparkListener.stageByOrderOfExecution.clear()
sc.parallelize(1 to 10).map(x => (x, x)).reduceByKey(_ + _, 4).count()
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(sparkListener.stageByOrderOfExecution.length === 2)
assert(sparkListener.stageByOrderOfExecution(0) < sparkListener.stageByOrderOfExecution(1))
}
/**
   * This test ensures that DAGScheduler builds the stage graph correctly.
*
* Suppose you have the following DAG:
*
   * [A] <--(s_A)-- [B] <--(s_B)-- [C] <--(s_C)-- [D]
   *             \                /
   *               <-------------
*
* Here, RDD B has a shuffle dependency on RDD A, and RDD C has shuffle dependency on both
* B and A. The shuffle dependency IDs are numbers in the DAGScheduler, but to make the example
* easier to understand, let's call the shuffled data from A shuffle dependency ID s_A and the
* shuffled data from B shuffle dependency ID s_B.
*
* Note: [] means an RDD, () means a shuffle dependency.
*/
test("[SPARK-13902] Ensure no duplicate stages are created") {
val rddA = new MyRDD(sc, 1, Nil)
val shuffleDepA = new ShuffleDependency(rddA, new HashPartitioner(1))
val s_A = shuffleDepA.shuffleId
val rddB = new MyRDD(sc, 1, List(shuffleDepA), tracker = mapOutputTracker)
val shuffleDepB = new ShuffleDependency(rddB, new HashPartitioner(1))
val s_B = shuffleDepB.shuffleId
val rddC = new MyRDD(sc, 1, List(shuffleDepA, shuffleDepB), tracker = mapOutputTracker)
val shuffleDepC = new ShuffleDependency(rddC, new HashPartitioner(1))
val s_C = shuffleDepC.shuffleId
val rddD = new MyRDD(sc, 1, List(shuffleDepC), tracker = mapOutputTracker)
submit(rddD, Array(0))
assert(scheduler.shuffleIdToMapStage.size === 3)
assert(scheduler.activeJobs.size === 1)
val mapStageA = scheduler.shuffleIdToMapStage(s_A)
val mapStageB = scheduler.shuffleIdToMapStage(s_B)
val mapStageC = scheduler.shuffleIdToMapStage(s_C)
val finalStage = scheduler.activeJobs.head.finalStage
assert(mapStageA.parents.isEmpty)
assert(mapStageB.parents === List(mapStageA))
assert(mapStageC.parents === List(mapStageA, mapStageB))
assert(finalStage.parents === List(mapStageC))
complete(taskSets(0), Seq((Success, makeMapStatus("hostA", 1))))
complete(taskSets(1), Seq((Success, makeMapStatus("hostA", 1))))
complete(taskSets(2), Seq((Success, makeMapStatus("hostA", 1))))
complete(taskSets(3), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("All shuffle files on the slave should be cleaned up when slave lost") {
// reset the test context with the right shuffle service config
afterEach()
val conf = new SparkConf()
conf.set("spark.shuffle.service.enabled", "true")
conf.set("spark.files.fetchFailure.unRegisterOutputOnHost", "true")
init(conf)
runEvent(ExecutorAdded("exec-hostA1", "hostA"))
runEvent(ExecutorAdded("exec-hostA2", "hostA"))
runEvent(ExecutorAdded("exec-hostB", "hostB"))
val firstRDD = new MyRDD(sc, 3, Nil)
val firstShuffleDep = new ShuffleDependency(firstRDD, new HashPartitioner(3))
val firstShuffleId = firstShuffleDep.shuffleId
val shuffleMapRdd = new MyRDD(sc, 3, List(firstShuffleDep))
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(3))
val secondShuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep))
submit(reduceRdd, Array(0))
// map stage1 completes successfully, with one task on each executor
complete(taskSets(0), Seq(
(Success,
MapStatus(BlockManagerId("exec-hostA1", "hostA", 12345), Array.fill[Long](1)(2))),
(Success,
MapStatus(BlockManagerId("exec-hostA2", "hostA", 12345), Array.fill[Long](1)(2))),
(Success, makeMapStatus("hostB", 1))
))
// map stage2 completes successfully, with one task on each executor
complete(taskSets(1), Seq(
(Success,
MapStatus(BlockManagerId("exec-hostA1", "hostA", 12345), Array.fill[Long](1)(2))),
(Success,
MapStatus(BlockManagerId("exec-hostA2", "hostA", 12345), Array.fill[Long](1)(2))),
(Success, makeMapStatus("hostB", 1))
))
// make sure our test setup is correct
val initialMapStatus1 = mapOutputTracker.shuffleStatuses(firstShuffleId).mapStatuses
// val initialMapStatus1 = mapOutputTracker.mapStatuses.get(0).get
assert(initialMapStatus1.count(_ != null) === 3)
assert(initialMapStatus1.map{_.location.executorId}.toSet ===
Set("exec-hostA1", "exec-hostA2", "exec-hostB"))
val initialMapStatus2 = mapOutputTracker.shuffleStatuses(secondShuffleId).mapStatuses
// val initialMapStatus1 = mapOutputTracker.mapStatuses.get(0).get
assert(initialMapStatus2.count(_ != null) === 3)
assert(initialMapStatus2.map{_.location.executorId}.toSet ===
Set("exec-hostA1", "exec-hostA2", "exec-hostB"))
// reduce stage fails with a fetch failure from one host
complete(taskSets(2), Seq(
(FetchFailed(BlockManagerId("exec-hostA2", "hostA", 12345), firstShuffleId, 0, 0, "ignored"),
null)
))
// Here is the main assertion -- make sure that we de-register
// the map outputs for both map stage from both executors on hostA
val mapStatus1 = mapOutputTracker.shuffleStatuses(firstShuffleId).mapStatuses
assert(mapStatus1.count(_ != null) === 1)
assert(mapStatus1(2).location.executorId === "exec-hostB")
assert(mapStatus1(2).location.host === "hostB")
val mapStatus2 = mapOutputTracker.shuffleStatuses(secondShuffleId).mapStatuses
assert(mapStatus2.count(_ != null) === 1)
assert(mapStatus2(2).location.executorId === "exec-hostB")
assert(mapStatus2(2).location.host === "hostB")
}
test("zero split job") {
var numResults = 0
var failureReason: Option[Exception] = None
val fakeListener = new JobListener() {
override def taskSucceeded(partition: Int, value: Any): Unit = numResults += 1
override def jobFailed(exception: Exception): Unit = {
failureReason = Some(exception)
}
}
val jobId = submit(new MyRDD(sc, 0, Nil), Array(), listener = fakeListener)
assert(numResults === 0)
cancel(jobId)
assert(failureReason.isDefined)
assert(failureReason.get.getMessage() === "Job 0 cancelled ")
}
test("run trivial job") {
submit(new MyRDD(sc, 1, Nil), Array(0))
complete(taskSets(0), List((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("run trivial job w/ dependency") {
val baseRdd = new MyRDD(sc, 1, Nil)
val finalRdd = new MyRDD(sc, 1, List(new OneToOneDependency(baseRdd)))
submit(finalRdd, Array(0))
complete(taskSets(0), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("equals and hashCode AccumulableInfo") {
val accInfo1 = new AccumulableInfo(
1, Some("a1"), Some("delta1"), Some("val1"), internal = true, countFailedValues = false)
val accInfo2 = new AccumulableInfo(
1, Some("a1"), Some("delta1"), Some("val1"), internal = false, countFailedValues = false)
val accInfo3 = new AccumulableInfo(
1, Some("a1"), Some("delta1"), Some("val1"), internal = false, countFailedValues = false)
assert(accInfo1 !== accInfo2)
assert(accInfo2 === accInfo3)
assert(accInfo2.hashCode() === accInfo3.hashCode())
}
test("cache location preferences w/ dependency") {
val baseRdd = new MyRDD(sc, 1, Nil).cache()
val finalRdd = new MyRDD(sc, 1, List(new OneToOneDependency(baseRdd)))
cacheLocations(baseRdd.id -> 0) =
Seq(makeBlockManagerId("hostA"), makeBlockManagerId("hostB"))
submit(finalRdd, Array(0))
val taskSet = taskSets(0)
assertLocations(taskSet, Seq(Seq("hostA", "hostB")))
complete(taskSet, Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("regression test for getCacheLocs") {
val rdd = new MyRDD(sc, 3, Nil).cache()
cacheLocations(rdd.id -> 0) =
Seq(makeBlockManagerId("hostA"), makeBlockManagerId("hostB"))
cacheLocations(rdd.id -> 1) =
Seq(makeBlockManagerId("hostB"), makeBlockManagerId("hostC"))
cacheLocations(rdd.id -> 2) =
Seq(makeBlockManagerId("hostC"), makeBlockManagerId("hostD"))
val locs = scheduler.getCacheLocs(rdd).map(_.map(_.host))
assert(locs === Seq(Seq("hostA", "hostB"), Seq("hostB", "hostC"), Seq("hostC", "hostD")))
}
/**
* This test ensures that if a particular RDD is cached, RDDs earlier in the dependency chain
* are not computed. It constructs the following chain of dependencies:
   *   +---+ shuffle +---+    +---+    +---+
   *   | A |<--------| B |<---| C |<---| D |
   *   +---+         +---+    +---+    +---+
* Here, B is derived from A by performing a shuffle, C has a one-to-one dependency on B,
* and D similarly has a one-to-one dependency on C. If none of the RDDs were cached, this
* set of RDDs would result in a two stage job: one ShuffleMapStage, and a ResultStage that
* reads the shuffled data from RDD A. This test ensures that if C is cached, the scheduler
* doesn't perform a shuffle, and instead computes the result using a single ResultStage
* that reads C's cached data.
*/
test("getMissingParentStages should consider all ancestor RDDs' cache statuses") {
val rddA = new MyRDD(sc, 1, Nil)
val rddB = new MyRDD(sc, 1, List(new ShuffleDependency(rddA, new HashPartitioner(1))),
tracker = mapOutputTracker)
val rddC = new MyRDD(sc, 1, List(new OneToOneDependency(rddB))).cache()
val rddD = new MyRDD(sc, 1, List(new OneToOneDependency(rddC)))
cacheLocations(rddC.id -> 0) =
Seq(makeBlockManagerId("hostA"), makeBlockManagerId("hostB"))
submit(rddD, Array(0))
assert(scheduler.runningStages.size === 1)
// Make sure that the scheduler is running the final result stage.
// Because C is cached, the shuffle map stage to compute A does not need to be run.
assert(scheduler.runningStages.head.isInstanceOf[ResultStage])
}
test("avoid exponential blowup when getting preferred locs list") {
// Build up a complex dependency graph with repeated zip operations, without preferred locations
var rdd: RDD[_] = new MyRDD(sc, 1, Nil)
(1 to 30).foreach(_ => rdd = rdd.zip(rdd))
// getPreferredLocs runs quickly, indicating that exponential graph traversal is avoided.
failAfter(10 seconds) {
val preferredLocs = scheduler.getPreferredLocs(rdd, 0)
// No preferred locations are returned.
assert(preferredLocs.length === 0)
}
}
test("unserializable task") {
val unserializableRdd = new MyRDD(sc, 1, Nil) {
class UnserializableClass
val unserializable = new UnserializableClass
}
submit(unserializableRdd, Array(0))
assert(failure.getMessage.startsWith(
"Job aborted due to stage failure: Task not serializable:"))
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(sparkListener.failedStages.contains(0))
assert(sparkListener.failedStages.size === 1)
assertDataStructuresEmpty()
}
test("trivial job failure") {
submit(new MyRDD(sc, 1, Nil), Array(0))
failed(taskSets(0), "some failure")
assert(failure.getMessage === "Job aborted due to stage failure: some failure")
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(sparkListener.failedStages.contains(0))
assert(sparkListener.failedStages.size === 1)
assertDataStructuresEmpty()
}
test("trivial job cancellation") {
val rdd = new MyRDD(sc, 1, Nil)
val jobId = submit(rdd, Array(0))
cancel(jobId)
assert(failure.getMessage === s"Job $jobId cancelled ")
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(sparkListener.failedStages.contains(0))
assert(sparkListener.failedStages.size === 1)
assertDataStructuresEmpty()
}
test("job cancellation no-kill backend") {
// make sure that the DAGScheduler doesn't crash when the TaskScheduler
// doesn't implement killTask()
val noKillTaskScheduler = new TaskScheduler() {
override def schedulingMode: SchedulingMode = SchedulingMode.FIFO
override def rootPool: Pool = new Pool("", schedulingMode, 0, 0)
override def start(): Unit = {}
override def stop(): Unit = {}
override def submitTasks(taskSet: TaskSet): Unit = {
taskSets += taskSet
}
override def cancelTasks(stageId: Int, interruptThread: Boolean) {
throw new UnsupportedOperationException
}
override def killTaskAttempt(
taskId: Long, interruptThread: Boolean, reason: String): Boolean = {
throw new UnsupportedOperationException
}
override def setDAGScheduler(dagScheduler: DAGScheduler): Unit = {}
override def defaultParallelism(): Int = 2
override def executorHeartbeatReceived(
execId: String,
accumUpdates: Array[(Long, Seq[AccumulatorV2[_, _]])],
blockManagerId: BlockManagerId): Boolean = true
override def executorLost(executorId: String, reason: ExecutorLossReason): Unit = {}
override def workerRemoved(workerId: String, host: String, message: String): Unit = {}
override def applicationAttemptId(): Option[String] = None
}
val noKillScheduler = new DAGScheduler(
sc,
noKillTaskScheduler,
sc.listenerBus,
mapOutputTracker,
blockManagerMaster,
sc.env)
dagEventProcessLoopTester = new DAGSchedulerEventProcessLoopTester(noKillScheduler)
val jobId = submit(new MyRDD(sc, 1, Nil), Array(0))
cancel(jobId)
// Because the job wasn't actually cancelled, we shouldn't have received a failure message.
assert(failure === null)
// When the task set completes normally, state should be correctly updated.
complete(taskSets(0), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(sparkListener.failedStages.isEmpty)
assert(sparkListener.successfulStages.contains(0))
}
test("run trivial shuffle") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 1)),
(Success, makeMapStatus("hostB", 1))))
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostA"), makeBlockManagerId("hostB")))
complete(taskSets(1), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("run trivial shuffle with fetch failure") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", reduceRdd.partitions.length)),
(Success, makeMapStatus("hostB", reduceRdd.partitions.length))))
// the 2nd ResultTask failed
complete(taskSets(1), Seq(
(Success, 42),
(FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0, 0, "ignored"), null)))
// this will get called
// blockManagerMaster.removeExecutor("exec-hostA")
// ask the scheduler to try it again
scheduler.resubmitFailedStages()
// have the 2nd attempt pass
complete(taskSets(2), Seq((Success, makeMapStatus("hostA", reduceRdd.partitions.length))))
// we can see both result blocks now
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1.host).toSet ===
HashSet("hostA", "hostB"))
complete(taskSets(3), Seq((Success, 43)))
assert(results === Map(0 -> 42, 1 -> 43))
assertDataStructuresEmpty()
}
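  // Each tuple is (test description, executor-loss event, external shuffle service enabled,
  // whether the shuffle files on the lost executor are expected to be lost).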
private val shuffleFileLossTests = Seq(
("slave lost with shuffle service", SlaveLost("", false), true, false),
("worker lost with shuffle service", SlaveLost("", true), true, true),
("worker lost without shuffle service", SlaveLost("", true), false, true),
("executor failure with shuffle service", ExecutorKilled, true, false),
("executor failure without shuffle service", ExecutorKilled, false, true))
for ((eventDescription, event, shuffleServiceOn, expectFileLoss) <- shuffleFileLossTests) {
val maybeLost = if (expectFileLoss) {
"lost"
} else {
"not lost"
}
test(s"shuffle files $maybeLost when $eventDescription") {
// reset the test context with the right shuffle service config
afterEach()
val conf = new SparkConf()
conf.set("spark.shuffle.service.enabled", shuffleServiceOn.toString)
init(conf)
assert(sc.env.blockManager.externalShuffleServiceEnabled == shuffleServiceOn)
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 1)),
(Success, makeMapStatus("hostB", 1))))
runEvent(ExecutorLost("exec-hostA", event))
if (expectFileLoss) {
intercept[MetadataFetchFailedException] {
mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0)
}
} else {
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostA"), makeBlockManagerId("hostB")))
}
}
}
// Helper function to validate state when creating tests for task failures
private def checkStageId(stageId: Int, attempt: Int, stageAttempt: TaskSet) {
assert(stageAttempt.stageId === stageId)
assert(stageAttempt.stageAttemptId == attempt)
}
// Helper functions to extract commonly used code in Fetch Failure test cases
private def setupStageAbortTest(sc: SparkContext) {
sc.listenerBus.addListener(new EndListener())
ended = false
jobResult = null
}
// Create a new Listener to confirm that the listenerBus sees the JobEnd message
// when we abort the stage. This message will also be consumed by the EventLoggingListener
// so this will propagate up to the user.
var ended = false
var jobResult : JobResult = null
class EndListener extends SparkListener {
override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = {
jobResult = jobEnd.jobResult
ended = true
}
}
/**
* Common code to get the next stage attempt, confirm it's the one we expect, and complete it
* successfully.
*
* @param stageId - The current stageId
* @param attemptIdx - The current attempt count
* @param numShufflePartitions - The number of partitions in the next stage
*/
private def completeShuffleMapStageSuccessfully(
stageId: Int,
attemptIdx: Int,
numShufflePartitions: Int): Unit = {
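    // Each task reports its map output from a distinct host: index 0 from "hostA", index 1 from
    // "hostB", and so on ('A' + idx).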
val stageAttempt = taskSets.last
checkStageId(stageId, attemptIdx, stageAttempt)
complete(stageAttempt, stageAttempt.tasks.zipWithIndex.map {
case (task, idx) =>
(Success, makeMapStatus("host" + ('A' + idx).toChar, numShufflePartitions))
}.toSeq)
}
/**
* Common code to get the next stage attempt, confirm it's the one we expect, and complete it
* with all FetchFailure.
*
* @param stageId - The current stageId
* @param attemptIdx - The current attempt count
* @param shuffleDep - The shuffle dependency of the stage with a fetch failure
*/
private def completeNextStageWithFetchFailure(
stageId: Int,
attemptIdx: Int,
shuffleDep: ShuffleDependency[_, _, _]): Unit = {
val stageAttempt = taskSets.last
checkStageId(stageId, attemptIdx, stageAttempt)
complete(stageAttempt, stageAttempt.tasks.zipWithIndex.map { case (task, idx) =>
(FetchFailed(makeBlockManagerId("hostA"), shuffleDep.shuffleId, 0, idx, "ignored"), null)
}.toSeq)
}
/**
* Common code to get the next result stage attempt, confirm it's the one we expect, and
* complete it with a success where we return 42.
*
* @param stageId - The current stageId
* @param attemptIdx - The current attempt count
*/
private def completeNextResultStageWithSuccess(
stageId: Int,
attemptIdx: Int,
partitionToResult: Int => Int = _ => 42): Unit = {
val stageAttempt = taskSets.last
checkStageId(stageId, attemptIdx, stageAttempt)
assert(scheduler.stageIdToStage(stageId).isInstanceOf[ResultStage])
val taskResults = stageAttempt.tasks.zipWithIndex.map { case (task, idx) =>
(Success, partitionToResult(idx))
}
complete(stageAttempt, taskResults.toSeq)
}
/**
* In this test, we simulate a job where many tasks in the same stage fail. We want to show
* that many fetch failures inside a single stage attempt do not trigger an abort
* on their own, but only when there are enough failing stage attempts.
*/
test("Single stage fetch failure should not abort the stage.") {
setupStageAbortTest(sc)
val parts = 8
val shuffleMapRdd = new MyRDD(sc, parts, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(parts))
val reduceRdd = new MyRDD(sc, parts, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, (0 until parts).toArray)
completeShuffleMapStageSuccessfully(0, 0, numShufflePartitions = parts)
completeNextStageWithFetchFailure(1, 0, shuffleDep)
// Resubmit and confirm that now all is well
scheduler.resubmitFailedStages()
assert(scheduler.runningStages.nonEmpty)
assert(!ended)
// Complete stage 0 and then stage 1 with a "42"
completeShuffleMapStageSuccessfully(0, 1, numShufflePartitions = parts)
completeNextResultStageWithSuccess(1, 1)
// Confirm job finished successfully
sc.listenerBus.waitUntilEmpty(1000)
assert(ended === true)
assert(results === (0 until parts).map { idx => idx -> 42 }.toMap)
assertDataStructuresEmpty()
}
/**
* In this test we simulate a job failure where the first stage completes successfully and
* the second stage fails due to a fetch failure. Multiple successive fetch failures of a stage
* trigger an overall job abort to avoid endless retries.
*/
test("Multiple consecutive stage fetch failures should lead to job being aborted.") {
setupStageAbortTest(sc)
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
for (attempt <- 0 until scheduler.maxConsecutiveStageAttempts) {
// Complete all the tasks for the current attempt of stage 0 successfully
completeShuffleMapStageSuccessfully(0, attempt, numShufflePartitions = 2)
// Now we should have a new taskSet, for a new attempt of stage 1.
// Fail all these tasks with FetchFailure
completeNextStageWithFetchFailure(1, attempt, shuffleDep)
// this will trigger a resubmission of stage 0, since we've lost some of its
// map output, for the next iteration through the loop
scheduler.resubmitFailedStages()
if (attempt < scheduler.maxConsecutiveStageAttempts - 1) {
assert(scheduler.runningStages.nonEmpty)
assert(!ended)
} else {
// Stage should have been aborted and removed from running stages
assertDataStructuresEmpty()
sc.listenerBus.waitUntilEmpty(1000)
assert(ended)
jobResult match {
case JobFailed(reason) =>
assert(reason.getMessage.contains("ResultStage 1 () has failed the maximum"))
case other => fail(s"expected JobFailed, not $other")
}
}
}
}
/**
* In this test, we create a job with two consecutive shuffles, and simulate 2 failures for each
   * shuffle fetch. In total, the job has had four failures overall but not four failures
* for a particular stage, and as such should not be aborted.
*/
test("Failures in different stages should not trigger an overall abort") {
setupStageAbortTest(sc)
val shuffleOneRdd = new MyRDD(sc, 2, Nil).cache()
val shuffleDepOne = new ShuffleDependency(shuffleOneRdd, new HashPartitioner(2))
val shuffleTwoRdd = new MyRDD(sc, 2, List(shuffleDepOne), tracker = mapOutputTracker).cache()
val shuffleDepTwo = new ShuffleDependency(shuffleTwoRdd, new HashPartitioner(1))
val finalRdd = new MyRDD(sc, 1, List(shuffleDepTwo), tracker = mapOutputTracker)
submit(finalRdd, Array(0))
// In the first two iterations, Stage 0 succeeds and stage 1 fails. In the next two iterations,
// stage 2 fails.
for (attempt <- 0 until scheduler.maxConsecutiveStageAttempts) {
// Complete all the tasks for the current attempt of stage 0 successfully
completeShuffleMapStageSuccessfully(0, attempt, numShufflePartitions = 2)
if (attempt < scheduler.maxConsecutiveStageAttempts / 2) {
// Now we should have a new taskSet, for a new attempt of stage 1.
// Fail all these tasks with FetchFailure
completeNextStageWithFetchFailure(1, attempt, shuffleDepOne)
} else {
completeShuffleMapStageSuccessfully(1, attempt, numShufflePartitions = 1)
// Fail stage 2
completeNextStageWithFetchFailure(2,
attempt - scheduler.maxConsecutiveStageAttempts / 2, shuffleDepTwo)
}
// this will trigger a resubmission of stage 0, since we've lost some of its
// map output, for the next iteration through the loop
scheduler.resubmitFailedStages()
}
completeShuffleMapStageSuccessfully(0, 4, numShufflePartitions = 2)
completeShuffleMapStageSuccessfully(1, 4, numShufflePartitions = 1)
// Succeed stage2 with a "42"
completeNextResultStageWithSuccess(2, scheduler.maxConsecutiveStageAttempts / 2)
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
/**
* In this test we demonstrate that only consecutive failures trigger a stage abort. A stage may
   * fail multiple times, succeed, then fail a few more times (because it's run again by downstream
* dependencies). The total number of failed attempts for one stage will go over the limit,
* but that doesn't matter, since they have successes in the middle.
*/
test("Non-consecutive stage failures don't trigger abort") {
setupStageAbortTest(sc)
val shuffleOneRdd = new MyRDD(sc, 2, Nil).cache()
val shuffleDepOne = new ShuffleDependency(shuffleOneRdd, new HashPartitioner(2))
val shuffleTwoRdd = new MyRDD(sc, 2, List(shuffleDepOne), tracker = mapOutputTracker).cache()
val shuffleDepTwo = new ShuffleDependency(shuffleTwoRdd, new HashPartitioner(1))
val finalRdd = new MyRDD(sc, 1, List(shuffleDepTwo), tracker = mapOutputTracker)
submit(finalRdd, Array(0))
// First, execute stages 0 and 1, failing stage 1 up to MAX-1 times.
for (attempt <- 0 until scheduler.maxConsecutiveStageAttempts - 1) {
// Make each task in stage 0 success
completeShuffleMapStageSuccessfully(0, attempt, numShufflePartitions = 2)
// Now we should have a new taskSet, for a new attempt of stage 1.
// Fail these tasks with FetchFailure
completeNextStageWithFetchFailure(1, attempt, shuffleDepOne)
scheduler.resubmitFailedStages()
// Confirm we have not yet aborted
assert(scheduler.runningStages.nonEmpty)
assert(!ended)
}
// Rerun stage 0 and 1 to step through the task set
completeShuffleMapStageSuccessfully(0, 3, numShufflePartitions = 2)
completeShuffleMapStageSuccessfully(1, 3, numShufflePartitions = 1)
// Fail stage 2 so that stage 1 is resubmitted when we call scheduler.resubmitFailedStages()
completeNextStageWithFetchFailure(2, 0, shuffleDepTwo)
scheduler.resubmitFailedStages()
// Rerun stage 0 to step through the task set
completeShuffleMapStageSuccessfully(0, 4, numShufflePartitions = 2)
// Now again, fail stage 1 (up to MAX_FAILURES) but confirm that this doesn't trigger an abort
// since we succeeded in between.
completeNextStageWithFetchFailure(1, 4, shuffleDepOne)
scheduler.resubmitFailedStages()
// Confirm we have not yet aborted
assert(scheduler.runningStages.nonEmpty)
assert(!ended)
// Next, succeed all and confirm output
// Rerun stage 0 + 1
completeShuffleMapStageSuccessfully(0, 5, numShufflePartitions = 2)
completeShuffleMapStageSuccessfully(1, 5, numShufflePartitions = 1)
// Succeed stage 2 and verify results
completeNextResultStageWithSuccess(2, 1)
assertDataStructuresEmpty()
sc.listenerBus.waitUntilEmpty(1000)
assert(ended === true)
assert(results === Map(0 -> 42))
}
test("trivial shuffle with multiple fetch failures") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", reduceRdd.partitions.length)),
(Success, makeMapStatus("hostB", reduceRdd.partitions.length))))
// The MapOutputTracker should know about both map output locations.
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1.host).toSet ===
HashSet("hostA", "hostB"))
// The first result task fails, with a fetch failure for the output from the first mapper.
runEvent(makeCompletionEvent(
taskSets(1).tasks(0),
FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0, 0, "ignored"),
null))
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(sparkListener.failedStages.contains(1))
// The second ResultTask fails, with a fetch failure for the output from the second mapper.
runEvent(makeCompletionEvent(
taskSets(1).tasks(0),
FetchFailed(makeBlockManagerId("hostA"), shuffleId, 1, 1, "ignored"),
null))
// The SparkListener should not receive redundant failure events.
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(sparkListener.failedStages.size == 1)
}
/**
* This tests the case where another FetchFailed comes in while the map stage is getting
* re-run.
*/
test("late fetch failures don't cause multiple concurrent attempts for the same map stage") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
val mapStageId = 0
def countSubmittedMapStageAttempts(): Int = {
sparkListener.submittedStageInfos.count(_.stageId == mapStageId)
}
// The map stage should have been submitted.
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(countSubmittedMapStageAttempts() === 1)
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 2)),
(Success, makeMapStatus("hostB", 2))))
// The MapOutputTracker should know about both map output locations.
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1.host).toSet ===
HashSet("hostA", "hostB"))
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 1).map(_._1.host).toSet ===
HashSet("hostA", "hostB"))
// The first result task fails, with a fetch failure for the output from the first mapper.
runEvent(makeCompletionEvent(
taskSets(1).tasks(0),
FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0, 0, "ignored"),
null))
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(sparkListener.failedStages.contains(1))
// Trigger resubmission of the failed map stage.
runEvent(ResubmitFailedStages)
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
// Another attempt for the map stage should have been submitted, resulting in 2 total attempts.
assert(countSubmittedMapStageAttempts() === 2)
// The second ResultTask fails, with a fetch failure for the output from the second mapper.
runEvent(makeCompletionEvent(
taskSets(1).tasks(1),
FetchFailed(makeBlockManagerId("hostB"), shuffleId, 1, 1, "ignored"),
null))
// Another ResubmitFailedStages event should not result in another attempt for the map
// stage being run concurrently.
// NOTE: the actual ResubmitFailedStages may get called at any time during this, but it
    // shouldn't affect anything -- our calling it just makes *SURE* it gets called between the
// desired event and our check.
runEvent(ResubmitFailedStages)
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(countSubmittedMapStageAttempts() === 2)
}
/**
* This tests the case where a late FetchFailed comes in after the map stage has finished getting
* retried and a new reduce stage starts running.
*/
test("extremely late fetch failures don't cause multiple concurrent attempts for " +
"the same stage") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
def countSubmittedReduceStageAttempts(): Int = {
sparkListener.submittedStageInfos.count(_.stageId == 1)
}
def countSubmittedMapStageAttempts(): Int = {
sparkListener.submittedStageInfos.count(_.stageId == 0)
}
// The map stage should have been submitted.
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(countSubmittedMapStageAttempts() === 1)
// Complete the map stage.
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 2)),
(Success, makeMapStatus("hostB", 2))))
// The reduce stage should have been submitted.
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(countSubmittedReduceStageAttempts() === 1)
// The first result task fails, with a fetch failure for the output from the first mapper.
runEvent(makeCompletionEvent(
taskSets(1).tasks(0),
FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0, 0, "ignored"),
null))
// Trigger resubmission of the failed map stage and finish the re-started map task.
runEvent(ResubmitFailedStages)
complete(taskSets(2), Seq((Success, makeMapStatus("hostA", 1))))
// Because the map stage finished, another attempt for the reduce stage should have been
// submitted, resulting in 2 total attempts for each the map and the reduce stage.
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(countSubmittedMapStageAttempts() === 2)
assert(countSubmittedReduceStageAttempts() === 2)
// A late FetchFailed arrives from the second task in the original reduce stage.
runEvent(makeCompletionEvent(
taskSets(1).tasks(1),
FetchFailed(makeBlockManagerId("hostB"), shuffleId, 1, 1, "ignored"),
null))
// Running ResubmitFailedStages shouldn't result in any more attempts for the map stage, because
// the FetchFailed should have been ignored
runEvent(ResubmitFailedStages)
// The FetchFailed from the original reduce stage should be ignored.
assert(countSubmittedMapStageAttempts() === 2)
}
test("task events always posted in speculation / when stage is killed") {
val baseRdd = new MyRDD(sc, 4, Nil)
val finalRdd = new MyRDD(sc, 4, List(new OneToOneDependency(baseRdd)))
submit(finalRdd, Array(0, 1, 2, 3))
// complete two tasks
runEvent(makeCompletionEvent(
taskSets(0).tasks(0), Success, 42,
Seq.empty, createFakeTaskInfoWithId(0)))
runEvent(makeCompletionEvent(
taskSets(0).tasks(1), Success, 42,
Seq.empty, createFakeTaskInfoWithId(1)))
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
// verify stage exists
assert(scheduler.stageIdToStage.contains(0))
assert(sparkListener.endedTasks.size == 2)
// finish other 2 tasks
runEvent(makeCompletionEvent(
taskSets(0).tasks(2), Success, 42,
Seq.empty, createFakeTaskInfoWithId(2)))
runEvent(makeCompletionEvent(
taskSets(0).tasks(3), Success, 42,
Seq.empty, createFakeTaskInfoWithId(3)))
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(sparkListener.endedTasks.size == 4)
// verify the stage is done
assert(!scheduler.stageIdToStage.contains(0))
// Stage should be complete. Finish one other Successful task to simulate what can happen
// with a speculative task and make sure the event is sent out
runEvent(makeCompletionEvent(
taskSets(0).tasks(3), Success, 42,
Seq.empty, createFakeTaskInfoWithId(5)))
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(sparkListener.endedTasks.size == 5)
    // make sure non-successful tasks also send out an event
runEvent(makeCompletionEvent(
taskSets(0).tasks(3), UnknownReason, 42,
Seq.empty, createFakeTaskInfoWithId(6)))
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(sparkListener.endedTasks.size == 6)
}
test("ignore late map task completions") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
// pretend we were told hostA went away
val oldEpoch = mapOutputTracker.getEpoch
runEvent(ExecutorLost("exec-hostA", ExecutorKilled))
val newEpoch = mapOutputTracker.getEpoch
assert(newEpoch > oldEpoch)
// now start completing some tasks in the shuffle map stage, under different hosts
// and epochs, and make sure scheduler updates its state correctly
val taskSet = taskSets(0)
val shuffleStage = scheduler.stageIdToStage(taskSet.stageId).asInstanceOf[ShuffleMapStage]
assert(shuffleStage.numAvailableOutputs === 0)
// should be ignored for being too old
runEvent(makeCompletionEvent(
taskSet.tasks(0),
Success,
makeMapStatus("hostA", reduceRdd.partitions.size)))
assert(shuffleStage.numAvailableOutputs === 0)
// should work because it's a non-failed host (so the available map outputs will increase)
runEvent(makeCompletionEvent(
taskSet.tasks(0),
Success,
makeMapStatus("hostB", reduceRdd.partitions.size)))
assert(shuffleStage.numAvailableOutputs === 1)
// should be ignored for being too old
runEvent(makeCompletionEvent(
taskSet.tasks(0),
Success,
makeMapStatus("hostA", reduceRdd.partitions.size)))
assert(shuffleStage.numAvailableOutputs === 1)
// should work because it's a new epoch, which will increase the number of available map
// outputs, and also finish the stage
taskSet.tasks(1).epoch = newEpoch
runEvent(makeCompletionEvent(
taskSet.tasks(1),
Success,
makeMapStatus("hostA", reduceRdd.partitions.size)))
assert(shuffleStage.numAvailableOutputs === 2)
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostB"), makeBlockManagerId("hostA")))
// finish the next stage normally, which completes the job
complete(taskSets(1), Seq((Success, 42), (Success, 43)))
assert(results === Map(0 -> 42, 1 -> 43))
assertDataStructuresEmpty()
}
test("run shuffle with map stage failure") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0, 1))
// Fail the map stage. This should cause the entire job to fail.
val stageFailureMessage = "Exception failure in map stage"
failed(taskSets(0), stageFailureMessage)
assert(failure.getMessage === s"Job aborted due to stage failure: $stageFailureMessage")
// Listener bus should get told about the map stage failing, but not the reduce stage
// (since the reduce stage hasn't been started yet).
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(sparkListener.failedStages.toSet === Set(0))
assertDataStructuresEmpty()
}
/**
* Run two jobs, with a shared dependency. We simulate a fetch failure in the second job, which
* requires regenerating some outputs of the shared dependency. One key aspect of this test is
* that the second job actually uses a different stage for the shared dependency (a "skipped"
* stage).
*/
test("shuffle fetch failure in a reused shuffle dependency") {
// Run the first job successfully, which creates one shuffle dependency
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep))
submit(reduceRdd, Array(0, 1))
completeShuffleMapStageSuccessfully(0, 0, 2)
completeNextResultStageWithSuccess(1, 0)
assert(results === Map(0 -> 42, 1 -> 42))
assertDataStructuresEmpty()
// submit another job w/ the shared dependency, and have a fetch failure
val reduce2 = new MyRDD(sc, 2, List(shuffleDep))
submit(reduce2, Array(0, 1))
// Note that the stage numbering here is only b/c the shared dependency produces a new, skipped
// stage. If instead it reused the existing stage, then this would be stage 2
completeNextStageWithFetchFailure(3, 0, shuffleDep)
scheduler.resubmitFailedStages()
// the scheduler now creates a new task set to regenerate the missing map output, but this time
// using a different stage, the "skipped" one
// SPARK-9809 -- this stage is submitted without a task for each partition (because some of
// the shuffle map output is still available from stage 0); make sure we've still got internal
// accumulators setup
assert(scheduler.stageIdToStage(2).latestInfo.taskMetrics != null)
completeShuffleMapStageSuccessfully(2, 0, 2)
completeNextResultStageWithSuccess(3, 1, idx => idx + 1234)
assert(results === Map(0 -> 1234, 1 -> 1235))
assertDataStructuresEmpty()
}
/**
   * This test runs a three-stage job, with a fetch failure in stage 1, but during the retry, we
* have completions from both the first & second attempt of stage 1. So all the map output is
* available before we finish any task set for stage 1. We want to make sure that we don't
* submit stage 2 until the map output for stage 1 is registered
*/
test("don't submit stage until its dependencies map outputs are registered (SPARK-5259)") {
val firstRDD = new MyRDD(sc, 3, Nil)
val firstShuffleDep = new ShuffleDependency(firstRDD, new HashPartitioner(3))
val firstShuffleId = firstShuffleDep.shuffleId
val shuffleMapRdd = new MyRDD(sc, 3, List(firstShuffleDep))
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep))
submit(reduceRdd, Array(0))
// things start out smoothly, stage 0 completes with no issues
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostB", shuffleMapRdd.partitions.length)),
(Success, makeMapStatus("hostB", shuffleMapRdd.partitions.length)),
(Success, makeMapStatus("hostA", shuffleMapRdd.partitions.length))
))
// then one executor dies, and a task fails in stage 1
runEvent(ExecutorLost("exec-hostA", ExecutorKilled))
runEvent(makeCompletionEvent(
taskSets(1).tasks(0),
FetchFailed(null, firstShuffleId, 2, 0, "Fetch failed"),
null))
// so we resubmit stage 0, which completes happily
scheduler.resubmitFailedStages()
val stage0Resubmit = taskSets(2)
assert(stage0Resubmit.stageId == 0)
assert(stage0Resubmit.stageAttemptId === 1)
val task = stage0Resubmit.tasks(0)
assert(task.partitionId === 2)
runEvent(makeCompletionEvent(
task,
Success,
makeMapStatus("hostC", shuffleMapRdd.partitions.length)))
    // now here is where things get tricky: we will now have a task set representing
// the second attempt for stage 1, but we *also* have some tasks for the first attempt for
// stage 1 still going
val stage1Resubmit = taskSets(3)
assert(stage1Resubmit.stageId == 1)
assert(stage1Resubmit.stageAttemptId === 1)
assert(stage1Resubmit.tasks.length === 3)
// we'll have some tasks finish from the first attempt, and some finish from the second attempt,
// so that we actually have all stage outputs, though no attempt has completed all its
// tasks
runEvent(makeCompletionEvent(
taskSets(3).tasks(0),
Success,
makeMapStatus("hostC", reduceRdd.partitions.length)))
runEvent(makeCompletionEvent(
taskSets(3).tasks(1),
Success,
makeMapStatus("hostC", reduceRdd.partitions.length)))
// late task finish from the first attempt
runEvent(makeCompletionEvent(
taskSets(1).tasks(2),
Success,
makeMapStatus("hostB", reduceRdd.partitions.length)))
// What should happen now is that we submit stage 2. However, we might not see an error
// b/c of DAGScheduler's error handling (it tends to swallow errors and just log them). But
// we can check some conditions.
// Note that the really important thing here is not so much that we submit stage 2 *immediately*
// but that we don't end up with some error from these interleaved completions. It would also
// be OK (though sub-optimal) if stage 2 simply waited until the resubmission of stage 1 had
// all its tasks complete
// check that we have all the map output for stage 0 (it should have been there even before
// the last round of completions from stage 1, but just to double check it hasn't been messed
// up) and also the newly available stage 1
val stageToReduceIdxs = Seq(
0 -> (0 until 3),
1 -> (0 until 1)
)
for {
(stage, reduceIdxs) <- stageToReduceIdxs
reduceIdx <- reduceIdxs
} {
// this would throw an exception if the map status hadn't been registered
val statuses = mapOutputTracker.getMapSizesByExecutorId(stage, reduceIdx)
// really we should have already thrown an exception rather than fail either of these
// asserts, but just to be extra defensive let's double check the statuses are OK
assert(statuses != null)
assert(statuses.nonEmpty)
}
// and check that stage 2 has been submitted
assert(taskSets.size == 5)
val stage2TaskSet = taskSets(4)
assert(stage2TaskSet.stageId == 2)
assert(stage2TaskSet.stageAttemptId == 0)
}
/**
* We lose an executor after completing some shuffle map tasks on it. Those tasks get
* resubmitted, and when they finish the job completes normally
*/
test("register map outputs correctly after ExecutorLost and task Resubmitted") {
val firstRDD = new MyRDD(sc, 3, Nil)
val firstShuffleDep = new ShuffleDependency(firstRDD, new HashPartitioner(2))
val reduceRdd = new MyRDD(sc, 5, List(firstShuffleDep))
submit(reduceRdd, Array(0))
// complete some of the tasks from the first stage, on one host
runEvent(makeCompletionEvent(
taskSets(0).tasks(0),
Success,
makeMapStatus("hostA", reduceRdd.partitions.length)))
runEvent(makeCompletionEvent(
taskSets(0).tasks(1),
Success,
makeMapStatus("hostA", reduceRdd.partitions.length)))
// now that host goes down
runEvent(ExecutorLost("exec-hostA", ExecutorKilled))
// so we resubmit those tasks
runEvent(makeCompletionEvent(taskSets(0).tasks(0), Resubmitted, null))
runEvent(makeCompletionEvent(taskSets(0).tasks(1), Resubmitted, null))
// now complete everything on a different host
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostB", reduceRdd.partitions.length)),
(Success, makeMapStatus("hostB", reduceRdd.partitions.length)),
(Success, makeMapStatus("hostB", reduceRdd.partitions.length))
))
// now we should submit stage 1, and the map output from stage 0 should be registered
// check that we have all the map output for stage 0
(0 until reduceRdd.partitions.length).foreach { reduceIdx =>
val statuses = mapOutputTracker.getMapSizesByExecutorId(0, reduceIdx)
// really we should have already thrown an exception rather than fail either of these
// asserts, but just to be extra defensive let's double check the statuses are OK
assert(statuses != null)
assert(statuses.nonEmpty)
}
// and check that stage 1 has been submitted
assert(taskSets.size == 2)
val stage1TaskSet = taskSets(1)
assert(stage1TaskSet.stageId == 1)
assert(stage1TaskSet.stageAttemptId == 0)
}
/**
* Makes sure that failures of stage used by multiple jobs are correctly handled.
*
* This test creates the following dependency graph:
*
* shuffleMapRdd1 shuffleMapRDD2
* | \\ |
* | \\ |
* | \\ |
* | \\ |
* reduceRdd1 reduceRdd2
*
* We start both shuffleMapRdds and then fail shuffleMapRdd1. As a result, the job listeners for
* reduceRdd1 and reduceRdd2 should both be informed that the job failed. shuffleMapRDD2 should
* also be cancelled, because it is only used by reduceRdd2 and reduceRdd2 cannot complete
* without shuffleMapRdd1.
*/
test("failure of stage used by two jobs") {
val shuffleMapRdd1 = new MyRDD(sc, 2, Nil)
val shuffleDep1 = new ShuffleDependency(shuffleMapRdd1, new HashPartitioner(2))
val shuffleMapRdd2 = new MyRDD(sc, 2, Nil)
val shuffleDep2 = new ShuffleDependency(shuffleMapRdd2, new HashPartitioner(2))
val reduceRdd1 = new MyRDD(sc, 2, List(shuffleDep1), tracker = mapOutputTracker)
val reduceRdd2 = new MyRDD(sc, 2, List(shuffleDep1, shuffleDep2), tracker = mapOutputTracker)
// We need to make our own listeners for this test, since by default submit uses the same
// listener for all jobs, and here we want to capture the failure for each job separately.
class FailureRecordingJobListener() extends JobListener {
var failureMessage: String = _
override def taskSucceeded(index: Int, result: Any) {}
override def jobFailed(exception: Exception): Unit = { failureMessage = exception.getMessage }
}
val listener1 = new FailureRecordingJobListener()
val listener2 = new FailureRecordingJobListener()
submit(reduceRdd1, Array(0, 1), listener = listener1)
submit(reduceRdd2, Array(0, 1), listener = listener2)
val stageFailureMessage = "Exception failure in map stage"
failed(taskSets(0), stageFailureMessage)
assert(cancelledStages.toSet === Set(0, 2))
// Make sure the listeners got told about both failed stages.
sc.listenerBus.waitUntilEmpty(WAIT_TIMEOUT_MILLIS)
assert(sparkListener.successfulStages.isEmpty)
assert(sparkListener.failedStages.toSet === Set(0, 2))
assert(listener1.failureMessage === s"Job aborted due to stage failure: $stageFailureMessage")
assert(listener2.failureMessage === s"Job aborted due to stage failure: $stageFailureMessage")
assertDataStructuresEmpty()
}
def checkJobPropertiesAndPriority(taskSet: TaskSet, expected: String, priority: Int): Unit = {
assert(taskSet.properties != null)
assert(taskSet.properties.getProperty("testProperty") === expected)
assert(taskSet.priority === priority)
}
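/**
 * Helper for the SPARK-6880 tests below: submits two jobs that share the same stages, cancels
 * the first job, completes the shared stage 0 (which still carries job1's properties), and
 * returns shuffleDep1 so that callers can inject fetch failures against that shuffle.
 */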
def launchJobsThatShareStageAndCancelFirst(): ShuffleDependency[Int, Int, Nothing] = {
val baseRdd = new MyRDD(sc, 1, Nil)
val shuffleDep1 = new ShuffleDependency(baseRdd, new HashPartitioner(1))
val intermediateRdd = new MyRDD(sc, 1, List(shuffleDep1))
val shuffleDep2 = new ShuffleDependency(intermediateRdd, new HashPartitioner(1))
val finalRdd1 = new MyRDD(sc, 1, List(shuffleDep2))
val finalRdd2 = new MyRDD(sc, 1, List(shuffleDep2))
val job1Properties = new Properties()
val job2Properties = new Properties()
job1Properties.setProperty("testProperty", "job1")
job2Properties.setProperty("testProperty", "job2")
// Run jobs 1 & 2, both referencing the same stage, then cancel job1.
// Note that we have to submit job2 before we cancel job1 to have them actually share
// *Stages*, and not just shuffle dependencies, due to skipped stages (at least until
// we address SPARK-10193.)
val jobId1 = submit(finalRdd1, Array(0), properties = job1Properties)
val jobId2 = submit(finalRdd2, Array(0), properties = job2Properties)
assert(scheduler.activeJobs.nonEmpty)
val testProperty1 = scheduler.jobIdToActiveJob(jobId1).properties.getProperty("testProperty")
// remove job1 as an ActiveJob
cancel(jobId1)
// job2 should still be running
assert(scheduler.activeJobs.nonEmpty)
val testProperty2 = scheduler.jobIdToActiveJob(jobId2).properties.getProperty("testProperty")
assert(testProperty1 != testProperty2)
// NB: This next assert isn't necessarily the "desired" behavior; it's just to document
// the current behavior. We've already submitted the TaskSet for stage 0 based on job1, but
// even though we have cancelled that job and are now running it because of job2, we haven't
// updated the TaskSet's properties. Changing the properties to "job2" is likely the more
// correct behavior.
val job1Id = 0 // TaskSet priority for Stages run with "job1" as the ActiveJob
checkJobPropertiesAndPriority(taskSets(0), "job1", job1Id)
complete(taskSets(0), Seq((Success, makeMapStatus("hostA", 1))))
shuffleDep1
}
/**
* Makes sure that tasks for a stage used by multiple jobs are submitted with the properties of a
* later, active job if they were previously run under a job that is no longer active
*/
test("stage used by two jobs, the first no longer active (SPARK-6880)") {
launchJobsThatShareStageAndCancelFirst()
// The next check is the key for SPARK-6880. For the stage which was shared by both job1 and
// job2 but never had any tasks submitted for job1, the properties of job2 are now used to run
// the stage.
checkJobPropertiesAndPriority(taskSets(1), "job2", 1)
complete(taskSets(1), Seq((Success, makeMapStatus("hostA", 1))))
assert(taskSets(2).properties != null)
complete(taskSets(2), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assert(scheduler.activeJobs.isEmpty)
assertDataStructuresEmpty()
}
/**
* Makes sure that tasks for a stage used by multiple jobs are submitted with the properties of a
* later, active job if they were previously run under a job that is no longer active, even when
* there are fetch failures
*/
test("stage used by two jobs, some fetch failures, and the first job no longer active " +
"(SPARK-6880)") {
val shuffleDep1 = launchJobsThatShareStageAndCancelFirst()
val job2Id = 1 // TaskSet priority for Stages run with "job2" as the ActiveJob
// lets say there is a fetch failure in this task set, which makes us go back and
// run stage 0, attempt 1
complete(taskSets(1), Seq(
(FetchFailed(makeBlockManagerId("hostA"), shuffleDep1.shuffleId, 0, 0, "ignored"), null)))
scheduler.resubmitFailedStages()
// stage 0, attempt 1 should have the properties of job2
assert(taskSets(2).stageId === 0)
assert(taskSets(2).stageAttemptId === 1)
checkJobPropertiesAndPriority(taskSets(2), "job2", job2Id)
// run the rest of the stages normally, checking that they have the correct properties
complete(taskSets(2), Seq((Success, makeMapStatus("hostA", 1))))
checkJobPropertiesAndPriority(taskSets(3), "job2", job2Id)
complete(taskSets(3), Seq((Success, makeMapStatus("hostA", 1))))
checkJobPropertiesAndPriority(taskSets(4), "job2", job2Id)
complete(taskSets(4), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assert(scheduler.activeJobs.isEmpty)
assertDataStructuresEmpty()
}
/**
* In this test, we run a map stage where one of the executors fails but we still receive a
* "zombie" complete message from a task that ran on that executor. We want to make sure the
* stage is resubmitted so that the task that ran on the failed executor is re-executed, and
* that the stage is only marked as finished once that task completes.
*/
test("run trivial shuffle with out-of-band executor failure and retry") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0))
// Tell the DAGScheduler that hostA was lost.
runEvent(ExecutorLost("exec-hostA", ExecutorKilled))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 1)),
(Success, makeMapStatus("hostB", 1))))
// At this point, no more tasks are running for the stage (and the TaskSetManager considers the
// stage complete), but the tasks that ran on HostA need to be re-run, so the DAGScheduler
// should re-submit the stage with one task (the task that originally ran on HostA).
assert(taskSets.size === 2)
assert(taskSets(1).tasks.size === 1)
// Make sure that the stage that was re-submitted was the ShuffleMapStage (not the reduce
// stage, which shouldn't be run until all of the tasks in the ShuffleMapStage complete on
// alive executors).
assert(taskSets(1).tasks(0).isInstanceOf[ShuffleMapTask])
// have hostC complete the resubmitted task
complete(taskSets(1), Seq((Success, makeMapStatus("hostC", 1))))
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostC"), makeBlockManagerId("hostB")))
// Make sure that the reduce stage was now submitted.
assert(taskSets.size === 3)
assert(taskSets(2).tasks(0).isInstanceOf[ResultTask[_, _]])
// Complete the reduce stage.
complete(taskSets(2), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("recursive shuffle failures") {
val shuffleOneRdd = new MyRDD(sc, 2, Nil)
val shuffleDepOne = new ShuffleDependency(shuffleOneRdd, new HashPartitioner(2))
val shuffleTwoRdd = new MyRDD(sc, 2, List(shuffleDepOne), tracker = mapOutputTracker)
val shuffleDepTwo = new ShuffleDependency(shuffleTwoRdd, new HashPartitioner(1))
val finalRdd = new MyRDD(sc, 1, List(shuffleDepTwo), tracker = mapOutputTracker)
submit(finalRdd, Array(0))
// have the first stage complete normally
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 2)),
(Success, makeMapStatus("hostB", 2))))
// have the second stage complete normally
complete(taskSets(1), Seq(
(Success, makeMapStatus("hostA", 1)),
(Success, makeMapStatus("hostC", 1))))
// fail the third stage because hostA went down
complete(taskSets(2), Seq(
(FetchFailed(makeBlockManagerId("hostA"), shuffleDepTwo.shuffleId, 0, 0, "ignored"), null)))
// TODO assert this:
// blockManagerMaster.removeExecutor("exec-hostA")
// have DAGScheduler try again
scheduler.resubmitFailedStages()
complete(taskSets(3), Seq((Success, makeMapStatus("hostA", 2))))
complete(taskSets(4), Seq((Success, makeMapStatus("hostA", 1))))
complete(taskSets(5), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("cached post-shuffle") {
val shuffleOneRdd = new MyRDD(sc, 2, Nil).cache()
val shuffleDepOne = new ShuffleDependency(shuffleOneRdd, new HashPartitioner(2))
val shuffleTwoRdd = new MyRDD(sc, 2, List(shuffleDepOne), tracker = mapOutputTracker).cache()
val shuffleDepTwo = new ShuffleDependency(shuffleTwoRdd, new HashPartitioner(1))
val finalRdd = new MyRDD(sc, 1, List(shuffleDepTwo), tracker = mapOutputTracker)
submit(finalRdd, Array(0))
cacheLocations(shuffleTwoRdd.id -> 0) = Seq(makeBlockManagerId("hostD"))
cacheLocations(shuffleTwoRdd.id -> 1) = Seq(makeBlockManagerId("hostC"))
// complete stage 0
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 2)),
(Success, makeMapStatus("hostB", 2))))
// complete stage 1
complete(taskSets(1), Seq(
(Success, makeMapStatus("hostA", 1)),
(Success, makeMapStatus("hostB", 1))))
// pretend stage 2 failed because hostA went down
complete(taskSets(2), Seq(
(FetchFailed(makeBlockManagerId("hostA"), shuffleDepTwo.shuffleId, 0, 0, "ignored"), null)))
// TODO assert this:
// blockManagerMaster.removeExecutor("exec-hostA")
// DAGScheduler should notice the cached copy of the second shuffle and try to get it rerun.
scheduler.resubmitFailedStages()
assertLocations(taskSets(3), Seq(Seq("hostD")))
// allow hostD to recover
complete(taskSets(3), Seq((Success, makeMapStatus("hostD", 1))))
complete(taskSets(4), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("misbehaved accumulator should not crash DAGScheduler and SparkContext") {
val acc = new LongAccumulator {
override def add(v: java.lang.Long): Unit = throw new DAGSchedulerSuiteDummyException
override def add(v: Long): Unit = throw new DAGSchedulerSuiteDummyException
}
sc.register(acc)
// Run this on executors
sc.parallelize(1 to 10, 2).foreach { item => acc.add(1) }
// Make sure we can still run commands
assert(sc.parallelize(1 to 10, 2).count() === 10)
}
/**
* The job will be failed on first task throwing a DAGSchedulerSuiteDummyException.
* Any subsequent task WILL throw a legitimate java.lang.UnsupportedOperationException.
* If there are multiple tasks, there is a race between the SparkDriverExecutionExceptions
* and their differing causes as to which one will represent the result for the job...
*/
test("misbehaved resultHandler should not crash DAGScheduler and SparkContext") {
val e = intercept[SparkDriverExecutionException] {
// Number of parallelized partitions implies number of tasks of job
val rdd = sc.parallelize(1 to 10, 2)
sc.runJob[Int, Int](
rdd,
(context: TaskContext, iter: Iterator[Int]) => iter.size,
// For a robust test assertion, limit the number of job tasks to 1; that is, if there are
// multiple RDD partitions, use the id of any one partition, say the first partition (id=0)
Seq(0),
(part: Int, result: Int) => throw new DAGSchedulerSuiteDummyException)
}
assert(e.getCause.isInstanceOf[DAGSchedulerSuiteDummyException])
// Make sure we can still run commands on our SparkContext
assert(sc.parallelize(1 to 10, 2).count() === 10)
}
test("getPartitions exceptions should not crash DAGScheduler and SparkContext (SPARK-8606)") {
val e1 = intercept[DAGSchedulerSuiteDummyException] {
val rdd = new MyRDD(sc, 2, Nil) {
override def getPartitions: Array[Partition] = {
throw new DAGSchedulerSuiteDummyException
}
}
rdd.reduceByKey(_ + _, 1).count()
}
// Make sure we can still run commands
assert(sc.parallelize(1 to 10, 2).count() === 10)
}
test("getPreferredLocations errors should not crash DAGScheduler and SparkContext (SPARK-8606)") {
val e1 = intercept[SparkException] {
val rdd = new MyRDD(sc, 2, Nil) {
override def getPreferredLocations(split: Partition): Seq[String] = {
throw new DAGSchedulerSuiteDummyException
}
}
rdd.count()
}
assert(e1.getMessage.contains(classOf[DAGSchedulerSuiteDummyException].getName))
// Make sure we can still run commands
assert(sc.parallelize(1 to 10, 2).count() === 10)
}
test("accumulator not calculated for resubmitted result stage") {
// just for register
val accum = AccumulatorSuite.createLongAccum("a")
val finalRdd = new MyRDD(sc, 1, Nil)
submit(finalRdd, Array(0))
completeWithAccumulator(accum.id, taskSets(0), Seq((Success, 42)))
completeWithAccumulator(accum.id, taskSets(0), Seq((Success, 42)))
assert(results === Map(0 -> 42))
assert(accum.value === 1)
assertDataStructuresEmpty()
}
test("accumulators are updated on exception failures") {
val acc1 = AccumulatorSuite.createLongAccum("ingenieur")
val acc2 = AccumulatorSuite.createLongAccum("boulanger")
val acc3 = AccumulatorSuite.createLongAccum("agriculteur")
assert(AccumulatorContext.get(acc1.id).isDefined)
assert(AccumulatorContext.get(acc2.id).isDefined)
assert(AccumulatorContext.get(acc3.id).isDefined)
val accUpdate1 = new LongAccumulator
accUpdate1.metadata = acc1.metadata
accUpdate1.setValue(15)
val accUpdate2 = new LongAccumulator
accUpdate2.metadata = acc2.metadata
accUpdate2.setValue(13)
val accUpdate3 = new LongAccumulator
accUpdate3.metadata = acc3.metadata
accUpdate3.setValue(18)
val accumUpdates = Seq(accUpdate1, accUpdate2, accUpdate3)
val accumInfo = accumUpdates.map(AccumulatorSuite.makeInfo)
val exceptionFailure = new ExceptionFailure(
new SparkException("fondue?"),
accumInfo).copy(accums = accumUpdates)
submit(new MyRDD(sc, 1, Nil), Array(0))
runEvent(makeCompletionEvent(taskSets.head.tasks.head, exceptionFailure, "result"))
assert(AccumulatorContext.get(acc1.id).get.value === 15L)
assert(AccumulatorContext.get(acc2.id).get.value === 13L)
assert(AccumulatorContext.get(acc3.id).get.value === 18L)
}
test("reduce tasks should be placed locally with map output") {
// Create a shuffleMapRdd with 1 partition
val shuffleMapRdd = new MyRDD(sc, 1, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 1))))
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostA")))
// Reducer should run on the same host that map task ran
val reduceTaskSet = taskSets(1)
assertLocations(reduceTaskSet, Seq(Seq("hostA")))
complete(reduceTaskSet, Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("reduce task locality preferences should only include machines with largest map outputs") {
val numMapTasks = 4
// Create a shuffleMapRdd with more partitions
val shuffleMapRdd = new MyRDD(sc, numMapTasks, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0))
val statuses = (1 to numMapTasks).map { i =>
(Success, makeMapStatus("host" + i, 1, (10*i).toByte))
}
complete(taskSets(0), statuses)
// Reducer should prefer the last 3 hosts as they have 20%, 30% and 40% of data
val hosts = (1 to numMapTasks).map(i => "host" + i).reverse.take(numMapTasks - 1)
val reduceTaskSet = taskSets(1)
assertLocations(reduceTaskSet, Seq(hosts))
complete(reduceTaskSet, Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("stages with both narrow and shuffle dependencies use narrow ones for locality") {
// Create an RDD that has both a shuffle dependency and a narrow dependency (e.g. for a join)
val rdd1 = new MyRDD(sc, 1, Nil)
val rdd2 = new MyRDD(sc, 1, Nil, locations = Seq(Seq("hostB")))
val shuffleDep = new ShuffleDependency(rdd1, new HashPartitioner(1))
val narrowDep = new OneToOneDependency(rdd2)
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep, narrowDep), tracker = mapOutputTracker)
submit(reduceRdd, Array(0))
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 1))))
assert(mapOutputTracker.getMapSizesByExecutorId(shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostA")))
// Reducer should run where RDD 2 has preferences, even though it also has a shuffle dep
val reduceTaskSet = taskSets(1)
assertLocations(reduceTaskSet, Seq(Seq("hostB")))
complete(reduceTaskSet, Seq((Success, 42)))
assert(results === Map(0 -> 42))
assertDataStructuresEmpty()
}
test("Spark exceptions should include call site in stack trace") {
val e = intercept[SparkException] {
sc.parallelize(1 to 10, 2).map { _ => throw new RuntimeException("uh-oh!") }.count()
}
// Does not include message, ONLY stack trace.
val stackTraceString = Utils.exceptionString(e)
// should actually include the RDD operation that invoked the method:
assert(stackTraceString.contains("org.apache.spark.rdd.RDD.count"))
// should include the FunSuite setup:
assert(stackTraceString.contains("org.scalatest.FunSuite"))
}
test("catch errors in event loop") {
// this is a test of our testing framework -- make sure errors in event loop don't get ignored
// just run some bad event that will throw an exception -- we'll give a null TaskEndReason
val rdd1 = new MyRDD(sc, 1, Nil)
submit(rdd1, Array(0))
intercept[Exception] {
complete(taskSets(0), Seq(
(null, makeMapStatus("hostA", 1))))
}
}
test("simple map stage submission") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
// Submit a map stage by itself
submitMapStage(shuffleDep)
assert(results.size === 0) // No results yet
completeShuffleMapStageSuccessfully(0, 0, 1)
assert(results.size === 1)
results.clear()
assertDataStructuresEmpty()
// Submit a reduce job that depends on this map stage; it should directly do the reduce
submit(reduceRdd, Array(0))
completeNextResultStageWithSuccess(2, 0)
assert(results === Map(0 -> 42))
results.clear()
assertDataStructuresEmpty()
// Check that if we submit the map stage again, no tasks run
submitMapStage(shuffleDep)
assert(results.size === 1)
assertDataStructuresEmpty()
}
test("map stage submission with reduce stage also depending on the data") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(1))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 1, List(shuffleDep), tracker = mapOutputTracker)
// Submit the map stage by itself
submitMapStage(shuffleDep)
// Submit a reduce job that depends on this map stage
submit(reduceRdd, Array(0))
// Complete tasks for the map stage
completeShuffleMapStageSuccessfully(0, 0, 1)
assert(results.size === 1)
results.clear()
// Complete tasks for the reduce stage
completeNextResultStageWithSuccess(1, 0)
assert(results === Map(0 -> 42))
results.clear()
assertDataStructuresEmpty()
// Check that if we submit the map stage again, no tasks run
submitMapStage(shuffleDep)
assert(results.size === 1)
assertDataStructuresEmpty()
}
test("map stage submission with fetch failure") {
val shuffleMapRdd = new MyRDD(sc, 2, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
val shuffleId = shuffleDep.shuffleId
val reduceRdd = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
// Submit a map stage by itself
submitMapStage(shuffleDep)
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", reduceRdd.partitions.length)),
(Success, makeMapStatus("hostB", reduceRdd.partitions.length))))
assert(results.size === 1)
results.clear()
assertDataStructuresEmpty()
// Submit a reduce job that depends on this map stage, but where one reduce will fail a fetch
submit(reduceRdd, Array(0, 1))
complete(taskSets(1), Seq(
(Success, 42),
(FetchFailed(makeBlockManagerId("hostA"), shuffleId, 0, 0, "ignored"), null)))
// Ask the scheduler to try it again; TaskSet 2 will rerun the map task that we couldn't fetch
// from, then TaskSet 3 will run the reduce stage
scheduler.resubmitFailedStages()
complete(taskSets(2), Seq((Success, makeMapStatus("hostA", reduceRdd.partitions.length))))
complete(taskSets(3), Seq((Success, 43)))
assert(results === Map(0 -> 42, 1 -> 43))
results.clear()
assertDataStructuresEmpty()
// Run another reduce job without a failure; this should just work
submit(reduceRdd, Array(0, 1))
complete(taskSets(4), Seq(
(Success, 44),
(Success, 45)))
assert(results === Map(0 -> 44, 1 -> 45))
results.clear()
assertDataStructuresEmpty()
// Resubmit the map stage; this should also just work
submitMapStage(shuffleDep)
assert(results.size === 1)
results.clear()
assertDataStructuresEmpty()
}
/**
* In this test, we have three RDDs with shuffle dependencies, and we submit map stage jobs
* that are waiting on each one, as well as a reduce job on the last one. We test that all of
* these jobs complete even if there are some fetch failures in both shuffles.
*/
test("map stage submission with multiple shared stages and failures") {
val rdd1 = new MyRDD(sc, 2, Nil)
val dep1 = new ShuffleDependency(rdd1, new HashPartitioner(2))
val rdd2 = new MyRDD(sc, 2, List(dep1), tracker = mapOutputTracker)
val dep2 = new ShuffleDependency(rdd2, new HashPartitioner(2))
val rdd3 = new MyRDD(sc, 2, List(dep2), tracker = mapOutputTracker)
val listener1 = new SimpleListener
val listener2 = new SimpleListener
val listener3 = new SimpleListener
submitMapStage(dep1, listener1)
submitMapStage(dep2, listener2)
submit(rdd3, Array(0, 1), listener = listener3)
// Complete the first stage
assert(taskSets(0).stageId === 0)
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", rdd1.partitions.length)),
(Success, makeMapStatus("hostB", rdd1.partitions.length))))
assert(mapOutputTracker.getMapSizesByExecutorId(dep1.shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostA"), makeBlockManagerId("hostB")))
assert(listener1.results.size === 1)
// When attempting the second stage, show a fetch failure
assert(taskSets(1).stageId === 1)
complete(taskSets(1), Seq(
(Success, makeMapStatus("hostA", rdd2.partitions.length)),
(FetchFailed(makeBlockManagerId("hostA"), dep1.shuffleId, 0, 0, "ignored"), null)))
scheduler.resubmitFailedStages()
assert(listener2.results.size === 0) // Second stage listener should not have a result yet
// Stage 0 should now be running as task set 2; make its task succeed
assert(taskSets(2).stageId === 0)
complete(taskSets(2), Seq(
(Success, makeMapStatus("hostC", rdd2.partitions.length))))
assert(mapOutputTracker.getMapSizesByExecutorId(dep1.shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostC"), makeBlockManagerId("hostB")))
assert(listener2.results.size === 0) // Second stage listener should still not have a result
// Stage 1 should now be running as task set 3; make its first task succeed
assert(taskSets(3).stageId === 1)
complete(taskSets(3), Seq(
(Success, makeMapStatus("hostB", rdd2.partitions.length)),
(Success, makeMapStatus("hostD", rdd2.partitions.length))))
assert(mapOutputTracker.getMapSizesByExecutorId(dep2.shuffleId, 0).map(_._1).toSet ===
HashSet(makeBlockManagerId("hostB"), makeBlockManagerId("hostD")))
assert(listener2.results.size === 1)
// Finally, the reduce job should be running as task set 4; make it see a fetch failure,
// then make it run again and succeed
assert(taskSets(4).stageId === 2)
complete(taskSets(4), Seq(
(Success, 52),
(FetchFailed(makeBlockManagerId("hostD"), dep2.shuffleId, 0, 0, "ignored"), null)))
scheduler.resubmitFailedStages()
// TaskSet 5 will rerun stage 1's lost task, then TaskSet 6 will rerun stage 2
assert(taskSets(5).stageId === 1)
complete(taskSets(5), Seq(
(Success, makeMapStatus("hostE", rdd2.partitions.length))))
complete(taskSets(6), Seq(
(Success, 53)))
assert(listener3.results === Map(0 -> 52, 1 -> 53))
assertDataStructuresEmpty()
}
/**
* In this test, we run a map stage where one of the executors fails but we still receive a
* "zombie" complete message from that executor. We want to make sure the stage is not reported
* as done until all tasks have completed.
*
* Most of the functionality in this test is tested in "run trivial shuffle with out-of-band
* executor failure and retry". However, that test uses ShuffleMapStages that are followed by
* a ResultStage, whereas in this test, the ShuffleMapStage is tested in isolation, without a
* ResultStage after it.
*/
test("map stage submission with executor failure late map task completions") {
val shuffleMapRdd = new MyRDD(sc, 3, Nil)
val shuffleDep = new ShuffleDependency(shuffleMapRdd, new HashPartitioner(2))
submitMapStage(shuffleDep)
val oldTaskSet = taskSets(0)
runEvent(makeCompletionEvent(oldTaskSet.tasks(0), Success, makeMapStatus("hostA", 2)))
assert(results.size === 0) // Map stage job should not be complete yet
// Pretend host A was lost. This will cause the TaskSetManager to resubmit task 0, because it
// completed on hostA.
val oldEpoch = mapOutputTracker.getEpoch
runEvent(ExecutorLost("exec-hostA", ExecutorKilled))
val newEpoch = mapOutputTracker.getEpoch
assert(newEpoch > oldEpoch)
// Suppose we also get a completed event from task 1 on the same host; this should be ignored
runEvent(makeCompletionEvent(oldTaskSet.tasks(1), Success, makeMapStatus("hostA", 2)))
assert(results.size === 0) // Map stage job should not be complete yet
// A completion from another task should work because it's a non-failed host
runEvent(makeCompletionEvent(oldTaskSet.tasks(2), Success, makeMapStatus("hostB", 2)))
// At this point, no more tasks are running for the stage (and the TaskSetManager considers
// the stage complete), but the task that ran on hostA needs to be re-run, so the map stage
// shouldn't be marked as complete, and the DAGScheduler should re-submit the stage.
assert(results.size === 0)
assert(taskSets.size === 2)
// Now complete tasks in the second task set
val newTaskSet = taskSets(1)
// 2 tasks should have been re-submitted, for tasks 0 and 1 (which ran on hostA).
assert(newTaskSet.tasks.size === 2)
// Complete task 0 from the original task set (i.e., not the one that's currently active).
// This should still be counted towards the job being complete (but there's still one
// outstanding task).
runEvent(makeCompletionEvent(newTaskSet.tasks(0), Success, makeMapStatus("hostB", 2)))
assert(results.size === 0)
// Complete the final task, from the currently active task set. There's still one
// running task, task 0 in the currently active stage attempt, but the success of task 0 means
// the DAGScheduler can mark the stage as finished.
runEvent(makeCompletionEvent(newTaskSet.tasks(1), Success, makeMapStatus("hostB", 2)))
assert(results.size === 1) // Map stage job should now finally be complete
assertDataStructuresEmpty()
// Also test that a reduce stage using this shuffled data can immediately run
val reduceRDD = new MyRDD(sc, 2, List(shuffleDep), tracker = mapOutputTracker)
results.clear()
submit(reduceRDD, Array(0, 1))
complete(taskSets(2), Seq((Success, 42), (Success, 43)))
assert(results === Map(0 -> 42, 1 -> 43))
results.clear()
assertDataStructuresEmpty()
}
/**
* Checks the DAGScheduler's internal logic for traversing an RDD DAG by making sure that
* getShuffleDependencies correctly returns the direct shuffle dependencies of a particular
* RDD. The test creates the following RDD graph (where n denotes a narrow dependency and s
* denotes a shuffle dependency):
*
* A <------------s---------,
* \\
* B <--s-- C <--s-- D <--n---`-- E
*
* Here, the direct shuffle dependency of C is just the shuffle dependency on B. The direct
* shuffle dependencies of E are the shuffle dependency on A and the shuffle dependency on C.
*/
test("getShuffleDependencies correctly returns only direct shuffle parents") {
val rddA = new MyRDD(sc, 2, Nil)
val shuffleDepA = new ShuffleDependency(rddA, new HashPartitioner(1))
val rddB = new MyRDD(sc, 2, Nil)
val shuffleDepB = new ShuffleDependency(rddB, new HashPartitioner(1))
val rddC = new MyRDD(sc, 1, List(shuffleDepB))
val shuffleDepC = new ShuffleDependency(rddC, new HashPartitioner(1))
val rddD = new MyRDD(sc, 1, List(shuffleDepC))
val narrowDepD = new OneToOneDependency(rddD)
val rddE = new MyRDD(sc, 1, List(shuffleDepA, narrowDepD), tracker = mapOutputTracker)
assert(scheduler.getShuffleDependencies(rddA) === Set())
assert(scheduler.getShuffleDependencies(rddB) === Set())
assert(scheduler.getShuffleDependencies(rddC) === Set(shuffleDepB))
assert(scheduler.getShuffleDependencies(rddD) === Set(shuffleDepC))
assert(scheduler.getShuffleDependencies(rddE) === Set(shuffleDepA, shuffleDepC))
}
test("SPARK-17644: After one stage is aborted for too many failed attempts, subsequent stages" +
"still behave correctly on fetch failures") {
// Runs a job that always encounters a fetch failure, so should eventually be aborted
def runJobWithPersistentFetchFailure: Unit = {
val rdd1 = sc.makeRDD(Array(1, 2, 3, 4), 2).map(x => (x, 1)).groupByKey()
val shuffleHandle =
rdd1.dependencies.head.asInstanceOf[ShuffleDependency[_, _, _]].shuffleHandle
rdd1.map {
case (x, _) if (x == 1) =>
throw new FetchFailedException(
BlockManagerId("1", "1", 1), shuffleHandle.shuffleId, 0, 0, "test")
case (x, _) => x
}.count()
}
// Runs a job that encounters a single fetch failure but succeeds on the second attempt
def runJobWithTemporaryFetchFailure: Unit = {
object FailThisAttempt {
val _fail = new AtomicBoolean(true)
}
val rdd1 = sc.makeRDD(Array(1, 2, 3, 4), 2).map(x => (x, 1)).groupByKey()
val shuffleHandle =
rdd1.dependencies.head.asInstanceOf[ShuffleDependency[_, _, _]].shuffleHandle
rdd1.map {
case (x, _) if (x == 1) && FailThisAttempt._fail.getAndSet(false) =>
throw new FetchFailedException(
BlockManagerId("1", "1", 1), shuffleHandle.shuffleId, 0, 0, "test")
}
}
failAfter(10.seconds) {
val e = intercept[SparkException] {
runJobWithPersistentFetchFailure
}
assert(e.getMessage.contains("org.apache.spark.shuffle.FetchFailedException"))
}
// Run a second job that will fail due to a fetch failure.
// This job will hang without the fix for SPARK-17644.
failAfter(10.seconds) {
val e = intercept[SparkException] {
runJobWithPersistentFetchFailure
}
assert(e.getMessage.contains("org.apache.spark.shuffle.FetchFailedException"))
}
failAfter(10.seconds) {
try {
runJobWithTemporaryFetchFailure
} catch {
case e: Throwable => fail("A job with one fetch failure should eventually succeed")
}
}
}
test("[SPARK-19263] DAGScheduler should not submit multiple active tasksets," +
" even with late completions from earlier stage attempts") {
// Create 3 RDDs with shuffle dependencies on each other: rddA <--- rddB <--- rddC
val rddA = new MyRDD(sc, 2, Nil)
val shuffleDepA = new ShuffleDependency(rddA, new HashPartitioner(2))
val shuffleIdA = shuffleDepA.shuffleId
val rddB = new MyRDD(sc, 2, List(shuffleDepA), tracker = mapOutputTracker)
val shuffleDepB = new ShuffleDependency(rddB, new HashPartitioner(2))
val rddC = new MyRDD(sc, 2, List(shuffleDepB), tracker = mapOutputTracker)
submit(rddC, Array(0, 1))
// Complete both tasks in rddA.
assert(taskSets(0).stageId === 0 && taskSets(0).stageAttemptId === 0)
complete(taskSets(0), Seq(
(Success, makeMapStatus("hostA", 2)),
(Success, makeMapStatus("hostA", 2))))
// Fetch failed for task(stageId=1, stageAttemptId=0, partitionId=0) running on hostA
// and task(stageId=1, stageAttemptId=0, partitionId=1) is still running.
assert(taskSets(1).stageId === 1 && taskSets(1).stageAttemptId === 0)
runEvent(makeCompletionEvent(
taskSets(1).tasks(0),
FetchFailed(makeBlockManagerId("hostA"), shuffleIdA, 0, 0,
"Fetch failure of task: stageId=1, stageAttempt=0, partitionId=0"),
result = null))
// Both original tasks in rddA should be marked as failed, because they ran on the
// failed hostA, so both should be resubmitted. Complete them on hostB successfully.
scheduler.resubmitFailedStages()
assert(taskSets(2).stageId === 0 && taskSets(2).stageAttemptId === 1
&& taskSets(2).tasks.size === 2)
complete(taskSets(2), Seq(
(Success, makeMapStatus("hostB", 2)),
(Success, makeMapStatus("hostB", 2))))
// Complete task(stageId=1, stageAttemptId=0, partitionId=1) running on failed hostA
// successfully. The success should be ignored because the task started before the
// executor failed, so the output may have been lost.
runEvent(makeCompletionEvent(
taskSets(1).tasks(1), Success, makeMapStatus("hostA", 2)))
// Both tasks in rddB should be resubmitted, because neither of them has truly succeeded.
// Complete the task(stageId=1, stageAttemptId=1, partitionId=0) successfully.
// Task(stageId=1, stageAttemptId=1, partitionId=1) of this new active stage attempt
// is still running.
assert(taskSets(3).stageId === 1 && taskSets(3).stageAttemptId === 1
&& taskSets(3).tasks.size === 2)
runEvent(makeCompletionEvent(
taskSets(3).tasks(0), Success, makeMapStatus("hostB", 2)))
// There should be no new attempt of stage submitted,
// because task(stageId=1, stageAttempt=1, partitionId=1) is still running in
// the current attempt (and hasn't completed successfully in any earlier attempts).
assert(taskSets.size === 4)
// Complete task(stageId=1, stageAttempt=1, partitionId=1) successfully.
runEvent(makeCompletionEvent(
taskSets(3).tasks(1), Success, makeMapStatus("hostB", 2)))
// Now the ResultStage should be submitted, because all of the tasks of rddB have
// completed successfully on alive executors.
assert(taskSets.size === 5 && taskSets(4).tasks(0).isInstanceOf[ResultTask[_, _]])
complete(taskSets(4), Seq(
(Success, 1),
(Success, 1)))
}
test("task end event should have updated accumulators (SPARK-20342)") {
val tasks = 10
val accumId = new AtomicLong()
val foundCount = new AtomicLong()
val listener = new SparkListener() {
override def onTaskEnd(event: SparkListenerTaskEnd): Unit = {
event.taskInfo.accumulables.find(_.id == accumId.get).foreach { _ =>
foundCount.incrementAndGet()
}
}
}
sc.addSparkListener(listener)
// Try a few times in a loop to make sure. This is not guaranteed to fail when the bug exists,
// but it should at least make the test flaky. If the bug is fixed, this should always pass.
(1 to 10).foreach { i =>
foundCount.set(0L)
val accum = sc.longAccumulator(s"accum$i")
accumId.set(accum.id)
sc.parallelize(1 to tasks, tasks).foreach { _ =>
accum.add(1L)
}
sc.listenerBus.waitUntilEmpty(1000)
assert(foundCount.get() === tasks)
}
}
/**
* Assert that the supplied TaskSet has exactly the given hosts as its preferred locations.
* Note that this checks only the host and not the executor ID.
*/
private def assertLocations(taskSet: TaskSet, hosts: Seq[Seq[String]]) {
assert(hosts.size === taskSet.tasks.size)
for ((taskLocs, expectedLocs) <- taskSet.tasks.map(_.preferredLocations).zip(hosts)) {
assert(taskLocs.map(_.host).toSet === expectedLocs.toSet)
}
}
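// Verifies that the DAGScheduler has cleaned up all of its bookkeeping (active jobs, stage maps,
// running/waiting/failed stages, and the output commit coordinator) once the jobs above complete.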
private def assertDataStructuresEmpty(): Unit = {
assert(scheduler.activeJobs.isEmpty)
assert(scheduler.failedStages.isEmpty)
assert(scheduler.jobIdToActiveJob.isEmpty)
assert(scheduler.jobIdToStageIds.isEmpty)
assert(scheduler.stageIdToStage.isEmpty)
assert(scheduler.runningStages.isEmpty)
assert(scheduler.shuffleIdToMapStage.isEmpty)
assert(scheduler.waitingStages.isEmpty)
assert(scheduler.outputCommitCoordinator.isEmpty)
}
// Nothing in this test should break if the task info's fields are null, but
// OutputCommitCoordinator requires the task info itself to not be null.
private def createFakeTaskInfo(): TaskInfo = {
val info = new TaskInfo(0, 0, 0, 0L, "", "", TaskLocality.ANY, false)
info.finishTime = 1 // to prevent spurious errors in JobProgressListener
info
}
private def createFakeTaskInfoWithId(taskId: Long): TaskInfo = {
val info = new TaskInfo(taskId, 0, 0, 0L, "", "", TaskLocality.ANY, false)
info.finishTime = 1 // to prevent spurious errors in JobProgressListener
info
}
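// Builds a CompletionEvent for the given task. Accumulator updates come from the task's metrics
// on Success, from the failure itself for an ExceptionFailure, and are empty otherwise;
// extraAccumUpdates are appended on top of those.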
private def makeCompletionEvent(
task: Task[_],
reason: TaskEndReason,
result: Any,
extraAccumUpdates: Seq[AccumulatorV2[_, _]] = Seq.empty,
taskInfo: TaskInfo = createFakeTaskInfo()): CompletionEvent = {
val accumUpdates = reason match {
case Success => task.metrics.accumulators()
case ef: ExceptionFailure => ef.accums
case _ => Seq.empty
}
CompletionEvent(task, reason, result, accumUpdates ++ extraAccumUpdates, taskInfo)
}
}
object DAGSchedulerSuite {
def makeMapStatus(host: String, reduces: Int, sizes: Byte = 2): MapStatus =
MapStatus(makeBlockManagerId(host), Array.fill[Long](reduces)(sizes))
def makeBlockManagerId(host: String): BlockManagerId =
BlockManagerId("exec-" + host, host, 12345)
}
| aokolnychyi/spark | core/src/test/scala/org/apache/spark/scheduler/DAGSchedulerSuite.scala | Scala | apache-2.0 | 104,092 |
package com.socrata.datacoordinator.secondary.feedback
import com.rojoma.json.io.JsonLexException
import com.rojoma.json.util.JsonArrayIterator.ElementDecodeException
import com.rojoma.json.v3.ast._
import com.rojoma.json.v3.codec.JsonEncode
import com.rojoma.json.v3.interpolation._
import com.rojoma.simplearm.v2.{ResourceScope, using}
import com.socrata.datacoordinator.id.UserColumnId
import com.socrata.datacoordinator.secondary
import com.socrata.datacoordinator.secondary.feedback.monitor.StatusMonitor
import com.socrata.datacoordinator.util.collection.MutableColumnIdMap
case class Row[CV](data: secondary.Row[CV], oldData: Option[secondary.Row[CV]])
sealed abstract class FeedbackResult
case class Success(feedbackCookie: FeedbackCookie) extends FeedbackResult
case class ReplayLater(reason: String, feedbackCookie: FeedbackCookie => FeedbackCookie) extends FeedbackResult
case class BrokenDataset(reason: String, feedbackCookie: FeedbackCookie => FeedbackCookie) extends FeedbackResult
case class FeedbackError(reason: String, cause: Throwable) extends FeedbackResult
object FeedbackContext {
def apply[CT,CV](user: String,
batchSize: Int => Int,
statusMonitor: StatusMonitor,
computationHandlers: Seq[ComputationHandler[CT,CV]],
computationRetryLimit: Int,
dataCoordinator: (String, CT => JValue => Option[CV]) => DataCoordinatorClient[CT,CV],
dataCoordinatorRetryLimit: Int,
datasetContext: (String, CV => JValue, CT => JValue => Option[CV], CV => Int)): FeedbackCookie => FeedbackContext[CT,CV] = {
val (datasetInternalName, toJValueFunc, fromJValueFunc, estimateValueSize) = datasetContext
new FeedbackContext(
user,
batchSize,
statusMonitor,
computationHandlers,
computationRetryLimit,
dataCoordinator,
dataCoordinatorRetryLimit,
datasetInternalName,
toJValueFunc,
fromJValueFunc,
estimateValueSize,
_
)
}
}
class FeedbackContext[CT,CV](user: String,
batchSize: Int => Int,
statusMonitor: StatusMonitor,
computationHandlers: Seq[ComputationHandler[CT,CV]],
computationRetryLimit: Int,
dataCoordinator: (String, CT => JValue => Option[CV]) => DataCoordinatorClient[CT,CV],
dataCoordinatorRetryLimit: Int,
datasetInternalName: String,
toJValueFunc: CV => JValue,
fromJValueFunc: CT => JValue => Option[CV],
estimateValueSize: CV => Int,
currentCookie: FeedbackCookie) {
private def success(current: CookieSchema): Success = {
Success(currentCookie.copyCurrent(
current = current,
computationRetriesLeft = computationRetryLimit,
dataCoordinatorRetriesLeft = dataCoordinatorRetryLimit,
resync = false,
errorMessage = None
))
}
private def replayLater(reason: String, resync: Boolean): ReplayLater = {
ReplayLater(reason, _.copyCurrent(resync = resync, errorMessage = Some(reason)))
}
private def replayComputation(reason: String, resync: Boolean): ReplayLater = {
ReplayLater(reason, { feedbackCookie =>
feedbackCookie.copyCurrent(
computationRetriesLeft = feedbackCookie.current.computationRetriesLeft - 1,
dataCoordinatorRetriesLeft = dataCoordinatorRetryLimit,
resync = resync,
errorMessage = Some(reason))
})
}
private def replayDataCoordinator(reason: String, resync: Boolean): ReplayLater = {
ReplayLater(reason, { feedbackCookie =>
feedbackCookie.copyCurrent(
computationRetriesLeft = computationRetryLimit,
dataCoordinatorRetriesLeft = feedbackCookie.current.dataCoordinatorRetriesLeft - 1,
resync = resync,
errorMessage = Some(reason))
})
}
private def brokenDataset(reason: String, resync: Boolean): BrokenDataset = {
BrokenDataset(reason, _.copyCurrent(resync = resync, errorMessage = Some(reason)))
}
val log = org.slf4j.LoggerFactory.getLogger(classOf[FeedbackContext[CT, CV]])
private var cookie = currentCookie.current
val dataCoordinatorClient = dataCoordinator(datasetInternalName, fromJValueFunc)
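// Merges two maps, combining the values of keys present in both with f.
// Illustrative example (hypothetical keys and values):
//   mergeWith(Map(1 -> "a"), Map(1 -> "b", 2 -> "c"))(_ + _) == Map(1 -> "ab", 2 -> "c")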
def mergeWith[A, B](xs: Map[A, B], ys: Map[A, B])(f: (B, B) => B): Map[A, B] = {
ys.foldLeft(xs) { (combined, yab) =>
val (a, yb) = yab
val newB = combined.get(a) match {
case None => yb
case Some(xb) => f(xb, yb)
}
combined.updated(a, newB)
}
}
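// For the given rows, runs the computation handler over every target column whose strategy it
// matches, skipping rows whose dependent (source + target) columns are unchanged. Returns a map
// from row index to the new column values for that row, dropping values that already match the
// current row and always including the row's system id so the update can be addressed by it.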
private def computeUpdates(computationHandler: ComputationHandler[CT, CV],
rows: IndexedSeq[Row[CV]],
targetColumns: Set[UserColumnId]): Either[ComputationFailure, Map[Int, Map[UserColumnId, CV]]] = {
val perDatasetData = computationHandler.setupDataset(cookie)
val perColumnDataAndDependentColumns = cookie.strategyMap.toSeq.collect {
case (targetCol, strat) if targetColumns.contains(targetCol) & computationHandler.matchesStrategyType(strat.strategyType) =>
(computationHandler.setupColumn(perDatasetData, strat, targetCol), strat.sourceColumnIds :+ targetCol)
}
val toCompute = rows.iterator.zipWithIndex.flatMap { case (row, index) =>
val rcis =
perColumnDataAndDependentColumns.flatMap { case (columnData, dependentColumns) =>
// Don't compute if there has been no change to the dependent columns (the source columns plus the target column).
// The new value of a target or source column equals the existing value when the update does not contain that field,
// so there is no change when the columns either hold the same values or are simply not passed in.
if (noChange(row, dependentColumns))
None
else
Some(computationHandler.setupCell(columnData, row))
}
if (rcis.isEmpty) Iterator.empty
else Iterator.single(index -> rcis)
}.toMap
computationHandler.compute(toCompute) match {
case Right(results) =>
Right(results.flatMap { case (rowIdx, updates) =>
val row = rows(rowIdx)
val filtered = updates.filter { case (colId: UserColumnId, value) =>
extractCV(row.data, colId) != Some(value) // don't update to current value
}
val rowId = extractCV(row.data, cookie.systemId) match {
case None => throw new Exception(s"Cannot find value for system id ${cookie.systemId} in row: $row!") // throwing as exception because this should not happen _ever_
case Some(other) => other
}
if (filtered.nonEmpty) {
val newRow = Map(
cookie.systemId -> rowId
) ++ filtered
Some(rowIdx -> newRow)
} else {
None
}
})
case failure => failure
}
}
private def noChange(row: Row[CV], columns: Seq[UserColumnId]): Boolean = row.oldData match {
case Some(old) =>
columns.forall { id =>
val internal = cookie.columnIdMap(id)
row.data(internal) == old(internal) // safe because updates contain _all_ row values (and this must be one)
}
case None => false
}
private def extractCV(row: secondary.Row[CV], userId: UserColumnId): Option[CV] = {
val internal = cookie.columnIdMap(userId)
row.get(internal)
}
// commands for mutation scripts
private val commands: Seq[JValue] =
j"""[ { "c" : "normal", "user" : $user }
, { "c" : "row data", "update_only" : true, "by_system_id" : true, "nonfatal_row_errors" : [ "insert_in_update_only", "no_such_row_to_update" ] }
]""".toSeq
private def writeMutationScript(rowUpdates: Iterator[JValue]): Option[JArray] = {
if (!rowUpdates.hasNext) None
else Some(JArray(commands ++ rowUpdates))
}
private def estimateSecondarySize(row: secondary.Row[CV]) =
row.values.map(estimateValueSize).sum
private def estimateSize(row: Row[CV]): Long = {
val Row(data, oldData) = row
estimateSecondarySize(data) + oldData.fold(0L)(estimateSecondarySize)
}
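// Groups the incoming rows into batches, accumulating rows until the estimated size reaches
// batchSize (or the input is exhausted); every batch contains at least one row, so a single
// oversized row still forms its own batch.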
private def batchRows(rows: Iterator[Row[CV]], batchSize: Long): Iterator[Vector[Row[CV]]] =
new Iterator[Vector[Row[CV]]] {
def hasNext = rows.hasNext
def next() = {
val result = Vector.newBuilder[Row[CV]]
var total = 0L
do {
val row = rows.next()
result += row
total += estimateSize(row)
} while(total < batchSize && rows.hasNext)
result.result()
}
}
// this may throw a ReplayLaterSecondaryException or a BrokenDatasetSecondaryException
def feedback(rows: Iterator[Row[CV]],
targetColumns: Set[UserColumnId] = cookie.strategyMap.keySet,
resync: Boolean = false): FeedbackResult = {
val width = cookie.columnIdMap.size
val size = batchSize(width)
log.info("Feeding back rows with batch_size = {} for target computed columns: {}", size, targetColumns)
var count = 0
batchRows(rows, size).foreach { batch =>
count += 1
val updates = computationHandlers.foldLeft(Map.empty[Int, Map[UserColumnId, CV]]) { (currentUpdates, computationHandler) =>
val currentRows = batch.toArray
for ((idx, upds) <- currentUpdates) {
val currentRow = MutableColumnIdMap(currentRows(idx).data)
for ((cid, cv) <- upds) {
currentRow(cookie.columnIdMap(cid)) = cv
}
currentRows(idx) = currentRows(idx).copy(data = currentRow.freeze())
}
computeUpdates(computationHandler, currentRows, targetColumns) match {
case Right(newUpdates) =>
mergeWith(currentUpdates, newUpdates)(_ ++ _)
case Left(ComputationError(reason, cause)) =>
// Some failure has occurred in computation; we will only retry these exceptions so many times
return replayComputation(reason, resync)
case Left(FatalComputationError(reason, cause)) =>
// Some fatal failure has occurred in computation; the dataset should be marked broken
return brokenDataset(reason, resync)
}
}
val jsonUpdates = updates.valuesIterator.map { updates =>
JsonEncode.toJValue(updates.mapValues(toJValueFunc))
}
writeMutationScript(jsonUpdates) match {
case Some(script) =>
dataCoordinatorClient.postMutationScript(script, cookie) match {
case None =>
log.info("Finished batch {} of {} rows", count, batch.length)
statusMonitor.update(datasetInternalName, cookie.dataVersion, size, count)
case Some(Right(TargetColumnDoesNotExist(column))) =>
// this is pretty lame; but better than doing a full resync
val deleted = deleteColumns(Set(column))
computeColumns(targetColumns -- deleted)
case Some(Left(ftddc@FailedToDiscoverDataCoordinator)) =>
log.warn("Failed to discover data-coordinator; going to request to have this dataset replayed later")
// we will retry this "indefinitely"; do not decrement retries left
return replayLater(ftddc.english, resync)
case Some(Left(ddb@DataCoordinatorBusy)) =>
log.info("Received a 409 from data-coordinator; going to request to have this dataset replayed later")
// we will retry this "indefinitely"; do not decrement retries left
return replayLater(ddb.english, resync)
case Some(Left(ddne@DatasetDoesNotExist)) =>
log.info("Completed updating dataset {} to version {} early: {}", datasetInternalName, cookie.dataVersion.underlying.toString, ddne.english)
statusMonitor.remove(datasetInternalName, cookie.dataVersion, count)
// nothing more to do here
return success(cookie)
case Some(Left(UnexpectedError(reason, cause))) =>
// We failed to post our mutation script to data-coordinator for some reason
return replayDataCoordinator(reason, resync) // TODO: use cause
}
case None =>
log.info("Batch {} had no rows to update of {} rows; no script posted.", count, batch.length)
statusMonitor.update(datasetInternalName, cookie.dataVersion, size, count)
}
}
log.info("Completed row update of dataset {} in version {} after batch: {}",
datasetInternalName, cookie.dataVersion.underlying.toString, count.toString)
statusMonitor.remove(datasetInternalName, cookie.dataVersion, count)
// success!
success(cookie)
}
def flushColumnCreations(newColumns: Set[UserColumnId]): FeedbackResult = {
if (newColumns.nonEmpty) {
log.info("Flushing newly created columns...")
val result = computeColumns(newColumns)
log.info("Done flushing columns")
result
} else {
success(cookie)
}
}
// this is "safe" because we must be caught up before a publication stage change can be made
private def deleteColumns(columns: Set[UserColumnId]): Set[UserColumnId] = {
log.info("Columns have been deleted in truth: {}; updating the cookie.", columns)
val (deleted, reliant, newCookie) = CookieOperator.deleteColumns(columns, cookie)
if (reliant.nonEmpty) {
log.info("Reliant computed columns will have also been deleted: {}; updating the cookie.", reliant)
log.info("Will attempt to retry with remaining computed columns...")
}
cookie = newCookie
deleted // all deleted columns and reliant computed columns that must also be deleted
}
private def computeColumns(targetColumns: Set[UserColumnId]): FeedbackResult = {
if (targetColumns.nonEmpty) {
log.info("Computing columns: {}", targetColumns)
val sourceColumns = targetColumns.map(cookie.strategyMap(_)).flatMap(_.sourceColumnIds).toSet.toSeq // .toSet for uniqueness
// always ask for the system id for two reasons:
// - if the dataset has a user primary key, the system id will not be automatically returned
// - if the computed columns don't have any source columns, we still need the system id column
val columns = Seq(cookie.systemId) ++ sourceColumns
val feedbackResult = using(new ResourceScope("compute new columns")) { resourceScope => try {
dataCoordinatorClient.exportRows(columns, cookie, resourceScope) match {
case Right(Right(RowData(_, rows))) =>
val result = feedback(rows.map { row => Row(row, None) }, targetColumns)
log.info("Done computing columns")
result
case Right(Left(ColumnsDoNotExist(unknown))) =>
// since source columns must be deleted after computed columns
// just delete those unknown columns and associated computed columns from our cookie
// we'll get the event replayed to us later
val deleted = deleteColumns(unknown)
computeColumns(targetColumns -- deleted) // try to compute the un-deleted columns again
case Left(ftddc@FailedToDiscoverDataCoordinator) =>
log.warn("Failed to discover data-coordinator; going to request to have this dataset replayed later")
// we will retry this "indefinitely"; do not decrement retries left
replayLater(ftddc.english, resync = false)
case Left(ddb@DataCoordinatorBusy) =>
log.info("Received a 409 from data-coordinator; going to request to have this dataset replayed later")
// we will retry this "indefinitely"; do not decrement retries left
replayLater(ddb.english, resync = false)
case Left(ddne@DatasetDoesNotExist) =>
log.info("Completed updating dataset {} to version {} early: {}", datasetInternalName, cookie.dataVersion.underlying.toString, ddne.english)
// nothing more to do here
success(cookie)
case Left(ue@UnexpectedError(_, cause)) =>
log.error("Unexpected error from data-coordinator client")
// this is unexpected, we will throw an exception and use the SW retry logic
FeedbackError(ue.english, cause)
}
} catch {
case e: JsonLexException =>
val message = s"Unable to parse response from data-coordinator as JSON: ${e.message}"
log.error(message)
FeedbackError(message, e)
case e: ElementDecodeException =>
val message = s"Unable to parse element in array from data-coordinator as JSON: ${e.position}"
log.error(message)
FeedbackError(message, e)
}}
feedbackResult
} else {
success(cookie)
}
}
}
object CookieOperator {
// returns all deleted columns, deleted computed columns, and the resulting cookie schema
def deleteColumns(columns: Set[UserColumnId], cookie: CookieSchema): (Set[UserColumnId], Set[UserColumnId], CookieSchema) = {
val reverseStrategyMap = scala.collection.mutable.Map[UserColumnId, Set[UserColumnId]]()
cookie.strategyMap.foreach { case (target, strategy) =>
strategy.sourceColumnIds.foreach { source =>
reverseStrategyMap.put(source, reverseStrategyMap.getOrElse(source, Set.empty) + target)
}
}
// include starting column
def findReliantColumns(column: UserColumnId): Set[UserColumnId] = {
val reliant = scala.collection.mutable.Set(column)
val queue = scala.collection.mutable.Queue[UserColumnId](column)
while (queue.nonEmpty) {
reverseStrategyMap.getOrElse(queue.dequeue(), Set.empty).foreach { parent =>
if (reliant.add(parent)) queue.enqueue(parent)
}
}
reliant.toSet
}
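// Illustrative example (hypothetical column ids): if c2 is computed from c1 and c3 is computed
// from c2, then findReliantColumns(c1) == Set(c1, c2, c3), so deleting c1 also removes c2 and c3.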
val deleted = columns.flatMap(findReliantColumns) // includes starting columns
val reliant = deleted.filter(cookie.strategyMap.contains)
val resultCookie = cookie.copy(
columnIdMap = cookie.columnIdMap -- deleted,
strategyMap = cookie.strategyMap -- reliant
)
(deleted, reliant, resultCookie) // all deleted columns and reliant computed columns that must also be deleted and the resulting cookie
}
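  // Editor's illustration (hypothetical column ids, not taken from the original source): if the
  // cookie's strategy map derives a computed column "full_address" from source columns "street"
  // and "city", deleting "street" cascades through the reverse strategy map built above:
  //   deleteColumns(Set(street), cookie)
  //   // -> (Set(street, full_address), Set(full_address), cookie with both columns removed)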
}
| socrata-platform/data-coordinator | secondarylib-feedback/src/main/scala/com/socrata/datacoordinator/secondary/feedback/FeedbackContext.scala | Scala | apache-2.0 | 18,375 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.admin
import java.nio.charset.StandardCharsets
import java.util.concurrent.TimeUnit
import java.util.{Collections, Properties}
import joptsimple._
import kafka.common.Config
import kafka.log.LogConfig
import kafka.server.DynamicConfig.QuotaConfigs
import kafka.server.{ConfigEntityName, ConfigType, Defaults, DynamicBrokerConfig, DynamicConfig, KafkaConfig}
import kafka.utils.{CommandDefaultOptions, CommandLineUtils, Exit, PasswordEncoder}
import kafka.utils.Implicits._
import kafka.zk.{AdminZkClient, KafkaZkClient}
import org.apache.kafka.clients.admin.{Admin, AlterClientQuotasOptions, AlterConfigOp, AlterConfigsOptions, ConfigEntry, DescribeClusterOptions, DescribeConfigsOptions, ListTopicsOptions, ScramCredentialInfo, UserScramCredentialDeletion, UserScramCredentialUpsertion, Config => JConfig, ScramMechanism => PublicScramMechanism}
import org.apache.kafka.clients.CommonClientConfigs
import org.apache.kafka.common.config.ConfigResource
import org.apache.kafka.common.config.types.Password
import org.apache.kafka.common.errors.InvalidConfigurationException
import org.apache.kafka.common.internals.Topic
import org.apache.kafka.common.quota.{ClientQuotaAlteration, ClientQuotaEntity, ClientQuotaFilter, ClientQuotaFilterComponent}
import org.apache.kafka.common.security.JaasUtils
import org.apache.kafka.common.security.scram.internals.{ScramCredentialUtils, ScramFormatter, ScramMechanism}
import org.apache.kafka.common.utils.{Sanitizer, Time, Utils}
import org.apache.zookeeper.client.ZKClientConfig
import scala.annotation.nowarn
import scala.jdk.CollectionConverters._
import scala.collection._
/**
* This script can be used to change configs for topics/clients/users/brokers/ips dynamically
* An entity described or altered by the command may be one of:
* <ul>
* <li> topic: --topic <topic> OR --entity-type topics --entity-name <topic>
* <li> client: --client <client> OR --entity-type clients --entity-name <client-id>
* <li> user: --user <user-principal> OR --entity-type users --entity-name <user-principal>
* <li> <user, client>: --user <user-principal> --client <client-id> OR
* --entity-type users --entity-name <user-principal> --entity-type clients --entity-name <client-id>
* <li> broker: --broker <broker-id> OR --entity-type brokers --entity-name <broker-id>
* <li> broker-logger: --broker-logger <broker-id> OR --entity-type broker-loggers --entity-name <broker-id>
* <li> ip: --ip <ip> OR --entity-type ips --entity-name <ip>
* </ul>
* --entity-type <users|clients|brokers|ips> --entity-default may be specified in place of --entity-type <users|clients|brokers|ips> --entity-name <entityName>
* when describing or altering default configuration for users, clients, brokers, or ips, respectively.
* Alternatively, --user-defaults, --client-defaults, --broker-defaults, or --ip-defaults may be specified in place of
* --entity-type <users|clients|brokers|ips> --entity-default, respectively.
*
 * For most use cases, this script communicates with a Kafka cluster (specified via the
* `--bootstrap-server` option). There are three exceptions where direct communication with a
* ZooKeeper ensemble (specified via the `--zookeeper` option) is allowed:
*
* 1. Describe/alter user configs where the config is a SCRAM mechanism name (i.e. a SCRAM credential for a user)
* 2. Describe/alter broker configs for a particular broker when that broker is down
* 3. Describe/alter broker default configs when all brokers are down
*
* For example, this allows password configs to be stored encrypted in ZK before brokers are started,
* avoiding cleartext passwords in `server.properties`.
*/
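/*
 * Illustrative invocations (editor's examples, not part of the upstream documentation; flag
 * spellings follow the option definitions in ConfigCommandOptions below):
 *
 *   # alter a dynamic topic config through the brokers
 *   bin/kafka-configs.sh --bootstrap-server localhost:9092 --alter --topic my-topic --add-config retention.ms=86400000
 *
 *   # describe the default quota applied to all users
 *   bin/kafka-configs.sh --bootstrap-server localhost:9092 --describe --entity-type users --entity-default
 *
 *   # store a SCRAM credential in ZooKeeper before any broker is started
 *   bin/kafka-configs.sh --zookeeper localhost:2181 --alter --entity-type users --entity-name alice \
 *     --add-config 'SCRAM-SHA-256=[iterations=8192,password=alice-secret]'
 */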
object ConfigCommand extends Config {
val BrokerDefaultEntityName = ""
val BrokerLoggerConfigType = "broker-loggers"
val BrokerSupportedConfigTypes = ConfigType.all :+ BrokerLoggerConfigType
val ZkSupportedConfigTypes = Seq(ConfigType.User, ConfigType.Broker)
val DefaultScramIterations = 4096
def main(args: Array[String]): Unit = {
try {
val opts = new ConfigCommandOptions(args)
CommandLineUtils.printHelpAndExitIfNeeded(opts, "This tool helps to manipulate and describe entity config for a topic, client, user, broker or ip")
opts.checkArgs()
if (opts.options.has(opts.zkConnectOpt)) {
println(s"Warning: --zookeeper is deprecated and will be removed in a future version of Kafka.")
println(s"Use --bootstrap-server instead to specify a broker to connect to.")
processCommandWithZk(opts.options.valueOf(opts.zkConnectOpt), opts)
} else {
processCommand(opts)
}
} catch {
case e @ (_: IllegalArgumentException | _: InvalidConfigurationException | _: OptionException) =>
logger.debug(s"Failed config command with args '${args.mkString(" ")}'", e)
System.err.println(e.getMessage)
Exit.exit(1)
case t: Throwable =>
logger.debug(s"Error while executing config command with args '${args.mkString(" ")}'", t)
System.err.println(s"Error while executing config command with args '${args.mkString(" ")}'")
t.printStackTrace(System.err)
Exit.exit(1)
}
}
private def processCommandWithZk(zkConnectString: String, opts: ConfigCommandOptions): Unit = {
val zkClientConfig = ZkSecurityMigrator.createZkClientConfigFromOption(opts.options, opts.zkTlsConfigFile)
.getOrElse(new ZKClientConfig())
val zkClient = KafkaZkClient(zkConnectString, JaasUtils.isZkSaslEnabled || KafkaConfig.zkTlsClientAuthEnabled(zkClientConfig), 30000, 30000,
Int.MaxValue, Time.SYSTEM, zkClientConfig = zkClientConfig, name = "ConfigCommand")
val adminZkClient = new AdminZkClient(zkClient)
try {
if (opts.options.has(opts.alterOpt))
alterConfigWithZk(zkClient, opts, adminZkClient)
else if (opts.options.has(opts.describeOpt))
describeConfigWithZk(zkClient, opts, adminZkClient)
} finally {
zkClient.close()
}
}
private[admin] def alterConfigWithZk(zkClient: KafkaZkClient, opts: ConfigCommandOptions, adminZkClient: AdminZkClient): Unit = {
val configsToBeAdded = parseConfigsToBeAdded(opts)
val configsToBeDeleted = parseConfigsToBeDeleted(opts)
val entity = parseEntity(opts)
val entityType = entity.root.entityType
val entityName = entity.fullSanitizedName
val errorMessage = s"--bootstrap-server option must be specified to update $entityType configs: {add: $configsToBeAdded, delete: $configsToBeDeleted}"
if (entityType == ConfigType.User) {
if (!configsToBeAdded.isEmpty || !configsToBeDeleted.isEmpty) {
val info = "User configuration updates using ZooKeeper are only supported for SCRAM credential updates."
val scramMechanismNames = ScramMechanism.values.map(_.mechanismName)
        // make sure every added/deleted config is SCRAM-related; other configs are not supported via ZooKeeper
require(configsToBeAdded.stringPropertyNames.asScala.forall(scramMechanismNames.contains),
s"$errorMessage. $info")
require(configsToBeDeleted.forall(scramMechanismNames.contains), s"$errorMessage. $info")
}
preProcessScramCredentials(configsToBeAdded)
} else if (entityType == ConfigType.Broker) {
// Dynamic broker configs can be updated using ZooKeeper only if the corresponding broker is not running.
if (!configsToBeAdded.isEmpty || !configsToBeDeleted.isEmpty) {
validateBrokersNotRunning(entityName, adminZkClient, zkClient, errorMessage)
val perBrokerConfig = entityName != ConfigEntityName.Default
preProcessBrokerConfigs(configsToBeAdded, perBrokerConfig)
}
}
// compile the final set of configs
val configs = adminZkClient.fetchEntityConfig(entityType, entityName)
// fail the command if any of the configs to be deleted does not exist
val invalidConfigs = configsToBeDeleted.filterNot(configs.containsKey(_))
if (invalidConfigs.nonEmpty)
throw new InvalidConfigurationException(s"Invalid config(s): ${invalidConfigs.mkString(",")}")
configs ++= configsToBeAdded
configsToBeDeleted.foreach(configs.remove(_))
adminZkClient.changeConfigs(entityType, entityName, configs)
println(s"Completed updating config for entity: $entity.")
}
private def validateBrokersNotRunning(entityName: String,
adminZkClient: AdminZkClient,
zkClient: KafkaZkClient,
errorMessage: String): Unit = {
val perBrokerConfig = entityName != ConfigEntityName.Default
val info = "Broker configuration operations using ZooKeeper are only supported if the affected broker(s) are not running."
if (perBrokerConfig) {
adminZkClient.parseBroker(entityName).foreach { brokerId =>
require(zkClient.getBroker(brokerId).isEmpty, s"$errorMessage - broker $brokerId is running. $info")
}
} else {
val runningBrokersCount = zkClient.getAllBrokersInCluster.size
require(runningBrokersCount == 0, s"$errorMessage - $runningBrokersCount brokers are running. $info")
}
}
private def preProcessScramCredentials(configsToBeAdded: Properties): Unit = {
def scramCredential(mechanism: ScramMechanism, credentialStr: String): String = {
val pattern = "(?:iterations=([0-9]*),)?password=(.*)".r
val (iterations, password) = credentialStr match {
case pattern(iterations, password) => (if (iterations != null) iterations.toInt else DefaultScramIterations, password)
case _ => throw new IllegalArgumentException(s"Invalid credential property $mechanism=$credentialStr")
}
if (iterations < mechanism.minIterations())
throw new IllegalArgumentException(s"Iterations $iterations is less than the minimum ${mechanism.minIterations()} required for $mechanism")
val credential = new ScramFormatter(mechanism).generateCredential(password, iterations)
ScramCredentialUtils.credentialToString(credential)
}
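    // Editor's illustration (hypothetical values): a property such as
    //   SCRAM-SHA-256 -> "iterations=8192,password=alice-secret"
    // is rewritten in place below to the persisted credential string (salt, stored key, server key,
    // iterations) produced by ScramFormatter/ScramCredentialUtils.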
for (mechanism <- ScramMechanism.values) {
configsToBeAdded.getProperty(mechanism.mechanismName) match {
case null =>
case value =>
configsToBeAdded.setProperty(mechanism.mechanismName, scramCredential(mechanism, value))
}
}
}
private[admin] def createPasswordEncoder(encoderConfigs: Map[String, String]): PasswordEncoder = {
val encoderSecret = encoderConfigs.getOrElse(KafkaConfig.PasswordEncoderSecretProp,
throw new IllegalArgumentException("Password encoder secret not specified"))
new PasswordEncoder(new Password(encoderSecret),
None,
encoderConfigs.get(KafkaConfig.PasswordEncoderCipherAlgorithmProp).getOrElse(Defaults.PasswordEncoderCipherAlgorithm),
encoderConfigs.get(KafkaConfig.PasswordEncoderKeyLengthProp).map(_.toInt).getOrElse(Defaults.PasswordEncoderKeyLength),
encoderConfigs.get(KafkaConfig.PasswordEncoderIterationsProp).map(_.toInt).getOrElse(Defaults.PasswordEncoderIterations))
}
/**
* Pre-process broker configs provided to convert them to persistent format.
* Password configs are encrypted using the secret `KafkaConfig.PasswordEncoderSecretProp`.
* The secret is removed from `configsToBeAdded` and will not be persisted in ZooKeeper.
*/
private def preProcessBrokerConfigs(configsToBeAdded: Properties, perBrokerConfig: Boolean): Unit = {
val passwordEncoderConfigs = new Properties
passwordEncoderConfigs ++= configsToBeAdded.asScala.filter { case (key, _) => key.startsWith("password.encoder.") }
if (!passwordEncoderConfigs.isEmpty) {
info(s"Password encoder configs ${passwordEncoderConfigs.keySet} will be used for encrypting" +
" passwords, but will not be stored in ZooKeeper.")
passwordEncoderConfigs.asScala.keySet.foreach(configsToBeAdded.remove)
}
DynamicBrokerConfig.validateConfigs(configsToBeAdded, perBrokerConfig)
val passwordConfigs = configsToBeAdded.asScala.keySet.filter(DynamicBrokerConfig.isPasswordConfig)
if (passwordConfigs.nonEmpty) {
require(passwordEncoderConfigs.containsKey(KafkaConfig.PasswordEncoderSecretProp),
s"${KafkaConfig.PasswordEncoderSecretProp} must be specified to update $passwordConfigs." +
" Other password encoder configs like cipher algorithm and iterations may also be specified" +
" to override the default encoding parameters. Password encoder configs will not be persisted" +
" in ZooKeeper."
)
val passwordEncoder = createPasswordEncoder(passwordEncoderConfigs.asScala)
passwordConfigs.foreach { configName =>
val encodedValue = passwordEncoder.encode(new Password(configsToBeAdded.getProperty(configName)))
configsToBeAdded.setProperty(configName, encodedValue)
}
}
}
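  // Editor's illustration (hypothetical values): altering a broker with
  //   --add-config listener.name.external.ssl.keystore.password=secret,password.encoder.secret=encoder-secret
  // encrypts the keystore password with the supplied encoder secret before it is written to ZooKeeper,
  // while the password.encoder.* entries themselves are stripped and never persisted.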
private[admin] def describeConfigWithZk(zkClient: KafkaZkClient, opts: ConfigCommandOptions, adminZkClient: AdminZkClient): Unit = {
val configEntity = parseEntity(opts)
val entityType = configEntity.root.entityType
val describeAllUsers = entityType == ConfigType.User && !configEntity.root.sanitizedName.isDefined && !configEntity.child.isDefined
val entityName = configEntity.fullSanitizedName
val errorMessage = s"--bootstrap-server option must be specified to describe $entityType"
if (entityType == ConfigType.Broker) {
// Dynamic broker configs can be described using ZooKeeper only if the corresponding broker is not running.
validateBrokersNotRunning(entityName, adminZkClient, zkClient, errorMessage)
}
val entities = configEntity.getAllEntities(zkClient)
for (entity <- entities) {
val configs = adminZkClient.fetchEntityConfig(entity.root.entityType, entity.fullSanitizedName)
// When describing all users, don't include empty user nodes with only <user, client> quota overrides.
if (!configs.isEmpty || !describeAllUsers) {
println("Configs for %s are %s"
.format(entity, configs.asScala.map(kv => kv._1 + "=" + kv._2).mkString(",")))
}
}
}
@nowarn("cat=deprecation")
private[admin] def parseConfigsToBeAdded(opts: ConfigCommandOptions): Properties = {
val props = new Properties
if (opts.options.has(opts.addConfigFile)) {
val file = opts.options.valueOf(opts.addConfigFile)
props ++= Utils.loadProps(file)
}
if (opts.options.has(opts.addConfig)) {
// Split list by commas, but avoid those in [], then into KV pairs
// Each KV pair is of format key=value, split them into key and value, using -1 as the limit for split() to
// include trailing empty strings. This is to support empty value (e.g. 'ssl.endpoint.identification.algorithm=')
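      // Editor's illustration (hypothetical input): "retention.ms=100,cleanup.policy=[compact,delete]"
      // splits into ("retention.ms" -> "100") and ("cleanup.policy" -> "[compact,delete]"); the
      // replaceAll below then strips the brackets so the stored value is "compact,delete".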
val pattern = "(?=[^\\\\]]*(?:\\\\[|$))"
val configsToBeAdded = opts.options.valueOf(opts.addConfig)
.split("," + pattern)
.map(_.split("""\\s*=\\s*""" + pattern, -1))
require(configsToBeAdded.forall(config => config.length == 2), "Invalid entity config: all configs to be added must be in the format \\"key=val\\".")
//Create properties, parsing square brackets from values if necessary
configsToBeAdded.foreach(pair => props.setProperty(pair(0).trim, pair(1).replaceAll("\\\\[?\\\\]?", "").trim))
}
if (props.containsKey(LogConfig.MessageFormatVersionProp)) {
println(s"WARNING: The configuration ${LogConfig.MessageFormatVersionProp}=${props.getProperty(LogConfig.MessageFormatVersionProp)} is specified. " +
"This configuration will be ignored if the version is newer than the inter.broker.protocol.version specified in the broker or " +
"if the inter.broker.protocol.version is 3.0 or newer. This configuration is deprecated and it will be removed in Apache Kafka 4.0.")
}
props
}
private[admin] def parseConfigsToBeDeleted(opts: ConfigCommandOptions): Seq[String] = {
if (opts.options.has(opts.deleteConfig)) {
val configsToBeDeleted = opts.options.valuesOf(opts.deleteConfig).asScala.map(_.trim())
configsToBeDeleted
}
else
Seq.empty
}
private def processCommand(opts: ConfigCommandOptions): Unit = {
val props = if (opts.options.has(opts.commandConfigOpt))
Utils.loadProps(opts.options.valueOf(opts.commandConfigOpt))
else
new Properties()
props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, opts.options.valueOf(opts.bootstrapServerOpt))
val adminClient = Admin.create(props)
if (opts.options.has(opts.alterOpt) && opts.entityTypes.size != opts.entityNames.size)
throw new IllegalArgumentException(s"An entity name must be specified for every entity type")
try {
if (opts.options.has(opts.alterOpt))
alterConfig(adminClient, opts)
else if (opts.options.has(opts.describeOpt))
describeConfig(adminClient, opts)
} finally {
adminClient.close()
}
}
@nowarn("cat=deprecation")
private[admin] def alterConfig(adminClient: Admin, opts: ConfigCommandOptions): Unit = {
val entityTypes = opts.entityTypes
val entityNames = opts.entityNames
val entityTypeHead = entityTypes.head
val entityNameHead = entityNames.head
val configsToBeAddedMap = parseConfigsToBeAdded(opts).asScala.toMap // no need for mutability
val configsToBeAdded = configsToBeAddedMap.map { case (k, v) => (k, new ConfigEntry(k, v)) }
val configsToBeDeleted = parseConfigsToBeDeleted(opts)
entityTypeHead match {
case ConfigType.Topic =>
val oldConfig = getResourceConfig(adminClient, entityTypeHead, entityNameHead, includeSynonyms = false, describeAll = false)
.map { entry => (entry.name, entry) }.toMap
// fail the command if any of the configs to be deleted does not exist
val invalidConfigs = configsToBeDeleted.filterNot(oldConfig.contains)
if (invalidConfigs.nonEmpty)
throw new InvalidConfigurationException(s"Invalid config(s): ${invalidConfigs.mkString(",")}")
val configResource = new ConfigResource(ConfigResource.Type.TOPIC, entityNameHead)
val alterOptions = new AlterConfigsOptions().timeoutMs(30000).validateOnly(false)
val alterEntries = (configsToBeAdded.values.map(new AlterConfigOp(_, AlterConfigOp.OpType.SET))
++ configsToBeDeleted.map { k => new AlterConfigOp(new ConfigEntry(k, ""), AlterConfigOp.OpType.DELETE) }
).asJavaCollection
adminClient.incrementalAlterConfigs(Map(configResource -> alterEntries).asJava, alterOptions).all().get(60, TimeUnit.SECONDS)
case ConfigType.Broker =>
val oldConfig = getResourceConfig(adminClient, entityTypeHead, entityNameHead, includeSynonyms = false, describeAll = false)
.map { entry => (entry.name, entry) }.toMap
// fail the command if any of the configs to be deleted does not exist
val invalidConfigs = configsToBeDeleted.filterNot(oldConfig.contains)
if (invalidConfigs.nonEmpty)
throw new InvalidConfigurationException(s"Invalid config(s): ${invalidConfigs.mkString(",")}")
val newEntries = oldConfig ++ configsToBeAdded -- configsToBeDeleted
val sensitiveEntries = newEntries.filter(_._2.value == null)
if (sensitiveEntries.nonEmpty)
throw new InvalidConfigurationException(s"All sensitive broker config entries must be specified for --alter, missing entries: ${sensitiveEntries.keySet}")
val newConfig = new JConfig(newEntries.asJava.values)
val configResource = new ConfigResource(ConfigResource.Type.BROKER, entityNameHead)
val alterOptions = new AlterConfigsOptions().timeoutMs(30000).validateOnly(false)
adminClient.alterConfigs(Map(configResource -> newConfig).asJava, alterOptions).all().get(60, TimeUnit.SECONDS)
case BrokerLoggerConfigType =>
val validLoggers = getResourceConfig(adminClient, entityTypeHead, entityNameHead, includeSynonyms = true, describeAll = false).map(_.name)
// fail the command if any of the configured broker loggers do not exist
val invalidBrokerLoggers = configsToBeDeleted.filterNot(validLoggers.contains) ++ configsToBeAdded.keys.filterNot(validLoggers.contains)
if (invalidBrokerLoggers.nonEmpty)
throw new InvalidConfigurationException(s"Invalid broker logger(s): ${invalidBrokerLoggers.mkString(",")}")
val configResource = new ConfigResource(ConfigResource.Type.BROKER_LOGGER, entityNameHead)
val alterOptions = new AlterConfigsOptions().timeoutMs(30000).validateOnly(false)
val alterLogLevelEntries = (configsToBeAdded.values.map(new AlterConfigOp(_, AlterConfigOp.OpType.SET))
++ configsToBeDeleted.map { k => new AlterConfigOp(new ConfigEntry(k, ""), AlterConfigOp.OpType.DELETE) }
).asJavaCollection
adminClient.incrementalAlterConfigs(Map(configResource -> alterLogLevelEntries).asJava, alterOptions).all().get(60, TimeUnit.SECONDS)
case ConfigType.User | ConfigType.Client =>
val hasQuotaConfigsToAdd = configsToBeAdded.keys.exists(QuotaConfigs.isClientOrUserQuotaConfig)
val scramConfigsToAddMap = configsToBeAdded.filter(entry => ScramMechanism.isScram(entry._1))
val unknownConfigsToAdd = configsToBeAdded.keys.filterNot(key => ScramMechanism.isScram(key) || QuotaConfigs.isClientOrUserQuotaConfig(key))
val hasQuotaConfigsToDelete = configsToBeDeleted.exists(QuotaConfigs.isClientOrUserQuotaConfig)
val scramConfigsToDelete = configsToBeDeleted.filter(ScramMechanism.isScram)
val unknownConfigsToDelete = configsToBeDeleted.filterNot(key => ScramMechanism.isScram(key) || QuotaConfigs.isClientOrUserQuotaConfig(key))
if (entityTypeHead == ConfigType.Client || entityTypes.size == 2) { // size==2 for case where users is specified first on the command line, before clients
// either just a client or both a user and a client
if (unknownConfigsToAdd.nonEmpty || scramConfigsToAddMap.nonEmpty)
throw new IllegalArgumentException(s"Only quota configs can be added for '${ConfigType.Client}' using --bootstrap-server. Unexpected config names: ${unknownConfigsToAdd ++ scramConfigsToAddMap.keys}")
if (unknownConfigsToDelete.nonEmpty || scramConfigsToDelete.nonEmpty)
throw new IllegalArgumentException(s"Only quota configs can be deleted for '${ConfigType.Client}' using --bootstrap-server. Unexpected config names: ${unknownConfigsToDelete ++ scramConfigsToDelete}")
} else { // ConfigType.User
if (unknownConfigsToAdd.nonEmpty)
throw new IllegalArgumentException(s"Only quota and SCRAM credential configs can be added for '${ConfigType.User}' using --bootstrap-server. Unexpected config names: $unknownConfigsToAdd")
if (unknownConfigsToDelete.nonEmpty)
throw new IllegalArgumentException(s"Only quota and SCRAM credential configs can be deleted for '${ConfigType.User}' using --bootstrap-server. Unexpected config names: $unknownConfigsToDelete")
if (scramConfigsToAddMap.nonEmpty || scramConfigsToDelete.nonEmpty) {
if (entityNames.exists(_.isEmpty)) // either --entity-type users --entity-default or --user-defaults
throw new IllegalArgumentException("The use of --entity-default or --user-defaults is not allowed with User SCRAM Credentials using --bootstrap-server.")
if (hasQuotaConfigsToAdd || hasQuotaConfigsToDelete)
throw new IllegalArgumentException(s"Cannot alter both quota and SCRAM credential configs simultaneously for '${ConfigType.User}' using --bootstrap-server.")
}
}
if (hasQuotaConfigsToAdd || hasQuotaConfigsToDelete) {
alterQuotaConfigs(adminClient, entityTypes, entityNames, configsToBeAddedMap, configsToBeDeleted)
} else {
// handle altering user SCRAM credential configs
if (entityNames.size != 1)
// should never happen, if we get here then it is a bug
throw new IllegalStateException(s"Altering user SCRAM credentials should never occur for more zero or multiple users: $entityNames")
alterUserScramCredentialConfigs(adminClient, entityNames.head, scramConfigsToAddMap, scramConfigsToDelete)
}
case ConfigType.Ip =>
val unknownConfigs = (configsToBeAdded.keys ++ configsToBeDeleted).filterNot(key => DynamicConfig.Ip.names.contains(key))
if (unknownConfigs.nonEmpty)
throw new IllegalArgumentException(s"Only connection quota configs can be added for '${ConfigType.Ip}' using --bootstrap-server. Unexpected config names: ${unknownConfigs.mkString(",")}")
alterQuotaConfigs(adminClient, entityTypes, entityNames, configsToBeAddedMap, configsToBeDeleted)
case _ => throw new IllegalArgumentException(s"Unsupported entity type: $entityTypeHead")
}
if (entityNameHead.nonEmpty)
println(s"Completed updating config for ${entityTypeHead.dropRight(1)} $entityNameHead.")
else
println(s"Completed updating default config for $entityTypeHead in the cluster.")
}
private def alterUserScramCredentialConfigs(adminClient: Admin, user: String, scramConfigsToAddMap: Map[String, ConfigEntry], scramConfigsToDelete: Seq[String]) = {
val deletions = scramConfigsToDelete.map(mechanismName =>
new UserScramCredentialDeletion(user, PublicScramMechanism.fromMechanismName(mechanismName)))
def iterationsAndPasswordBytes(mechanism: ScramMechanism, credentialStr: String): (Integer, Array[Byte]) = {
val pattern = "(?:iterations=(\\\\-?[0-9]*),)?password=(.*)".r
val (iterations, password) = credentialStr match {
case pattern(iterations, password) => (if (iterations != null && iterations != "-1") iterations.toInt else DefaultScramIterations, password)
case _ => throw new IllegalArgumentException(s"Invalid credential property $mechanism=$credentialStr")
}
if (iterations < mechanism.minIterations)
throw new IllegalArgumentException(s"Iterations $iterations is less than the minimum ${mechanism.minIterations} required for ${mechanism.mechanismName}")
(iterations, password.getBytes(StandardCharsets.UTF_8))
}
val upsertions = scramConfigsToAddMap.map { case (mechanismName, configEntry) =>
val (iterations, passwordBytes) = iterationsAndPasswordBytes(ScramMechanism.forMechanismName(mechanismName), configEntry.value)
new UserScramCredentialUpsertion(user, new ScramCredentialInfo(PublicScramMechanism.fromMechanismName(mechanismName), iterations), passwordBytes)
}
// we are altering only a single user by definition, so we don't have to worry about one user succeeding and another
// failing; therefore just check the success of all the futures (since there will only be 1)
adminClient.alterUserScramCredentials((deletions ++ upsertions).toList.asJava).all.get(60, TimeUnit.SECONDS)
}
private def alterQuotaConfigs(adminClient: Admin, entityTypes: List[String], entityNames: List[String], configsToBeAddedMap: Map[String, String], configsToBeDeleted: Seq[String]) = {
// handle altering client/user quota configs
val oldConfig = getClientQuotasConfig(adminClient, entityTypes, entityNames)
val invalidConfigs = configsToBeDeleted.filterNot(oldConfig.contains)
if (invalidConfigs.nonEmpty)
throw new InvalidConfigurationException(s"Invalid config(s): ${invalidConfigs.mkString(",")}")
val alterEntityTypes = entityTypes.map {
case ConfigType.User => ClientQuotaEntity.USER
case ConfigType.Client => ClientQuotaEntity.CLIENT_ID
case ConfigType.Ip => ClientQuotaEntity.IP
case entType => throw new IllegalArgumentException(s"Unexpected entity type: $entType")
}
val alterEntityNames = entityNames.map(en => if (en.nonEmpty) en else null)
// Explicitly populate a HashMap to ensure nulls are recorded properly.
val alterEntityMap = new java.util.HashMap[String, String]
alterEntityTypes.zip(alterEntityNames).foreach { case (k, v) => alterEntityMap.put(k, v) }
val entity = new ClientQuotaEntity(alterEntityMap)
val alterOptions = new AlterClientQuotasOptions().validateOnly(false)
val alterOps = (configsToBeAddedMap.map { case (key, value) =>
val doubleValue = try value.toDouble catch {
case _: NumberFormatException =>
throw new IllegalArgumentException(s"Cannot parse quota configuration value for $key: $value")
}
new ClientQuotaAlteration.Op(key, doubleValue)
} ++ configsToBeDeleted.map(key => new ClientQuotaAlteration.Op(key, null))).asJavaCollection
adminClient.alterClientQuotas(Collections.singleton(new ClientQuotaAlteration(entity, alterOps)), alterOptions)
.all().get(60, TimeUnit.SECONDS)
}
private[admin] def describeConfig(adminClient: Admin, opts: ConfigCommandOptions): Unit = {
val entityTypes = opts.entityTypes
val entityNames = opts.entityNames
val describeAll = opts.options.has(opts.allOpt)
entityTypes.head match {
case ConfigType.Topic | ConfigType.Broker | BrokerLoggerConfigType =>
describeResourceConfig(adminClient, entityTypes.head, entityNames.headOption, describeAll)
case ConfigType.User | ConfigType.Client =>
describeClientQuotaAndUserScramCredentialConfigs(adminClient, entityTypes, entityNames)
case ConfigType.Ip =>
describeQuotaConfigs(adminClient, entityTypes, entityNames)
case entityType => throw new IllegalArgumentException(s"Invalid entity type: $entityType")
}
}
private def describeResourceConfig(adminClient: Admin, entityType: String, entityName: Option[String], describeAll: Boolean): Unit = {
val entities = entityName
.map(name => List(name))
.getOrElse(entityType match {
case ConfigType.Topic =>
adminClient.listTopics(new ListTopicsOptions().listInternal(true)).names().get().asScala.toSeq
case ConfigType.Broker | BrokerLoggerConfigType =>
adminClient.describeCluster(new DescribeClusterOptions()).nodes().get().asScala.map(_.idString).toSeq :+ BrokerDefaultEntityName
case entityType => throw new IllegalArgumentException(s"Invalid entity type: $entityType")
})
entities.foreach { entity =>
entity match {
case BrokerDefaultEntityName =>
println(s"Default configs for $entityType in the cluster are:")
case _ =>
val configSourceStr = if (describeAll) "All" else "Dynamic"
println(s"$configSourceStr configs for ${entityType.dropRight(1)} $entity are:")
}
getResourceConfig(adminClient, entityType, entity, includeSynonyms = true, describeAll).foreach { entry =>
val synonyms = entry.synonyms.asScala.map(synonym => s"${synonym.source}:${synonym.name}=${synonym.value}").mkString(", ")
println(s" ${entry.name}=${entry.value} sensitive=${entry.isSensitive} synonyms={$synonyms}")
}
}
}
private def getResourceConfig(adminClient: Admin, entityType: String, entityName: String, includeSynonyms: Boolean, describeAll: Boolean) = {
def validateBrokerId(): Unit = try entityName.toInt catch {
case _: NumberFormatException =>
throw new IllegalArgumentException(s"The entity name for $entityType must be a valid integer broker id, found: $entityName")
}
val (configResourceType, dynamicConfigSource) = entityType match {
case ConfigType.Topic =>
if (!entityName.isEmpty)
Topic.validate(entityName)
(ConfigResource.Type.TOPIC, Some(ConfigEntry.ConfigSource.DYNAMIC_TOPIC_CONFIG))
case ConfigType.Broker => entityName match {
case BrokerDefaultEntityName =>
(ConfigResource.Type.BROKER, Some(ConfigEntry.ConfigSource.DYNAMIC_DEFAULT_BROKER_CONFIG))
case _ =>
validateBrokerId()
(ConfigResource.Type.BROKER, Some(ConfigEntry.ConfigSource.DYNAMIC_BROKER_CONFIG))
}
case BrokerLoggerConfigType =>
if (!entityName.isEmpty)
validateBrokerId()
(ConfigResource.Type.BROKER_LOGGER, None)
case entityType => throw new IllegalArgumentException(s"Invalid entity type: $entityType")
}
val configSourceFilter = if (describeAll)
None
else
dynamicConfigSource
val configResource = new ConfigResource(configResourceType, entityName)
val describeOptions = new DescribeConfigsOptions().includeSynonyms(includeSynonyms)
val configs = adminClient.describeConfigs(Collections.singleton(configResource), describeOptions)
.all.get(30, TimeUnit.SECONDS)
configs.get(configResource).entries.asScala
.filter(entry => configSourceFilter match {
case Some(configSource) => entry.source == configSource
case None => true
}).toSeq
}
private def describeQuotaConfigs(adminClient: Admin, entityTypes: List[String], entityNames: List[String]) = {
val quotaConfigs = getAllClientQuotasConfigs(adminClient, entityTypes, entityNames)
quotaConfigs.forKeyValue { (entity, entries) =>
val entityEntries = entity.entries.asScala
def entitySubstr(entityType: String): Option[String] =
entityEntries.get(entityType).map { name =>
val typeStr = entityType match {
case ClientQuotaEntity.USER => "user-principal"
case ClientQuotaEntity.CLIENT_ID => "client-id"
case ClientQuotaEntity.IP => "ip"
}
if (name != null) s"$typeStr '$name'"
else s"the default $typeStr"
}
val entityStr = (entitySubstr(ClientQuotaEntity.USER) ++
entitySubstr(ClientQuotaEntity.CLIENT_ID) ++
entitySubstr(ClientQuotaEntity.IP)).mkString(", ")
val entriesStr = entries.asScala.map(e => s"${e._1}=${e._2}").mkString(", ")
println(s"Quota configs for $entityStr are $entriesStr")
}
}
private def describeClientQuotaAndUserScramCredentialConfigs(adminClient: Admin, entityTypes: List[String], entityNames: List[String]) = {
describeQuotaConfigs(adminClient, entityTypes, entityNames)
// we describe user SCRAM credentials only when we are not describing client information
// and we are not given either --entity-default or --user-defaults
if (!entityTypes.contains(ConfigType.Client) && !entityNames.contains("")) {
val result = adminClient.describeUserScramCredentials(entityNames.asJava)
result.users.get(30, TimeUnit.SECONDS).asScala.foreach(user => {
try {
val description = result.description(user).get(30, TimeUnit.SECONDS)
val descriptionText = description.credentialInfos.asScala.map(info => s"${info.mechanism.mechanismName}=iterations=${info.iterations}").mkString(", ")
println(s"SCRAM credential configs for user-principal '$user' are $descriptionText")
} catch {
case e: Exception => println(s"Error retrieving SCRAM credential configs for user-principal '$user': ${e.getClass.getSimpleName}: ${e.getMessage}")
}
})
}
}
private def getClientQuotasConfig(adminClient: Admin, entityTypes: List[String], entityNames: List[String]): Map[String, java.lang.Double] = {
if (entityTypes.size != entityNames.size)
throw new IllegalArgumentException("Exactly one entity name must be specified for every entity type")
getAllClientQuotasConfigs(adminClient, entityTypes, entityNames).headOption.map(_._2.asScala).getOrElse(Map.empty)
}
private def getAllClientQuotasConfigs(adminClient: Admin, entityTypes: List[String], entityNames: List[String]) = {
val components = entityTypes.map(Some(_)).zipAll(entityNames.map(Some(_)), None, None).map { case (entityTypeOpt, entityNameOpt) =>
val entityType = entityTypeOpt match {
case Some(ConfigType.User) => ClientQuotaEntity.USER
case Some(ConfigType.Client) => ClientQuotaEntity.CLIENT_ID
case Some(ConfigType.Ip) => ClientQuotaEntity.IP
case Some(_) => throw new IllegalArgumentException(s"Unexpected entity type ${entityTypeOpt.get}")
case None => throw new IllegalArgumentException("More entity names specified than entity types")
}
entityNameOpt match {
case Some("") => ClientQuotaFilterComponent.ofDefaultEntity(entityType)
case Some(name) => ClientQuotaFilterComponent.ofEntity(entityType, name)
case None => ClientQuotaFilterComponent.ofEntityType(entityType)
}
}
adminClient.describeClientQuotas(ClientQuotaFilter.containsOnly(components.asJava)).entities.get(30, TimeUnit.SECONDS).asScala
}
case class Entity(entityType: String, sanitizedName: Option[String]) {
val entityPath = sanitizedName match {
case Some(n) => entityType + "/" + n
case None => entityType
}
override def toString: String = {
val typeName = entityType match {
case ConfigType.User => "user-principal"
case ConfigType.Client => "client-id"
case ConfigType.Topic => "topic"
case t => t
}
sanitizedName match {
case Some(ConfigEntityName.Default) => "default " + typeName
case Some(n) =>
val desanitized = if (entityType == ConfigType.User || entityType == ConfigType.Client) Sanitizer.desanitize(n) else n
s"$typeName '$desanitized'"
case None => entityType
}
}
}
case class ConfigEntity(root: Entity, child: Option[Entity]) {
val fullSanitizedName = root.sanitizedName.getOrElse("") + child.map(s => "/" + s.entityPath).getOrElse("")
def getAllEntities(zkClient: KafkaZkClient) : Seq[ConfigEntity] = {
// Describe option examples:
// Describe entity with specified name:
// --entity-type topics --entity-name topic1 (topic1)
// Describe all entities of a type (topics/brokers/users/clients):
// --entity-type topics (all topics)
// Describe <user, client> quotas:
// --entity-type users --entity-name user1 --entity-type clients --entity-name client2 (<user1, client2>)
// --entity-type users --entity-name userA --entity-type clients (all clients of userA)
// --entity-type users --entity-type clients (all <user, client>s))
// Describe default quotas:
// --entity-type users --entity-default (Default user)
// --entity-type users --entity-default --entity-type clients --entity-default (Default <user, client>)
(root.sanitizedName, child) match {
case (None, _) =>
val rootEntities = zkClient.getAllEntitiesWithConfig(root.entityType)
.map(name => ConfigEntity(Entity(root.entityType, Some(name)), child))
child match {
case Some(s) =>
rootEntities.flatMap(rootEntity =>
ConfigEntity(rootEntity.root, Some(Entity(s.entityType, None))).getAllEntities(zkClient))
case None => rootEntities
}
case (_, Some(childEntity)) =>
childEntity.sanitizedName match {
case Some(_) => Seq(this)
case None =>
zkClient.getAllEntitiesWithConfig(root.entityPath + "/" + childEntity.entityType)
.map(name => ConfigEntity(root, Some(Entity(childEntity.entityType, Some(name)))))
}
case (_, None) =>
Seq(this)
}
}
override def toString: String = {
root.toString + child.map(s => ", " + s.toString).getOrElse("")
}
}
private[admin] def parseEntity(opts: ConfigCommandOptions): ConfigEntity = {
val entityTypes = opts.entityTypes
val entityNames = opts.entityNames
if (entityTypes.head == ConfigType.User || entityTypes.head == ConfigType.Client)
parseClientQuotaEntity(opts, entityTypes, entityNames)
else {
// Exactly one entity type and at-most one entity name expected for other entities
val name = entityNames.headOption match {
case Some("") => Some(ConfigEntityName.Default)
case v => v
}
ConfigEntity(Entity(entityTypes.head, name), None)
}
}
private def parseClientQuotaEntity(opts: ConfigCommandOptions, types: List[String], names: List[String]): ConfigEntity = {
if (opts.options.has(opts.alterOpt) && names.size != types.size)
throw new IllegalArgumentException("--entity-name or --entity-default must be specified with each --entity-type for --alter")
val reverse = types.size == 2 && types.head == ConfigType.Client
val entityTypes = if (reverse) types.reverse else types
val sortedNames = (if (reverse && names.length == 2) names.reverse else names).iterator
def sanitizeName(entityType: String, name: String) = {
if (name.isEmpty)
ConfigEntityName.Default
else {
entityType match {
case ConfigType.User | ConfigType.Client => Sanitizer.sanitize(name)
case _ => throw new IllegalArgumentException("Invalid entity type " + entityType)
}
}
}
val entities = entityTypes.map(t => Entity(t, if (sortedNames.hasNext) Some(sanitizeName(t, sortedNames.next())) else None))
ConfigEntity(entities.head, if (entities.size > 1) Some(entities(1)) else None)
}
class ConfigCommandOptions(args: Array[String]) extends CommandDefaultOptions(args) {
val zkConnectOpt = parser.accepts("zookeeper", "DEPRECATED. The connection string for the zookeeper connection in the form host:port. " +
"Multiple URLS can be given to allow fail-over. Required when configuring SCRAM credentials for users or " +
"dynamic broker configs when the relevant broker(s) are down. Not allowed otherwise.")
.withRequiredArg
.describedAs("urls")
.ofType(classOf[String])
val bootstrapServerOpt = parser.accepts("bootstrap-server", "The Kafka server to connect to. " +
"This is required for describing and altering broker configs.")
.withRequiredArg
.describedAs("server to connect to")
.ofType(classOf[String])
val commandConfigOpt = parser.accepts("command-config", "Property file containing configs to be passed to Admin Client. " +
"This is used only with --bootstrap-server option for describing and altering broker configs.")
.withRequiredArg
.describedAs("command config property file")
.ofType(classOf[String])
val alterOpt = parser.accepts("alter", "Alter the configuration for the entity.")
val describeOpt = parser.accepts("describe", "List configs for the given entity.")
val allOpt = parser.accepts("all", "List all configs for the given topic, broker, or broker-logger entity (includes static configuration when the entity type is brokers)")
val entityType = parser.accepts("entity-type", "Type of entity (topics/clients/users/brokers/broker-loggers/ips)")
.withRequiredArg
.ofType(classOf[String])
val entityName = parser.accepts("entity-name", "Name of entity (topic name/client id/user principal name/broker id/ip)")
.withRequiredArg
.ofType(classOf[String])
val entityDefault = parser.accepts("entity-default", "Default entity name for clients/users/brokers/ips (applies to corresponding entity type in command line)")
val nl = System.getProperty("line.separator")
val addConfig = parser.accepts("add-config", "Key Value pairs of configs to add. Square brackets can be used to group values which contain commas: 'k1=v1,k2=[v1,v2,v2],k3=v3'. The following is a list of valid configurations: " +
"For entity-type '" + ConfigType.Topic + "': " + LogConfig.configNames.map("\\t" + _).mkString(nl, nl, nl) +
"For entity-type '" + ConfigType.Broker + "': " + DynamicConfig.Broker.names.asScala.toSeq.sorted.map("\\t" + _).mkString(nl, nl, nl) +
"For entity-type '" + ConfigType.User + "': " + DynamicConfig.User.names.asScala.toSeq.sorted.map("\\t" + _).mkString(nl, nl, nl) +
"For entity-type '" + ConfigType.Client + "': " + DynamicConfig.Client.names.asScala.toSeq.sorted.map("\\t" + _).mkString(nl, nl, nl) +
"For entity-type '" + ConfigType.Ip + "': " + DynamicConfig.Ip.names.asScala.toSeq.sorted.map("\\t" + _).mkString(nl, nl, nl) +
s"Entity types '${ConfigType.User}' and '${ConfigType.Client}' may be specified together to update config for clients of a specific user.")
.withRequiredArg
.ofType(classOf[String])
val addConfigFile = parser.accepts("add-config-file", "Path to a properties file with configs to add. See add-config for a list of valid configurations.")
.withRequiredArg
.ofType(classOf[String])
val deleteConfig = parser.accepts("delete-config", "config keys to remove 'k1,k2'")
.withRequiredArg
.ofType(classOf[String])
.withValuesSeparatedBy(',')
val forceOpt = parser.accepts("force", "Suppress console prompts")
val topic = parser.accepts("topic", "The topic's name.")
.withRequiredArg
.ofType(classOf[String])
val client = parser.accepts("client", "The client's ID.")
.withRequiredArg
.ofType(classOf[String])
val clientDefaults = parser.accepts("client-defaults", "The config defaults for all clients.")
val user = parser.accepts("user", "The user's principal name.")
.withRequiredArg
.ofType(classOf[String])
val userDefaults = parser.accepts("user-defaults", "The config defaults for all users.")
val broker = parser.accepts("broker", "The broker's ID.")
.withRequiredArg
.ofType(classOf[String])
val brokerDefaults = parser.accepts("broker-defaults", "The config defaults for all brokers.")
val brokerLogger = parser.accepts("broker-logger", "The broker's ID for its logger config.")
.withRequiredArg
.ofType(classOf[String])
val ipDefaults = parser.accepts("ip-defaults", "The config defaults for all IPs.")
val ip = parser.accepts("ip", "The IP address.")
.withRequiredArg
.ofType(classOf[String])
val zkTlsConfigFile = parser.accepts("zk-tls-config-file",
"Identifies the file where ZooKeeper client TLS connectivity properties are defined. Any properties other than " +
KafkaConfig.ZkSslConfigToSystemPropertyMap.keys.toList.sorted.mkString(", ") + " are ignored.")
.withRequiredArg().describedAs("ZooKeeper TLS configuration").ofType(classOf[String])
options = parser.parse(args : _*)
private val entityFlags = List((topic, ConfigType.Topic),
(client, ConfigType.Client),
(user, ConfigType.User),
(broker, ConfigType.Broker),
(brokerLogger, BrokerLoggerConfigType),
(ip, ConfigType.Ip))
private val entityDefaultsFlags = List((clientDefaults, ConfigType.Client),
(userDefaults, ConfigType.User),
(brokerDefaults, ConfigType.Broker),
(ipDefaults, ConfigType.Ip))
private[admin] def entityTypes: List[String] = {
options.valuesOf(entityType).asScala.toList ++
(entityFlags ++ entityDefaultsFlags).filter(entity => options.has(entity._1)).map(_._2)
}
private[admin] def entityNames: List[String] = {
val namesIterator = options.valuesOf(entityName).iterator
options.specs.asScala
.filter(spec => spec.options.contains("entity-name") || spec.options.contains("entity-default"))
.map(spec => if (spec.options.contains("entity-name")) namesIterator.next else "").toList ++
entityFlags
.filter(entity => options.has(entity._1))
.map(entity => options.valueOf(entity._1)) ++
entityDefaultsFlags
.filter(entity => options.has(entity._1))
.map(_ => "")
}
def checkArgs(): Unit = {
// should have exactly one action
val actions = Seq(alterOpt, describeOpt).count(options.has _)
if (actions != 1)
CommandLineUtils.printUsageAndDie(parser, "Command must include exactly one action: --describe, --alter")
// check required args
CommandLineUtils.checkInvalidArgs(parser, options, alterOpt, Set(describeOpt))
CommandLineUtils.checkInvalidArgs(parser, options, describeOpt, Set(alterOpt, addConfig, deleteConfig))
val entityTypeVals = entityTypes
if (entityTypeVals.size != entityTypeVals.distinct.size)
throw new IllegalArgumentException(s"Duplicate entity type(s) specified: ${entityTypeVals.diff(entityTypeVals.distinct).mkString(",")}")
val (allowedEntityTypes, connectOptString) = if (options.has(bootstrapServerOpt))
(BrokerSupportedConfigTypes, "--bootstrap-server")
else
(ZkSupportedConfigTypes, "--zookeeper")
entityTypeVals.foreach(entityTypeVal =>
if (!allowedEntityTypes.contains(entityTypeVal))
throw new IllegalArgumentException(s"Invalid entity type $entityTypeVal, the entity type must be one of ${allowedEntityTypes.mkString(", ")} with the $connectOptString argument")
)
if (entityTypeVals.isEmpty)
throw new IllegalArgumentException("At least one entity type must be specified")
else if (entityTypeVals.size > 1 && !entityTypeVals.toSet.equals(Set(ConfigType.User, ConfigType.Client)))
throw new IllegalArgumentException(s"Only '${ConfigType.User}' and '${ConfigType.Client}' entity types may be specified together")
if ((options.has(entityName) || options.has(entityType) || options.has(entityDefault)) &&
(entityFlags ++ entityDefaultsFlags).exists(entity => options.has(entity._1)))
throw new IllegalArgumentException("--entity-{type,name,default} should not be used in conjunction with specific entity flags")
val hasEntityName = entityNames.exists(!_.isEmpty)
val hasEntityDefault = entityNames.exists(_.isEmpty)
if (!options.has(bootstrapServerOpt) && !options.has(zkConnectOpt))
throw new IllegalArgumentException("One of the required --bootstrap-server or --zookeeper arguments must be specified")
else if (options.has(bootstrapServerOpt) && options.has(zkConnectOpt))
throw new IllegalArgumentException("Only one of --bootstrap-server or --zookeeper must be specified")
if (options.has(allOpt) && options.has(zkConnectOpt)) {
throw new IllegalArgumentException(s"--bootstrap-server must be specified for --all")
}
if (options.has(zkTlsConfigFile) && options.has(bootstrapServerOpt)) {
throw new IllegalArgumentException("--bootstrap-server doesn't support --zk-tls-config-file option. " +
"If you intend the command to communicate directly with ZooKeeper, please use the option --zookeeper instead of --bootstrap-server. " +
"Otherwise, remove the --zk-tls-config-file option.")
}
if (hasEntityName && (entityTypeVals.contains(ConfigType.Broker) || entityTypeVals.contains(BrokerLoggerConfigType))) {
Seq(entityName, broker, brokerLogger).filter(options.has(_)).map(options.valueOf(_)).foreach { brokerId =>
try brokerId.toInt catch {
case _: NumberFormatException =>
throw new IllegalArgumentException(s"The entity name for ${entityTypeVals.head} must be a valid integer broker id, but it is: $brokerId")
}
}
}
if (hasEntityName && entityTypeVals.contains(ConfigType.Ip)) {
Seq(entityName, ip).filter(options.has(_)).map(options.valueOf(_)).foreach { ipEntity =>
if (!DynamicConfig.Ip.isValidIpEntity(ipEntity))
throw new IllegalArgumentException(s"The entity name for ${entityTypeVals.head} must be a valid IP or resolvable host, but it is: $ipEntity")
}
}
if (options.has(describeOpt) && entityTypeVals.contains(BrokerLoggerConfigType) && !hasEntityName)
throw new IllegalArgumentException(s"an entity name must be specified with --describe of ${entityTypeVals.mkString(",")}")
if (options.has(alterOpt)) {
if (entityTypeVals.contains(ConfigType.User) ||
entityTypeVals.contains(ConfigType.Client) ||
entityTypeVals.contains(ConfigType.Broker) ||
entityTypeVals.contains(ConfigType.Ip)) {
if (!hasEntityName && !hasEntityDefault)
throw new IllegalArgumentException("an entity-name or default entity must be specified with --alter of users, clients, brokers or ips")
} else if (!hasEntityName)
throw new IllegalArgumentException(s"an entity name must be specified with --alter of ${entityTypeVals.mkString(",")}")
val isAddConfigPresent = options.has(addConfig)
val isAddConfigFilePresent = options.has(addConfigFile)
val isDeleteConfigPresent = options.has(deleteConfig)
        if (isAddConfigPresent && isAddConfigFilePresent)
          throw new IllegalArgumentException("Only one of --add-config or --add-config-file must be specified")
        if (!isAddConfigPresent && !isAddConfigFilePresent && !isDeleteConfigPresent)
          throw new IllegalArgumentException("At least one of --add-config, --add-config-file, or --delete-config must be specified with --alter")
}
}
}
}
| guozhangwang/kafka | core/src/main/scala/kafka/admin/ConfigCommand.scala | Scala | apache-2.0 | 54,052 |
package com.marcocampana.google.utils
import org.scalatest.{FlatSpec, Matchers}
abstract class UnitTest(component: String) extends FlatSpec with Matchers {
behavior of component
} | marcocampana/google-maps-services-scala | src/test/scala/com/marcocampana/google/utils/UnitTest.scala | Scala | apache-2.0 | 184 |
package org.openmole.gui.client.core
/*
* Copyright (C) 17/05/15 // [email protected]
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
import scalatags.JsDom.all._
import scaladget.bootstrapnative.bsn._
import scaladget.tools._
import scalatags.JsDom.tags
import org.openmole.gui.ext.client._
import scala.concurrent.Future
import boopickle.Default._
import scala.concurrent.ExecutionContext.Implicits.global
import org.openmole.gui.ext.data._
import rx._
import scaladget.bootstrapnative.Selector.{ Dropdown, Options }
class AuthenticationPanel(authenticationFactories: Seq[AuthenticationPluginFactory]) {
implicit val ctx: Ctx.Owner = Ctx.Owner.safe()
case class TestedAuthentication(auth: AuthenticationPlugin, tests: Future[Seq[Test]])
val authSetting: Var[Option[AuthenticationPlugin]] = Var(None)
private lazy val auths: Var[Seq[TestedAuthentication]] = Var(Seq())
lazy val initialCheck = Var(false)
def getAuthSelector(currentFactory: AuthenticationPluginFactory) = {
lazy val authenticationSelector: Options[AuthenticationPluginFactory] = {
val factories = authenticationFactories
val currentInd = {
val ind = factories.map { _.name }.indexOf(currentFactory.name)
if (ind == -1) 0 else ind
}
      factories.options(currentInd, btn_primary, (a: AuthenticationPluginFactory) ⇒ a.name, onclose = () ⇒
authSetting() = authenticationSelector.content.now.map {
_.buildEmpty
})
}
authenticationSelector
}
def getAuthentications =
    authenticationFactories.map { factory ⇒
val data = factory.getData
auths() = Seq()
      data.foreach { d ⇒
auths() = (auths.now ++ d.map {
factory.build
        }.map { auth ⇒ TestedAuthentication(auth, auth.test) })
}
initialCheck() = true
}
lazy val authenticationTable = {
case class Reactive(testedAuth: TestedAuthentication) {
val errorOn = Var(false)
val currentStack: Var[String] = Var("")
def toLabel(test: Test) = {
val lab = label(
test.message,
scalatags.JsDom.all.marginLeft := 10
)
test match {
          case PassedTest(_) ⇒ lab(label_success).render
          case PendingTest() ⇒ lab(label_warning).render
          case _ ⇒ lab(label_danger +++ pointer)(onclick := { () ⇒
currentStack() = test.error.map(ErrorData.stackTrace).getOrElse("")
errorOn() = !errorOn.now
}).render
}
}
lazy val render = {
tr(omsheet.docEntry +++ (lineHeight := "35px"))(
td(colMD(4))(
            tags.a(testedAuth.auth.data.name, omsheet.docTitleEntry +++ floatLeft +++ omsheet.color(omsheet.WHITE), cursor := "pointer", onclick := { () ⇒
authSetting() = Some(testedAuth.auth)
})
),
td(colMD(4) +++ (paddingTop := 5))(label(testedAuth.auth.factory.name, label_primary)),
td(colMD(2))({
val tests: Var[Seq[Test]] = Var(Seq(Test.pending))
            testedAuth.tests.foreach { ts ⇒
tests() = ts
}
Rx {
tests().map {
toLabel
}
}
}),
td(colMD(2))(
            glyphSpan(glyph_trash, () ⇒ removeAuthentication(testedAuth.auth))(omsheet.grey +++ (paddingTop := 9) +++ "glyphitem" +++ glyph_trash)
)
)
}
}
Rx {
authSetting() match {
case Some(p: AuthenticationPlugin) โ div(paddingTop := 20)(p.panel)
case _ โ
tags.table(fixedTable)(
thead,
            for (a ← auths()) yield {
val r = Reactive(a)
Seq(
r.render,
tr(
td(colMD(12))(
colspan := 12,
tags.div(Rx {
if (r.errorOn()) {
tags.textarea(dropdownError)(r.currentStack())
}
else tags.div()
})
)
)
)
}
)
}
}
}
val newButton = button("New", btn_primary, onclick := { () โ
authSetting() = authenticationFactories.headOption.map { _.buildEmpty }
})
val saveButton = button("Save", btn_primary, onclick := { () โ
{
save
}
})
val cancelButton = button("Cancel", btn_default, onclick := { () โ
{
authSetting.now match {
        case None ⇒ dialog.hide
        case _ ⇒ authSetting() = None
}
}
})
val dialog: ModalDialog =
ModalDialog(
omsheet.panelWidth(52),
      onopen = () ⇒ {
if (!initialCheck.now) {
getAuthentications
}
},
      onclose = () ⇒ {
authSetting() = None
}
)
dialog.header(
div(
Rx {
div(
authSetting() match {
            case Some(o) ⇒ getAuthSelector(o.factory).selector
            case _ ⇒ div(
b("Authentications")
)
}
)
}
)
)
dialog body (div(authenticationTable))
dialog.footer(
tags.div(
Rx {
buttonGroup()(
authSetting() match {
case Some(_) โ saveButton
case _ โ newButton
},
cancelButton
)
}
)
)
def removeAuthentication(ad: AuthenticationPlugin) = {
    ad.remove(() ⇒ getAuthentications)
}
def save = {
authSetting.now.map {
      _.save(() ⇒ {
getAuthentications
})
}
authSetting() = None
}
} | openmole/openmole | openmole/gui/client/org.openmole.gui.client.core/src/main/scala/org/openmole/gui/client/core/AuthenticationPanel.scala | Scala | agpl-3.0 | 6,297 |
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.http.check.header
import io.gatling.commons.validation._
import io.gatling.core.check.extractor._
import io.gatling.core.check.extractor.regex.{ GroupExtractor, Patterns }
import io.gatling.http.response.Response
object HttpHeaderRegexExtractorFactory extends CriterionExtractorFactory[Response, (String, String)]("headerRegex") {
private def extractHeadersValues[X: GroupExtractor](response: Response, headerName: String, pattern: String, patterns: Patterns) =
response.headers(headerName).flatMap(patterns.extractAll(_, pattern))
def newHeaderRegexSingleExtractor[X: GroupExtractor](headerName: String, pattern: String, occurrence: Int, patterns: Patterns) =
newSingleExtractor(
(headerName, pattern),
occurrence,
extractHeadersValues(_, headerName, pattern, patterns).lift(occurrence).success
)
def newHeaderRegexMultipleExtractor[X: GroupExtractor](headerName: String, pattern: String, patterns: Patterns) =
newMultipleExtractor(
(headerName, pattern),
extractHeadersValues(_, headerName, pattern, patterns).liftSeqOption.success
)
def newHeaderRegexCountExtractor(headerName: String, pattern: String, patterns: Patterns) =
newCountExtractor(
(headerName, pattern),
extractHeadersValues[String](_, headerName, pattern, patterns).liftSeqOption.map(_.size).success
)
}
| pwielgolaski/gatling | gatling-http/src/main/scala/io/gatling/http/check/header/HttpHeaderRegexExtractorFactory.scala | Scala | apache-2.0 | 1,992 |
package com.wallace.demo.rest.sever.demo.rest
import akka.actor.{Actor, ActorRefFactory}
import com.wallace.demo.rest.sever.demo.common.LogSupport
import org.json4s.{DefaultFormats, Formats}
import spray.http.HttpResponse
import spray.http.MediaTypes._
import spray.httpx.Json4sSupport
import spray.routing.{HttpService, Route}
/**
* Created by wallace on 2018/8/27.
*/
class RedisClientActor extends Actor with RedisClientService {
def actorRefFactory: ActorRefFactory = context
def receive: Receive = runRoute(redisClientRoute)
}
trait RedisClientService extends HttpService with Json4sSupport with LogSupport {
implicit def json4sFormats: Formats = DefaultFormats
val redisClientRoute: Route =
path("query") {
get {
parameter('key) {
(key: String) =>
detach() {
respondWithMediaType(`application/json`) {
complete {
try {
???
} catch {
case e: Exception =>
val msg = "Respond Failed!"
HttpResponse(500, msg)
}
}
}
}
}
}
}
}
| LeagueForHacker/Rest-Sever-Demo | src/main/scala/com/wallace/demo/rest/sever/demo/rest/RedisClientActor.scala | Scala | mit | 1,214 |
import swing._
import scala.swing.event._
import java.util.UUID
object Main extends SimpleSwingApplication {
def top = new MainFrame {
title = "Bookmark Manager"
contents = new GridPanel(2, 2) {
hGap = 3
vGap = 3
contents += new Button {
text = "Press Me!"
reactions += {
case ButtonClicked(_) => sayHi
}
}
}
}
def sayHi = {
println("Hello there")
}
Sunshine.init()
}
| jeroanan/SunshineRecorder | src/Main.scala | Scala | gpl-3.0 | 453 |
package o1.adventurejukebox
import org.scalatra.test.specs2._
// For more on Specs2, see http://etorreborre.github.com/specs2/guide/org.specs2.guide.QuickStart.html
class AdventureServletSpec extends ScalatraSpec { def is =
"GET / on AdventureServlet" ^
"should return status 200" ! root200^
end
addServlet(classOf[AdventureServlet], "/*")
def root200 = get("/") {
status must_== 200
}
}
| tuner/adventure-jukebox | src/test/scala/o1/adventurejukebox/AdventureServletSpec.scala | Scala | mit | 493 |
package latis.writer
import java.io.FileOutputStream
import org.junit._
import Assert._
import scala.io.Source
import latis.reader.tsml.TsmlReader
import java.io.DataOutputStream
import java.io.File
import latis.dm._
import latis.metadata.Metadata
class TestMetadataWriter extends WriterTest{
@Test @Ignore
def test_dap2 {
test_writer(getDataset("dap2"),"meta")
}
@Test @Ignore
def test_fof {
test_writer(getDataset(fof),"meta")
}
@Test @Ignore
def test_scalar {
test_writer(getDataset("scalar"),"meta")
}
@Test @Ignore
def test_tsi {
test_writer(getDataset("tsi"),"meta")
}
@Test @Ignore
def test_tof {
test_writer(getDataset(tof),"meta")
}
//@Test
def print_meta {
print("scalar", "meta")
}
//@Test
def write_meta_file {
write_to_file(fof, "meta")
}
//@Test
def test {
val ds = getDataset(tof) //TestDataset.canonical
Writer.fromSuffix("meta").write(ds)
}
} | dlindhol/LaTiS | src/test/scala/latis/writer/TestMetadataWriter.scala | Scala | epl-1.0 | 958 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.integration.torch
import com.intel.analytics.bigdl.nn.{RReLU, ReLU}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.RandomGenerator._
import org.scalatest.{BeforeAndAfter, FlatSpec, Matchers, fixture}
import scala.math._
@com.intel.analytics.bigdl.tags.Serial
class RReLUSpec extends TorchSpec {
"A RReLU Module " should "generate correct output and grad not inplace when train = true" in {
torchCheck()
val seed = 100
RNG.setSeed(seed)
val module = new RReLU[Double]()
val input = Tensor[Double](2, 2, 2)
input(Array(1, 1, 1)) = -0.97008799016476
input(Array(1, 1, 2)) = -0.89318234380335
input(Array(1, 2, 1)) = -0.65073125436902
input(Array(1, 2, 2)) = -0.35406025126576
input(Array(2, 1, 1)) = -1.0360766677186
input(Array(2, 1, 2)) = 1.173689913936
input(Array(2, 2, 1)) = 1.6776262558997
input(Array(2, 2, 2)) = -0.64814318157732
val gradOutput = Tensor[Double](2, 2, 2)
gradOutput(Array(1, 1, 1)) = 0.43442418193445
gradOutput(Array(1, 1, 2)) = 0.97614445211366
gradOutput(Array(1, 2, 1)) = 0.081252868985757
gradOutput(Array(1, 2, 2)) = 0.24688877537847
gradOutput(Array(2, 1, 1)) = 0.027903598966077
gradOutput(Array(2, 1, 2)) = 0.0086153273005038
gradOutput(Array(2, 2, 1)) = 0.053113180678338
gradOutput(Array(2, 2, 2)) = 0.74842141871341
val start = System.nanoTime()
val output = module.forward(input)
val gradInput = module.backward(input, gradOutput)
val end = System.nanoTime()
val scalaTime = end - start
val code = "torch.manualSeed(" + seed + ")\n" +
"module = nn.RReLU()\n" +
"output = module:forward(input)\n" +
"gradInput = module:backward(input,gradOutput)"
val (luaTime, torchResult) = TH.run(code, Map("input" -> input, "gradOutput" -> gradOutput),
Array("output", "gradInput"))
val luaOutput1 = torchResult("output").asInstanceOf[Tensor[Double]]
val luaOutput2 = torchResult("gradInput").asInstanceOf[Tensor[Double]]
luaOutput1 should be (output)
luaOutput2 should be (gradInput)
println("Test case : RReLU, Torch : " + luaTime + " s, Scala : " + scalaTime / 1e9 + " s")
}
"A RReLU Module " should "generate correct output and grad inplace when train = true" in {
torchCheck()
val seed = 100
RNG.setSeed(seed)
val module = new RReLU[Double](inplace = false)
val input = Tensor[Double](2, 2, 2)
input(Array(1, 1, 1)) = -0.97008799016476
input(Array(1, 1, 2)) = -0.89318234380335
input(Array(1, 2, 1)) = -0.65073125436902
input(Array(1, 2, 2)) = -0.35406025126576
input(Array(2, 1, 1)) = -1.0360766677186
input(Array(2, 1, 2)) = 1.173689913936
input(Array(2, 2, 1)) = 1.6776262558997
input(Array(2, 2, 2)) = -0.64814318157732
val gradOutput = Tensor[Double](2, 2, 2)
gradOutput(Array(1, 1, 1)) = 0.43442418193445
gradOutput(Array(1, 1, 2)) = 0.97614445211366
gradOutput(Array(1, 2, 1)) = 0.081252868985757
gradOutput(Array(1, 2, 2)) = 0.24688877537847
gradOutput(Array(2, 1, 1)) = 0.027903598966077
gradOutput(Array(2, 1, 2)) = 0.0086153273005038
gradOutput(Array(2, 2, 1)) = 0.053113180678338
gradOutput(Array(2, 2, 2)) = 0.74842141871341
val start = System.nanoTime()
val output = module.forward(input)
val gradInput = module.backward(input.clone(), gradOutput.clone())
val end = System.nanoTime()
val scalaTime = end - start
val code = "torch.manualSeed(" + seed + ")\n" +
"module = nn.RReLU(1/8,1/3,true)\n" +
"output = module:forward(input)\n" +
"gradInput = module:backward(input,gradOutput)"
val (luaTime, torchResult) = TH.run(code, Map("input" -> input, "gradOutput" -> gradOutput),
Array("output", "gradInput"))
val luaOutput1 = torchResult("output").asInstanceOf[Tensor[Double]]
val luaOutput2 = torchResult("gradInput").asInstanceOf[Tensor[Double]]
luaOutput1 should be (output)
luaOutput2 should be (gradInput)
println("Test case : RReLU, Torch : " + luaTime + " s, Scala : " + scalaTime / 1e9 + " s")
}
"A RReLU Module " should "generate correct output and grad not inplace when train = false" in {
torchCheck()
val seed = 100
RNG.setSeed(seed)
val module = new RReLU[Double]()
module.evaluate()
val input = Tensor[Double](2, 2, 2)
input(Array(1, 1, 1)) = -0.97008799016476
input(Array(1, 1, 2)) = -0.89318234380335
input(Array(1, 2, 1)) = -0.65073125436902
input(Array(1, 2, 2)) = -0.35406025126576
input(Array(2, 1, 1)) = -1.0360766677186
input(Array(2, 1, 2)) = 1.173689913936
input(Array(2, 2, 1)) = 1.6776262558997
input(Array(2, 2, 2)) = -0.64814318157732
val gradOutput = Tensor[Double](2, 2, 2)
gradOutput(Array(1, 1, 1)) = 0.43442418193445
gradOutput(Array(1, 1, 2)) = 0.97614445211366
gradOutput(Array(1, 2, 1)) = 0.081252868985757
gradOutput(Array(1, 2, 2)) = 0.24688877537847
gradOutput(Array(2, 1, 1)) = 0.027903598966077
gradOutput(Array(2, 1, 2)) = 0.0086153273005038
gradOutput(Array(2, 2, 1)) = 0.053113180678338
gradOutput(Array(2, 2, 2)) = 0.74842141871341
val start = System.nanoTime()
val output = module.forward(input)
val gradInput = module.backward(input, gradOutput)
val end = System.nanoTime()
val scalaTime = end - start
val code = "torch.manualSeed(" + seed + ")\n" +
"module = nn.RReLU()\n" +
"module.train = false\n" +
"output = module:forward(input)\n" +
"gradInput = module:backward(input,gradOutput)"
val (luaTime, torchResult) = TH.run(code, Map("input" -> input, "gradOutput" -> gradOutput),
Array("output", "gradInput"))
val luaOutput1 = torchResult("output").asInstanceOf[Tensor[Double]]
val luaOutput2 = torchResult("gradInput").asInstanceOf[Tensor[Double]]
luaOutput1 should be (output)
luaOutput2 should be (gradInput)
println("Test case : RReLU, Torch : " + luaTime + " s, Scala : " + scalaTime / 1e9 + " s")
}
"A RReLU Module " should "generate correct output and grad inplace when train = false" in {
torchCheck()
val seed = 100
RNG.setSeed(seed)
val module = new RReLU[Double](inplace = false)
module.evaluate()
val input = Tensor[Double](2, 2, 2)
input(Array(1, 1, 1)) = -0.97008799016476
input(Array(1, 1, 2)) = -0.89318234380335
input(Array(1, 2, 1)) = -0.65073125436902
input(Array(1, 2, 2)) = -0.35406025126576
input(Array(2, 1, 1)) = -1.0360766677186
input(Array(2, 1, 2)) = 1.173689913936
input(Array(2, 2, 1)) = 1.6776262558997
input(Array(2, 2, 2)) = -0.64814318157732
val gradOutput = Tensor[Double](2, 2, 2)
gradOutput(Array(1, 1, 1)) = 0.43442418193445
gradOutput(Array(1, 1, 2)) = 0.97614445211366
gradOutput(Array(1, 2, 1)) = 0.081252868985757
gradOutput(Array(1, 2, 2)) = 0.24688877537847
gradOutput(Array(2, 1, 1)) = 0.027903598966077
gradOutput(Array(2, 1, 2)) = 0.0086153273005038
gradOutput(Array(2, 2, 1)) = 0.053113180678338
gradOutput(Array(2, 2, 2)) = 0.74842141871341
val start = System.nanoTime()
val output = module.forward(input)
val gradInput = module.backward(input.clone(), gradOutput.clone())
val end = System.nanoTime()
val scalaTime = end - start
val code = "torch.manualSeed(" + seed + ")\n" +
"module = nn.RReLU(1/8,1/3,true)\n" +
"module.train = false\n" +
"output = module:forward(input)\n" +
"gradInput = module:backward(input,gradOutput)"
val (luaTime, torchResult) = TH.run(code, Map("input" -> input, "gradOutput" -> gradOutput),
Array("output", "gradInput"))
val luaOutput1 = torchResult("output").asInstanceOf[Tensor[Double]]
val luaOutput2 = torchResult("gradInput").asInstanceOf[Tensor[Double]]
luaOutput1 should be (output)
luaOutput2 should be (gradInput)
println("Test case : RReLU, Torch : " + luaTime + " s, Scala : " + scalaTime / 1e9 + " s")
}
}
| zhangxiaoli73/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/integration/torch/RReLUSpec.scala | Scala | apache-2.0 | 8,663 |
package com.twitter.scalding
import com.twitter.scalding.serialization._
import org.specs._
import java.io.{ByteArrayOutputStream=>BOS}
import java.io.{ByteArrayInputStream=>BIS}
import scala.collection.immutable.ListMap
import scala.collection.immutable.HashMap
import com.twitter.algebird.{AveragedValue, DecayedValue,
HyperLogLog, HyperLogLogMonoid, Moments, Monoid}
/*
* This is just a test case for Kryo to deal with. It should
* be outside KryoTest, otherwise the enclosing class, KryoTest
* will also need to be serialized
*/
case class TestCaseClassForSerialization(x : String, y : Int)
case class TestValMap(val map : Map[String,Double])
case class TestValHashMap(val map : HashMap[String,Double])
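/*
 * Illustrative sketch (not part of the original test): had one of these case classes been
 * declared inside KryoTest instead, the compiler would add a hidden $outer reference to the
 * enclosing specification, so serializing an instance would drag the whole KryoTest along:
 *
 *   class KryoTest extends Specification {
 *     case class Nested(x: Int)   // captures $outer = this KryoTest instance
 *     serObj(Nested(1))           // Kryo would then also have to serialize KryoTest itself
 *   }
 */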
class KryoTest extends Specification {
noDetailedDiffs() //Fixes issue for scala 2.9
def serObj[T <: AnyRef](in : T) = {
val khs = new KryoHadoop
khs.accept(in.getClass)
val ks = khs.getSerializer(in.getClass.asInstanceOf[Class[AnyRef]])
val out = new BOS
ks.open(out)
ks.serialize(in)
ks.close
out.toByteArray
}
def deserObj[T <: AnyRef](cls : Class[_], input : Array[Byte]) : T = {
val khs = new KryoHadoop
khs.accept(cls)
val ks = khs.getDeserializer(cls.asInstanceOf[Class[AnyRef]])
val in = new BIS(input)
ks.open(in)
val fakeInputHadoopNeeds = null
val res = ks.deserialize(fakeInputHadoopNeeds.asInstanceOf[T])
ks.close
res.asInstanceOf[T]
}
def singleRT[T <: AnyRef](in : T) : T = {
deserObj[T](in.getClass, serObj(in))
}
//These are analogous to how Hadoop will serialize
def serialize(ins : List[AnyRef]) = {
ins.map { v => (v.getClass, serObj(v)) }
}
def deserialize(input : List[(Class[_], Array[Byte])]) = {
input.map { tup => deserObj[AnyRef](tup._1, tup._2) }
}
def serializationRT(ins : List[AnyRef]) = deserialize(serialize(ins))
"KryoSerializers and KryoDeserializers" should {
"round trip any non-array object" in {
import HyperLogLog._
implicit val hllmon = new HyperLogLogMonoid(4)
val test = List(1,2,"hey",(1,2),Args("--this is --a --b --test 34"),
("hey","you"),
("slightly", 1L, "longer", 42, "tuple"),
Map(1->2,4->5),
0 to 100,
(0 to 42).toList, Seq(1,100,1000),
Map("good" -> 0.5, "bad" -> -1.0),
Set(1,2,3,4,10),
ListMap("good" -> 0.5, "bad" -> -1.0),
HashMap("good" -> 0.5, "bad" -> -1.0),
TestCaseClassForSerialization("case classes are: ", 10),
TestValMap(Map("you" -> 1.0, "every" -> 2.0, "body" -> 3.0, "a" -> 1.0,
"b" -> 2.0, "c" -> 3.0, "d" -> 4.0)),
TestValHashMap(HashMap("you" -> 1.0)),
Vector(1,2,3,4,5),
TestValMap(null),
Some("junk"),
DecayedValue(1.0, 2.0),
Moments(100.0), Monoid.plus(Moments(100), Moments(2)),
AveragedValue(100, 32.0),
// Serialize an instance of the HLL monoid
hllmon.apply(42),
Monoid.sum(List(1,2,3,4).map { hllmon(_) }),
'hai)
.asInstanceOf[List[AnyRef]]
serializationRT(test) must be_==(test)
// HyperLogLogMonoid doesn't have a good equals. :(
singleRT(new HyperLogLogMonoid(5)).bits must be_==(5)
}
"handle arrays" in {
def arrayRT[T](arr : Array[T]) {
serializationRT(List(arr))(0)
.asInstanceOf[Array[T]].toList must be_==(arr.toList)
}
arrayRT(Array(0))
arrayRT(Array(0.1))
arrayRT(Array("hey"))
arrayRT(Array((0,1)))
arrayRT(Array(None, Nil, None, Nil))
}
"handle scala singletons" in {
val test = List(Nil, None)
//Serialize each:
serializationRT(test) must be_==(test)
//Together in a list:
singleRT(test) must be_==(test)
}
"handle Date, RichDate and DateRange" in {
import DateOps._
implicit val tz = PACIFIC
val myDate : RichDate = "1999-12-30T14"
val simpleDate : java.util.Date = myDate.value
val myDateRange = DateRange("2012-01-02", "2012-06-09")
singleRT(myDate) must be_==(myDate)
singleRT(simpleDate) must be_==(simpleDate)
singleRT(myDateRange) must be_==(myDateRange)
}
"Serialize a giant list" in {
val bigList = (1 to 100000).toList
val list2 = deserObj[List[Int]](bigList.getClass, serObj(bigList))
//Specs, it turns out, also doesn't deal with giant lists well:
list2.zip(bigList).foreach { tup =>
tup._1 must be_==(tup._2)
}
}
}
}
| stripe/scalding | scalding-core/src/test/scala/com/twitter/scalding/KryoTest.scala | Scala | apache-2.0 | 4,824 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.parquet
import java.io.File
import java.math.BigInteger
import java.sql.{Date, Timestamp}
import java.time.{ZoneId, ZoneOffset}
import java.util.{Calendar, Locale, TimeZone}
import scala.collection.mutable.ArrayBuffer
import com.google.common.io.Files
import org.apache.hadoop.fs.Path
import org.apache.parquet.hadoop.ParquetOutputFormat
import org.apache.spark.SparkConf
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.catalog.ExternalCatalogUtils
import org.apache.spark.sql.catalyst.expressions.Literal
import org.apache.spark.sql.catalyst.util.{DateFormatter, DateTimeUtils, TimestampFormatter}
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.execution.datasources.{PartitionPath => Partition}
import org.apache.spark.sql.execution.datasources.v2.{DataSourceV2Relation, FileTable}
import org.apache.spark.sql.execution.streaming.MemoryStream
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
// The data where the partitioning key exists only in the directory structure.
case class ParquetData(intField: Int, stringField: String)
// The data that also includes the partitioning key
case class ParquetDataWithKey(intField: Int, pi: Int, stringField: String, ps: String)
abstract class ParquetPartitionDiscoverySuite
extends QueryTest with ParquetTest with SharedSparkSession {
import PartitioningUtils._
import testImplicits._
val defaultPartitionName = ExternalCatalogUtils.DEFAULT_PARTITION_NAME
val timeZoneId = ZoneId.systemDefault()
val df = DateFormatter(timeZoneId)
val tf = TimestampFormatter(timestampPartitionPattern, timeZoneId)
protected override def beforeAll(): Unit = {
super.beforeAll()
spark.conf.set(SQLConf.DEFAULT_DATA_SOURCE_NAME.key, "parquet")
}
protected override def afterAll(): Unit = {
spark.conf.unset(SQLConf.DEFAULT_DATA_SOURCE_NAME.key)
super.afterAll()
}
test("column type inference") {
def check(raw: String, literal: Literal, zoneId: ZoneId = timeZoneId): Unit = {
assert(inferPartitionColumnValue(raw, true, zoneId, df, tf) === literal)
}
check("10", Literal.create(10, IntegerType))
check("1000000000000000", Literal.create(1000000000000000L, LongType))
val decimal = Decimal("1" * 20)
check("1" * 20,
Literal.create(decimal, DecimalType(decimal.precision, decimal.scale)))
check("1.5", Literal.create(1.5, DoubleType))
check("hello", Literal.create("hello", StringType))
check("1990-02-24", Literal.create(Date.valueOf("1990-02-24"), DateType))
check("1990-02-24 12:00:30",
Literal.create(Timestamp.valueOf("1990-02-24 12:00:30"), TimestampType))
val c = Calendar.getInstance(TimeZone.getTimeZone("GMT"))
c.set(1990, 1, 24, 12, 0, 30)
c.set(Calendar.MILLISECOND, 0)
check("1990-02-24 12:00:30",
Literal.create(new Timestamp(c.getTimeInMillis), TimestampType),
ZoneOffset.UTC)
check(defaultPartitionName, Literal.create(null, NullType))
}
test("parse invalid partitioned directories") {
// Invalid
var paths = Seq(
"hdfs://host:9000/invalidPath",
"hdfs://host:9000/path/a=10/b=20",
"hdfs://host:9000/path/a=10.5/b=hello")
var exception = intercept[AssertionError] {
parsePartitions(paths.map(new Path(_)), true, Set.empty[Path], None, true, true, timeZoneId)
}
assert(exception.getMessage().contains("Conflicting directory structures detected"))
// Valid
paths = Seq(
"hdfs://host:9000/path/_temporary",
"hdfs://host:9000/path/a=10/b=20",
"hdfs://host:9000/path/_temporary/path")
parsePartitions(
paths.map(new Path(_)),
true,
Set(new Path("hdfs://host:9000/path/")),
None,
true,
true,
timeZoneId)
// Valid
paths = Seq(
"hdfs://host:9000/path/something=true/table/",
"hdfs://host:9000/path/something=true/table/_temporary",
"hdfs://host:9000/path/something=true/table/a=10/b=20",
"hdfs://host:9000/path/something=true/table/_temporary/path")
parsePartitions(
paths.map(new Path(_)),
true,
Set(new Path("hdfs://host:9000/path/something=true/table")),
None,
true,
true,
timeZoneId)
// Valid
paths = Seq(
"hdfs://host:9000/path/table=true/",
"hdfs://host:9000/path/table=true/_temporary",
"hdfs://host:9000/path/table=true/a=10/b=20",
"hdfs://host:9000/path/table=true/_temporary/path")
parsePartitions(
paths.map(new Path(_)),
true,
Set(new Path("hdfs://host:9000/path/table=true")),
None,
true,
true,
timeZoneId)
// Invalid
paths = Seq(
"hdfs://host:9000/path/_temporary",
"hdfs://host:9000/path/a=10/b=20",
"hdfs://host:9000/path/path1")
exception = intercept[AssertionError] {
parsePartitions(
paths.map(new Path(_)),
true,
Set(new Path("hdfs://host:9000/path/")),
None,
true,
true,
timeZoneId)
}
assert(exception.getMessage().contains("Conflicting directory structures detected"))
// Invalid
// Conflicting directory structure:
// "hdfs://host:9000/tmp/tables/partitionedTable"
// "hdfs://host:9000/tmp/tables/nonPartitionedTable1"
// "hdfs://host:9000/tmp/tables/nonPartitionedTable2"
paths = Seq(
"hdfs://host:9000/tmp/tables/partitionedTable",
"hdfs://host:9000/tmp/tables/partitionedTable/p=1/",
"hdfs://host:9000/tmp/tables/nonPartitionedTable1",
"hdfs://host:9000/tmp/tables/nonPartitionedTable2")
exception = intercept[AssertionError] {
parsePartitions(
paths.map(new Path(_)),
true,
Set(new Path("hdfs://host:9000/tmp/tables/")),
None,
true,
true,
timeZoneId)
}
assert(exception.getMessage().contains("Conflicting directory structures detected"))
}
test("parse partition") {
def check(path: String, expected: Option[PartitionValues]): Unit = {
val actual = parsePartition(new Path(path), true, Set.empty[Path],
Map.empty, true, timeZoneId, df, tf)._1
assert(expected === actual)
}
def checkThrows[T <: Throwable: Manifest](path: String, expected: String): Unit = {
val message = intercept[T] {
parsePartition(new Path(path), true, Set.empty[Path], Map.empty, true, timeZoneId, df, tf)
}.getMessage
assert(message.contains(expected))
}
check("file://path/a=10", Some {
PartitionValues(
Seq("a"),
Seq(Literal.create(10, IntegerType)))
})
check("file://path/a=10/b=hello/c=1.5", Some {
PartitionValues(
Seq("a", "b", "c"),
Seq(
Literal.create(10, IntegerType),
Literal.create("hello", StringType),
Literal.create(1.5, DoubleType)))
})
check("file://path/a=10/b_hello/c=1.5", Some {
PartitionValues(
Seq("c"),
Seq(Literal.create(1.5, DoubleType)))
})
check("file:///", None)
check("file:///path/_temporary", None)
check("file:///path/_temporary/c=1.5", None)
check("file:///path/_temporary/path", None)
check("file://path/a=10/_temporary/c=1.5", None)
check("file://path/a=10/c=1.5/_temporary", None)
checkThrows[AssertionError]("file://path/=10", "Empty partition column name")
checkThrows[AssertionError]("file://path/a=", "Empty partition column value")
}
test("parse partition with base paths") {
// when the basePaths is the same as the path to a leaf directory
val partitionSpec1: Option[PartitionValues] = parsePartition(
path = new Path("file://path/a=10"),
typeInference = true,
basePaths = Set(new Path("file://path/a=10")),
Map.empty,
true,
zoneId = timeZoneId,
df,
tf)._1
assert(partitionSpec1.isEmpty)
// when the basePaths is the path to a base directory of leaf directories
val partitionSpec2: Option[PartitionValues] = parsePartition(
path = new Path("file://path/a=10"),
typeInference = true,
basePaths = Set(new Path("file://path")),
Map.empty,
true,
zoneId = timeZoneId,
df,
tf)._1
assert(partitionSpec2 ==
Option(PartitionValues(
Seq("a"),
Seq(Literal.create(10, IntegerType)))))
}
test("parse partitions") {
def check(
paths: Seq[String],
spec: PartitionSpec,
rootPaths: Set[Path] = Set.empty[Path]): Unit = {
val actualSpec =
parsePartitions(
paths.map(new Path(_)),
true,
rootPaths,
None,
true,
true,
timeZoneId)
assert(actualSpec.partitionColumns === spec.partitionColumns)
assert(actualSpec.partitions.length === spec.partitions.length)
actualSpec.partitions.zip(spec.partitions).foreach { case (actual, expected) =>
assert(actual === expected)
}
assert(actualSpec === spec)
}
check(Seq(
"hdfs://host:9000/path/a=10/b=hello"),
PartitionSpec(
StructType(Seq(
StructField("a", IntegerType),
StructField("b", StringType))),
Seq(Partition(InternalRow(10, UTF8String.fromString("hello")),
"hdfs://host:9000/path/a=10/b=hello"))))
check(Seq(
"hdfs://host:9000/path/a=10/b=20",
"hdfs://host:9000/path/a=10.5/b=hello"),
PartitionSpec(
StructType(Seq(
StructField("a", DoubleType),
StructField("b", StringType))),
Seq(
Partition(InternalRow(10, UTF8String.fromString("20")),
"hdfs://host:9000/path/a=10/b=20"),
Partition(InternalRow(10.5, UTF8String.fromString("hello")),
"hdfs://host:9000/path/a=10.5/b=hello"))))
check(Seq(
"hdfs://host:9000/path/_temporary",
"hdfs://host:9000/path/a=10/b=20",
"hdfs://host:9000/path/a=10.5/b=hello",
"hdfs://host:9000/path/a=10.5/_temporary",
"hdfs://host:9000/path/a=10.5/_TeMpOrArY",
"hdfs://host:9000/path/a=10.5/b=hello/_temporary",
"hdfs://host:9000/path/a=10.5/b=hello/_TEMPORARY",
"hdfs://host:9000/path/_temporary/path",
"hdfs://host:9000/path/a=11/_temporary/path",
"hdfs://host:9000/path/a=10.5/b=world/_temporary/path"),
PartitionSpec(
StructType(Seq(
StructField("a", DoubleType),
StructField("b", StringType))),
Seq(
Partition(InternalRow(10, UTF8String.fromString("20")),
"hdfs://host:9000/path/a=10/b=20"),
Partition(InternalRow(10.5, UTF8String.fromString("hello")),
"hdfs://host:9000/path/a=10.5/b=hello"))))
check(Seq(
s"hdfs://host:9000/path/a=10/b=20",
s"hdfs://host:9000/path/a=$defaultPartitionName/b=hello"),
PartitionSpec(
StructType(Seq(
StructField("a", IntegerType),
StructField("b", StringType))),
Seq(
Partition(InternalRow(10, UTF8String.fromString("20")),
s"hdfs://host:9000/path/a=10/b=20"),
Partition(InternalRow(null, UTF8String.fromString("hello")),
s"hdfs://host:9000/path/a=$defaultPartitionName/b=hello"))))
check(Seq(
s"hdfs://host:9000/path/a=10/b=$defaultPartitionName",
s"hdfs://host:9000/path/a=10.5/b=$defaultPartitionName"),
PartitionSpec(
StructType(Seq(
StructField("a", DoubleType),
StructField("b", NullType))),
Seq(
Partition(InternalRow(10, null), s"hdfs://host:9000/path/a=10/b=$defaultPartitionName"),
Partition(InternalRow(10.5, null),
s"hdfs://host:9000/path/a=10.5/b=$defaultPartitionName"))))
check(Seq(
s"hdfs://host:9000/path1",
s"hdfs://host:9000/path2"),
PartitionSpec.emptySpec)
// The cases below check the resolution for type conflicts.
val t1 = Timestamp.valueOf("2014-01-01 00:00:00.0").getTime * 1000
val t2 = Timestamp.valueOf("2014-01-01 00:01:00.0").getTime * 1000
    // Values in column 'a' are inferred as null, date and timestamp respectively, and
    // timestamp is chosen as the common type.
    // Values in column 'b' are inferred as integer, decimal(22, 0) and null, and decimal(22, 0)
    // is chosen as the common type.
check(Seq(
s"hdfs://host:9000/path/a=$defaultPartitionName/b=0",
s"hdfs://host:9000/path/a=2014-01-01/b=${Long.MaxValue}111",
s"hdfs://host:9000/path/a=2014-01-01 00%3A01%3A00.0/b=$defaultPartitionName"),
PartitionSpec(
StructType(Seq(
StructField("a", TimestampType),
StructField("b", DecimalType(22, 0)))),
Seq(
Partition(
InternalRow(null, Decimal(0)),
s"hdfs://host:9000/path/a=$defaultPartitionName/b=0"),
Partition(
InternalRow(t1, Decimal(s"${Long.MaxValue}111")),
s"hdfs://host:9000/path/a=2014-01-01/b=${Long.MaxValue}111"),
Partition(
InternalRow(t2, null),
s"hdfs://host:9000/path/a=2014-01-01 00%3A01%3A00.0/b=$defaultPartitionName"))))
}
test("parse partitions with type inference disabled") {
def check(paths: Seq[String], spec: PartitionSpec): Unit = {
val actualSpec =
parsePartitions(paths.map(new Path(_)), false, Set.empty[Path], None,
true, true, timeZoneId)
assert(actualSpec === spec)
}
check(Seq(
"hdfs://host:9000/path/a=10/b=hello"),
PartitionSpec(
StructType(Seq(
StructField("a", StringType),
StructField("b", StringType))),
Seq(Partition(InternalRow(UTF8String.fromString("10"), UTF8String.fromString("hello")),
"hdfs://host:9000/path/a=10/b=hello"))))
check(Seq(
"hdfs://host:9000/path/a=10/b=20",
"hdfs://host:9000/path/a=10.5/b=hello"),
PartitionSpec(
StructType(Seq(
StructField("a", StringType),
StructField("b", StringType))),
Seq(
Partition(InternalRow(UTF8String.fromString("10"), UTF8String.fromString("20")),
"hdfs://host:9000/path/a=10/b=20"),
Partition(InternalRow(UTF8String.fromString("10.5"), UTF8String.fromString("hello")),
"hdfs://host:9000/path/a=10.5/b=hello"))))
check(Seq(
"hdfs://host:9000/path/_temporary",
"hdfs://host:9000/path/a=10/b=20",
"hdfs://host:9000/path/a=10.5/b=hello",
"hdfs://host:9000/path/a=10.5/_temporary",
"hdfs://host:9000/path/a=10.5/_TeMpOrArY",
"hdfs://host:9000/path/a=10.5/b=hello/_temporary",
"hdfs://host:9000/path/a=10.5/b=hello/_TEMPORARY",
"hdfs://host:9000/path/_temporary/path",
"hdfs://host:9000/path/a=11/_temporary/path",
"hdfs://host:9000/path/a=10.5/b=world/_temporary/path"),
PartitionSpec(
StructType(Seq(
StructField("a", StringType),
StructField("b", StringType))),
Seq(
Partition(InternalRow(UTF8String.fromString("10"), UTF8String.fromString("20")),
"hdfs://host:9000/path/a=10/b=20"),
Partition(InternalRow(UTF8String.fromString("10.5"), UTF8String.fromString("hello")),
"hdfs://host:9000/path/a=10.5/b=hello"))))
check(Seq(
s"hdfs://host:9000/path/a=10/b=20",
s"hdfs://host:9000/path/a=$defaultPartitionName/b=hello"),
PartitionSpec(
StructType(Seq(
StructField("a", StringType),
StructField("b", StringType))),
Seq(
Partition(InternalRow(UTF8String.fromString("10"), UTF8String.fromString("20")),
s"hdfs://host:9000/path/a=10/b=20"),
Partition(InternalRow(null, UTF8String.fromString("hello")),
s"hdfs://host:9000/path/a=$defaultPartitionName/b=hello"))))
check(Seq(
s"hdfs://host:9000/path/a=10/b=$defaultPartitionName",
s"hdfs://host:9000/path/a=10.5/b=$defaultPartitionName"),
PartitionSpec(
StructType(Seq(
StructField("a", StringType),
StructField("b", NullType))),
Seq(
Partition(InternalRow(UTF8String.fromString("10"), null),
s"hdfs://host:9000/path/a=10/b=$defaultPartitionName"),
Partition(InternalRow(UTF8String.fromString("10.5"), null),
s"hdfs://host:9000/path/a=10.5/b=$defaultPartitionName"))))
check(Seq(
s"hdfs://host:9000/path1",
s"hdfs://host:9000/path2"),
PartitionSpec.emptySpec)
}
test("read partitioned table - normal case") {
withTempDir { base =>
for {
pi <- Seq(1, 2)
ps <- Seq("foo", "bar")
} {
val dir = makePartitionDir(base, defaultPartitionName, "pi" -> pi, "ps" -> ps)
makeParquetFile(
(1 to 10).map(i => ParquetData(i, i.toString)),
dir)
// Introduce _temporary dir to test the robustness of the schema discovery process.
new File(dir.toString, "_temporary").mkdir()
}
      // Introduce a _temporary dir in the base dir to test the robustness of schema discovery.
new File(base.getCanonicalPath, "_temporary").mkdir()
spark.read.parquet(base.getCanonicalPath).createOrReplaceTempView("t")
withTempView("t") {
checkAnswer(
sql("SELECT * FROM t"),
for {
i <- 1 to 10
pi <- Seq(1, 2)
ps <- Seq("foo", "bar")
} yield Row(i, i.toString, pi, ps))
checkAnswer(
sql("SELECT intField, pi FROM t"),
for {
i <- 1 to 10
pi <- Seq(1, 2)
_ <- Seq("foo", "bar")
} yield Row(i, pi))
checkAnswer(
sql("SELECT * FROM t WHERE pi = 1"),
for {
i <- 1 to 10
ps <- Seq("foo", "bar")
} yield Row(i, i.toString, 1, ps))
checkAnswer(
sql("SELECT * FROM t WHERE ps = 'foo'"),
for {
i <- 1 to 10
pi <- Seq(1, 2)
} yield Row(i, i.toString, pi, "foo"))
}
}
}
test("read partitioned table using different path options") {
withTempDir { base =>
val pi = 1
val ps = "foo"
val path = makePartitionDir(base, defaultPartitionName, "pi" -> pi, "ps" -> ps)
makeParquetFile(
(1 to 10).map(i => ParquetData(i, i.toString)), path)
// when the input is the base path containing partitioning directories
val baseDf = spark.read.parquet(base.getCanonicalPath)
assert(baseDf.schema.map(_.name) === Seq("intField", "stringField", "pi", "ps"))
// when the input is a path to the leaf directory containing a parquet file
val partDf = spark.read.parquet(path.getCanonicalPath)
assert(partDf.schema.map(_.name) === Seq("intField", "stringField"))
path.listFiles().foreach { f =>
if (!f.getName.startsWith("_") &&
f.getName.toLowerCase(Locale.ROOT).endsWith(".parquet")) {
// when the input is a path to a parquet file
val df = spark.read.parquet(f.getCanonicalPath)
assert(df.schema.map(_.name) === Seq("intField", "stringField"))
}
}
path.listFiles().foreach { f =>
if (!f.getName.startsWith("_") &&
f.getName.toLowerCase(Locale.ROOT).endsWith(".parquet")) {
// when the input is a path to a parquet file but `basePath` is overridden to
// the base path containing partitioning directories
val df = spark
.read.option("basePath", base.getCanonicalPath)
.parquet(f.getCanonicalPath)
assert(df.schema.map(_.name) === Seq("intField", "stringField", "pi", "ps"))
}
}
}
}
test("read partitioned table - with nulls") {
withTempDir { base =>
for {
// Must be `Integer` rather than `Int` here. `null.asInstanceOf[Int]` results in a zero...
pi <- Seq(1, null.asInstanceOf[Integer])
ps <- Seq("foo", null.asInstanceOf[String])
} {
makeParquetFile(
(1 to 10).map(i => ParquetData(i, i.toString)),
makePartitionDir(base, defaultPartitionName, "pi" -> pi, "ps" -> ps))
}
val parquetRelation = spark.read.format("parquet").load(base.getCanonicalPath)
parquetRelation.createOrReplaceTempView("t")
withTempView("t") {
checkAnswer(
sql("SELECT * FROM t"),
for {
i <- 1 to 10
pi <- Seq(1, null.asInstanceOf[Integer])
ps <- Seq("foo", null.asInstanceOf[String])
} yield Row(i, i.toString, pi, ps))
checkAnswer(
sql("SELECT * FROM t WHERE pi IS NULL"),
for {
i <- 1 to 10
ps <- Seq("foo", null.asInstanceOf[String])
} yield Row(i, i.toString, null, ps))
checkAnswer(
sql("SELECT * FROM t WHERE ps IS NULL"),
for {
i <- 1 to 10
pi <- Seq(1, null.asInstanceOf[Integer])
} yield Row(i, i.toString, pi, null))
}
}
}
test("read partitioned table - merging compatible schemas") {
withTempDir { base =>
makeParquetFile(
(1 to 10).map(i => Tuple1(i)).toDF("intField"),
makePartitionDir(base, defaultPartitionName, "pi" -> 1))
makeParquetFile(
(1 to 10).map(i => (i, i.toString)).toDF("intField", "stringField"),
makePartitionDir(base, defaultPartitionName, "pi" -> 2))
spark
.read
.option("mergeSchema", "true")
.format("parquet")
.load(base.getCanonicalPath)
.createOrReplaceTempView("t")
withTempView("t") {
checkAnswer(
sql("SELECT * FROM t"),
(1 to 10).map(i => Row(i, null, 1)) ++ (1 to 10).map(i => Row(i, i.toString, 2)))
}
}
}
test("SPARK-7847: Dynamic partition directory path escaping and unescaping") {
withTempPath { dir =>
val df = Seq("/", "[]", "?").zipWithIndex.map(_.swap).toDF("i", "s")
df.write.format("parquet").partitionBy("s").save(dir.getCanonicalPath)
checkAnswer(spark.read.parquet(dir.getCanonicalPath), df.collect())
}
}
test("Various partition value types") {
val row =
Row(
100.toByte,
40000.toShort,
Int.MaxValue,
Long.MaxValue,
1.5.toFloat,
4.5,
new java.math.BigDecimal(new BigInteger("212500"), 5),
new java.math.BigDecimal("2.125"),
java.sql.Date.valueOf("2015-05-23"),
new Timestamp(0),
"This is a string, /[]?=:",
"This is not a partition column")
// BooleanType is not supported yet
val partitionColumnTypes =
Seq(
ByteType,
ShortType,
IntegerType,
LongType,
FloatType,
DoubleType,
DecimalType(10, 5),
DecimalType.SYSTEM_DEFAULT,
DateType,
TimestampType,
StringType)
val partitionColumns = partitionColumnTypes.zipWithIndex.map {
case (t, index) => StructField(s"p_$index", t)
}
val schema = StructType(partitionColumns :+ StructField(s"i", StringType))
val df = spark.createDataFrame(sparkContext.parallelize(row :: Nil), schema)
withTempPath { dir =>
df.write.format("parquet").partitionBy(partitionColumns.map(_.name): _*).save(dir.toString)
val fields = schema.map(f => Column(f.name).cast(f.dataType))
checkAnswer(spark.read.load(dir.toString).select(fields: _*), row)
}
withTempPath { dir =>
df.write.option(DateTimeUtils.TIMEZONE_OPTION, "GMT")
.format("parquet").partitionBy(partitionColumns.map(_.name): _*).save(dir.toString)
val fields = schema.map(f => Column(f.name).cast(f.dataType))
checkAnswer(spark.read.option(DateTimeUtils.TIMEZONE_OPTION, "GMT")
.load(dir.toString).select(fields: _*), row)
}
}
test("Various inferred partition value types") {
val row =
Row(
Long.MaxValue,
4.5,
new java.math.BigDecimal(new BigInteger("1" * 20)),
java.sql.Date.valueOf("2015-05-23"),
java.sql.Timestamp.valueOf("1990-02-24 12:00:30"),
"This is a string, /[]?=:",
"This is not a partition column")
val partitionColumnTypes =
Seq(
LongType,
DoubleType,
DecimalType(20, 0),
DateType,
TimestampType,
StringType)
val partitionColumns = partitionColumnTypes.zipWithIndex.map {
case (t, index) => StructField(s"p_$index", t)
}
val schema = StructType(partitionColumns :+ StructField(s"i", StringType))
val df = spark.createDataFrame(sparkContext.parallelize(row :: Nil), schema)
withTempPath { dir =>
df.write.format("parquet").partitionBy(partitionColumns.map(_.name): _*).save(dir.toString)
val fields = schema.map(f => Column(f.name))
checkAnswer(spark.read.load(dir.toString).select(fields: _*), row)
}
withTempPath { dir =>
df.write.option(DateTimeUtils.TIMEZONE_OPTION, "GMT")
.format("parquet").partitionBy(partitionColumns.map(_.name): _*).save(dir.toString)
val fields = schema.map(f => Column(f.name))
checkAnswer(spark.read.option(DateTimeUtils.TIMEZONE_OPTION, "GMT")
.load(dir.toString).select(fields: _*), row)
}
}
test("SPARK-8037: Ignores files whose name starts with dot") {
withTempPath { dir =>
val df = (1 to 3).map(i => (i, i, i, i)).toDF("a", "b", "c", "d")
df.write
.format("parquet")
.partitionBy("b", "c", "d")
.save(dir.getCanonicalPath)
Files.touch(new File(s"${dir.getCanonicalPath}/b=1", ".DS_Store"))
Files.createParentDirs(new File(s"${dir.getCanonicalPath}/b=1/c=1/.foo/bar"))
checkAnswer(spark.read.format("parquet").load(dir.getCanonicalPath), df)
}
}
test("SPARK-11678: Partition discovery stops at the root path of the dataset") {
withTempPath { dir =>
val tablePath = new File(dir, "key=value")
val df = (1 to 3).map(i => (i, i, i, i)).toDF("a", "b", "c", "d")
df.write
.format("parquet")
.partitionBy("b", "c", "d")
.save(tablePath.getCanonicalPath)
Files.touch(new File(s"${tablePath.getCanonicalPath}/", "_SUCCESS"))
Files.createParentDirs(new File(s"${dir.getCanonicalPath}/b=1/c=1/.foo/bar"))
checkAnswer(spark.read.format("parquet").load(tablePath.getCanonicalPath), df)
}
withTempPath { dir =>
val path = new File(dir, "key=value")
val tablePath = new File(path, "table")
val df = (1 to 3).map(i => (i, i, i, i)).toDF("a", "b", "c", "d")
df.write
.format("parquet")
.partitionBy("b", "c", "d")
.save(tablePath.getCanonicalPath)
Files.touch(new File(s"${tablePath.getCanonicalPath}/", "_SUCCESS"))
Files.createParentDirs(new File(s"${dir.getCanonicalPath}/b=1/c=1/.foo/bar"))
checkAnswer(spark.read.format("parquet").load(tablePath.getCanonicalPath), df)
}
}
test("use basePath to specify the root dir of a partitioned table.") {
withTempPath { dir =>
val tablePath = new File(dir, "table")
val df = (1 to 3).map(i => (i, i, i, i)).toDF("a", "b", "c", "d")
df.write
.format("parquet")
.partitionBy("b", "c", "d")
.save(tablePath.getCanonicalPath)
val twoPartitionsDF =
spark
.read
.option("basePath", tablePath.getCanonicalPath)
.parquet(
s"${tablePath.getCanonicalPath}/b=1",
s"${tablePath.getCanonicalPath}/b=2")
checkAnswer(twoPartitionsDF, df.filter("b != 3"))
intercept[AssertionError] {
spark
.read
.parquet(
s"${tablePath.getCanonicalPath}/b=1",
s"${tablePath.getCanonicalPath}/b=2")
}
}
}
test("use basePath and file globbing to selectively load partitioned table") {
withTempPath { dir =>
val df = Seq(
(1, "foo", 100),
(1, "bar", 200),
(2, "foo", 300),
(2, "bar", 400)
).toDF("p1", "p2", "v")
df.write
.mode(SaveMode.Overwrite)
.partitionBy("p1", "p2")
.parquet(dir.getCanonicalPath)
def check(path: String, basePath: String, expectedDf: DataFrame): Unit = {
val testDf = spark.read
.option("basePath", basePath)
.parquet(path)
checkAnswer(testDf, expectedDf)
}
// Should find all the data with partitioning columns when base path is set to the root
val resultDf = df.select("v", "p1", "p2")
check(path = s"$dir", basePath = s"$dir", resultDf)
check(path = s"$dir/*", basePath = s"$dir", resultDf)
check(path = s"$dir/*/*", basePath = s"$dir", resultDf)
check(path = s"$dir/*/*/*", basePath = s"$dir", resultDf)
// Should find selective partitions of the data if the base path is not set to root
check( // read from ../p1=1 with base ../p1=1, should not infer p1 col
path = s"$dir/p1=1/*",
basePath = s"$dir/p1=1/",
resultDf.filter("p1 = 1").drop("p1"))
      check( // read from ../p1=1/p2=foo with base ../p1=1/, should not infer p1
path = s"$dir/p1=1/p2=foo/*",
basePath = s"$dir/p1=1/",
resultDf.filter("p1 = 1").filter("p2 = 'foo'").drop("p1"))
      check( // read from ../p1=1/p2=foo with base ../p1=1/p2=foo, should not infer p1, p2
path = s"$dir/p1=1/p2=foo/*",
basePath = s"$dir/p1=1/p2=foo/",
resultDf.filter("p1 = 1").filter("p2 = 'foo'").drop("p1", "p2"))
}
}
test("_SUCCESS should not break partitioning discovery") {
Seq(1, 32).foreach { threshold =>
      // We have two ways to list files: one at the driver side, the other using a Spark job.
      // We need to test both code paths.
withSQLConf(SQLConf.PARALLEL_PARTITION_DISCOVERY_THRESHOLD.key -> threshold.toString) {
withTempPath { dir =>
val tablePath = new File(dir, "table")
val df = (1 to 3).map(i => (i, i, i, i)).toDF("a", "b", "c", "d")
df.write
.format("parquet")
.partitionBy("b", "c", "d")
.save(tablePath.getCanonicalPath)
Files.touch(new File(s"${tablePath.getCanonicalPath}/b=1", "_SUCCESS"))
Files.touch(new File(s"${tablePath.getCanonicalPath}/b=1/c=1", "_SUCCESS"))
Files.touch(new File(s"${tablePath.getCanonicalPath}/b=1/c=1/d=1", "_SUCCESS"))
checkAnswer(spark.read.format("parquet").load(tablePath.getCanonicalPath), df)
}
}
}
}
test("listConflictingPartitionColumns") {
def makeExpectedMessage(colNameLists: Seq[String], paths: Seq[String]): String = {
      val conflictingColNameLists = colNameLists.zipWithIndex.map { case (list, index) =>
        s"\tPartition column name list #$index: $list"
      }.mkString("\n", "\n", "\n")
// scalastyle:off
s"""Conflicting partition column names detected:
|$conflictingColNameLists
|For partitioned table directories, data files should only live in leaf directories.
|And directories at the same level should have the same partition column name.
|Please check the following directories for unexpected files or inconsistent partition column names:
         |${paths.map("\t" + _).mkString("\n", "\n", "")}
""".stripMargin.trim
// scalastyle:on
}
assert(
listConflictingPartitionColumns(
Seq(
(new Path("file:/tmp/foo/a=1"), PartitionValues(Seq("a"), Seq(Literal(1)))),
(new Path("file:/tmp/foo/b=1"), PartitionValues(Seq("b"), Seq(Literal(1)))))).trim ===
makeExpectedMessage(Seq("a", "b"), Seq("file:/tmp/foo/a=1", "file:/tmp/foo/b=1")))
assert(
listConflictingPartitionColumns(
Seq(
(new Path("file:/tmp/foo/a=1/_temporary"), PartitionValues(Seq("a"), Seq(Literal(1)))),
(new Path("file:/tmp/foo/a=1"), PartitionValues(Seq("a"), Seq(Literal(1)))))).trim ===
makeExpectedMessage(
Seq("a"),
Seq("file:/tmp/foo/a=1/_temporary", "file:/tmp/foo/a=1")))
assert(
listConflictingPartitionColumns(
Seq(
(new Path("file:/tmp/foo/a=1"),
PartitionValues(Seq("a"), Seq(Literal(1)))),
(new Path("file:/tmp/foo/a=1/b=foo"),
PartitionValues(Seq("a", "b"), Seq(Literal(1), Literal("foo")))))).trim ===
makeExpectedMessage(
Seq("a", "a, b"),
Seq("file:/tmp/foo/a=1", "file:/tmp/foo/a=1/b=foo")))
}
test("Parallel partition discovery") {
withTempPath { dir =>
withSQLConf(SQLConf.PARALLEL_PARTITION_DISCOVERY_THRESHOLD.key -> "1") {
val path = dir.getCanonicalPath
val df = spark.range(5).select('id as 'a, 'id as 'b, 'id as 'c).coalesce(1)
df.write.partitionBy("b", "c").parquet(path)
checkAnswer(spark.read.parquet(path), df)
}
}
}
test("SPARK-15895 summary files in non-leaf partition directories") {
withTempPath { dir =>
val path = dir.getCanonicalPath
withSQLConf(
ParquetOutputFormat.JOB_SUMMARY_LEVEL -> "ALL",
SQLConf.FILE_COMMIT_PROTOCOL_CLASS.key ->
classOf[SQLHadoopMapReduceCommitProtocol].getCanonicalName) {
spark.range(3).write.parquet(s"$path/p0=0/p1=0")
}
val p0 = new File(path, "p0=0")
val p1 = new File(p0, "p1=0")
// Builds the following directory layout by:
//
// 1. copying Parquet summary files we just wrote into `p0=0`, and
// 2. touching a dot-file `.dummy` under `p0=0`.
//
// <base>
// +- p0=0
// |- _metadata
// |- _common_metadata
// |- .dummy
// +- p1=0
// |- _metadata
// |- _common_metadata
// |- part-00000.parquet
// |- part-00001.parquet
// +- ...
//
// The summary files and the dot-file under `p0=0` should not fail partition discovery.
Files.copy(new File(p1, "_metadata"), new File(p0, "_metadata"))
Files.copy(new File(p1, "_common_metadata"), new File(p0, "_common_metadata"))
Files.touch(new File(p0, ".dummy"))
checkAnswer(spark.read.parquet(s"$path"), Seq(
Row(0, 0, 0),
Row(1, 0, 0),
Row(2, 0, 0)
))
}
}
test("SPARK-22109: Resolve type conflicts between strings and timestamps in partition column") {
val df = Seq(
(1, "2015-01-01 00:00:00"),
(2, "2014-01-01 00:00:00"),
(3, "blah")).toDF("i", "str")
withTempPath { path =>
df.write.format("parquet").partitionBy("str").save(path.getAbsolutePath)
checkAnswer(spark.read.load(path.getAbsolutePath), df)
}
}
test("Resolve type conflicts - decimals, dates and timestamps in partition column") {
withTempPath { path =>
val df = Seq((1, "2014-01-01"), (2, "2016-01-01"), (3, "2015-01-01 00:01:00")).toDF("i", "ts")
df.write.format("parquet").partitionBy("ts").save(path.getAbsolutePath)
checkAnswer(
spark.read.load(path.getAbsolutePath),
Row(1, Timestamp.valueOf("2014-01-01 00:00:00")) ::
Row(2, Timestamp.valueOf("2016-01-01 00:00:00")) ::
Row(3, Timestamp.valueOf("2015-01-01 00:01:00")) :: Nil)
}
withTempPath { path =>
val df = Seq((1, "1"), (2, "3"), (3, "2" * 30)).toDF("i", "decimal")
df.write.format("parquet").partitionBy("decimal").save(path.getAbsolutePath)
checkAnswer(
spark.read.load(path.getAbsolutePath),
Row(1, BigDecimal("1")) ::
Row(2, BigDecimal("3")) ::
Row(3, BigDecimal("2" * 30)) :: Nil)
}
}
test("SPARK-23436: invalid Dates should be inferred as String in partition inference") {
withTempPath { path =>
val data = Seq(("1", "2018-01", "2018-01-01-04", "test"))
.toDF("id", "date_month", "date_hour", "data")
data.write.partitionBy("date_month", "date_hour").parquet(path.getAbsolutePath)
val input = spark.read.parquet(path.getAbsolutePath).select("id",
"date_month", "date_hour", "data")
      assert(input.schema.sameType(data.schema))
checkAnswer(input, data)
}
}
}
class ParquetV1PartitionDiscoverySuite extends ParquetPartitionDiscoverySuite {
import testImplicits._
override protected def sparkConf: SparkConf =
super
.sparkConf
.set(SQLConf.USE_V1_SOURCE_LIST, "parquet")
test("read partitioned table - partition key included in Parquet file") {
withTempDir { base =>
for {
pi <- Seq(1, 2)
ps <- Seq("foo", "bar")
} {
makeParquetFile(
(1 to 10).map(i => ParquetDataWithKey(i, pi, i.toString, ps)),
makePartitionDir(base, defaultPartitionName, "pi" -> pi, "ps" -> ps))
}
spark.read.parquet(base.getCanonicalPath).createOrReplaceTempView("t")
withTempView("t") {
checkAnswer(
sql("SELECT * FROM t"),
for {
i <- 1 to 10
pi <- Seq(1, 2)
ps <- Seq("foo", "bar")
} yield Row(i, pi, i.toString, ps))
checkAnswer(
sql("SELECT intField, pi FROM t"),
for {
i <- 1 to 10
pi <- Seq(1, 2)
_ <- Seq("foo", "bar")
} yield Row(i, pi))
checkAnswer(
sql("SELECT * FROM t WHERE pi = 1"),
for {
i <- 1 to 10
ps <- Seq("foo", "bar")
} yield Row(i, 1, i.toString, ps))
checkAnswer(
sql("SELECT * FROM t WHERE ps = 'foo'"),
for {
i <- 1 to 10
pi <- Seq(1, 2)
} yield Row(i, pi, i.toString, "foo"))
}
}
}
test("read partitioned table - with nulls and partition keys are included in Parquet file") {
withTempDir { base =>
for {
pi <- Seq(1, 2)
ps <- Seq("foo", null.asInstanceOf[String])
} {
makeParquetFile(
(1 to 10).map(i => ParquetDataWithKey(i, pi, i.toString, ps)),
makePartitionDir(base, defaultPartitionName, "pi" -> pi, "ps" -> ps))
}
val parquetRelation = spark.read.format("parquet").load(base.getCanonicalPath)
parquetRelation.createOrReplaceTempView("t")
withTempView("t") {
checkAnswer(
sql("SELECT * FROM t"),
for {
i <- 1 to 10
pi <- Seq(1, 2)
ps <- Seq("foo", null.asInstanceOf[String])
} yield Row(i, pi, i.toString, ps))
checkAnswer(
sql("SELECT * FROM t WHERE ps IS NULL"),
for {
i <- 1 to 10
pi <- Seq(1, 2)
} yield Row(i, pi, i.toString, null))
}
}
}
test("SPARK-7749 Non-partitioned table should have empty partition spec") {
withTempPath { dir =>
(1 to 10).map(i => (i, i.toString)).toDF("a", "b").write.parquet(dir.getCanonicalPath)
val queryExecution = spark.read.parquet(dir.getCanonicalPath).queryExecution
queryExecution.analyzed.collectFirst {
case LogicalRelation(
HadoopFsRelation(location: PartitioningAwareFileIndex, _, _, _, _, _), _, _, _) =>
assert(location.partitionSpec() === PartitionSpec.emptySpec)
}.getOrElse {
        fail(s"Expecting a matching HadoopFsRelation, but got:\n$queryExecution")
}
}
}
test("SPARK-18108 Parquet reader fails when data column types conflict with partition ones") {
withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "true") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val df = Seq((1L, 2.0)).toDF("a", "b")
df.write.parquet(s"$path/a=1")
checkAnswer(spark.read.parquet(s"$path"), Seq(Row(1, 2.0)))
}
}
}
test("SPARK-21463: MetadataLogFileIndex should respect userSpecifiedSchema for partition cols") {
withTempDir { tempDir =>
val output = new File(tempDir, "output").toString
val checkpoint = new File(tempDir, "chkpoint").toString
try {
val stream = MemoryStream[(String, Int)]
val df = stream.toDS().toDF("time", "value")
val sq = df.writeStream
.option("checkpointLocation", checkpoint)
.format("parquet")
.partitionBy("time")
.start(output)
stream.addData(("2017-01-01-00", 1), ("2017-01-01-01", 2))
sq.processAllAvailable()
val schema = new StructType()
.add("time", StringType)
.add("value", IntegerType)
val readBack = spark.read.schema(schema).parquet(output)
assert(readBack.schema.toSet === schema.toSet)
checkAnswer(
readBack,
Seq(Row("2017-01-01-00", 1), Row("2017-01-01-01", 2))
)
} finally {
spark.streams.active.foreach(_.stop())
}
}
}
}
class ParquetV2PartitionDiscoverySuite extends ParquetPartitionDiscoverySuite {
import testImplicits._
// TODO: enable Parquet V2 write path after file source V2 writers are workable.
override protected def sparkConf: SparkConf =
super
.sparkConf
.set(SQLConf.USE_V1_SOURCE_LIST, "")
test("read partitioned table - partition key included in Parquet file") {
withTempDir { base =>
for {
pi <- Seq(1, 2)
ps <- Seq("foo", "bar")
} {
makeParquetFile(
(1 to 10).map(i => ParquetDataWithKey(i, pi, i.toString, ps)),
makePartitionDir(base, defaultPartitionName, "pi" -> pi, "ps" -> ps))
}
spark.read.parquet(base.getCanonicalPath).createOrReplaceTempView("t")
withTempView("t") {
checkAnswer(
sql("SELECT * FROM t"),
for {
i <- 1 to 10
pi <- Seq(1, 2)
ps <- Seq("foo", "bar")
} yield Row(i, i.toString, pi, ps))
checkAnswer(
sql("SELECT intField, pi FROM t"),
for {
i <- 1 to 10
pi <- Seq(1, 2)
_ <- Seq("foo", "bar")
} yield Row(i, pi))
checkAnswer(
sql("SELECT * FROM t WHERE pi = 1"),
for {
i <- 1 to 10
ps <- Seq("foo", "bar")
} yield Row(i, i.toString, 1, ps))
checkAnswer(
sql("SELECT * FROM t WHERE ps = 'foo'"),
for {
i <- 1 to 10
pi <- Seq(1, 2)
} yield Row(i, i.toString, pi, "foo"))
}
}
}
test("read partitioned table - with nulls and partition keys are included in Parquet file") {
withTempDir { base =>
for {
pi <- Seq(1, 2)
ps <- Seq("foo", null.asInstanceOf[String])
} {
makeParquetFile(
(1 to 10).map(i => ParquetDataWithKey(i, pi, i.toString, ps)),
makePartitionDir(base, defaultPartitionName, "pi" -> pi, "ps" -> ps))
}
val parquetRelation = spark.read.format("parquet").load(base.getCanonicalPath)
parquetRelation.createOrReplaceTempView("t")
withTempView("t") {
checkAnswer(
sql("SELECT * FROM t"),
for {
i <- 1 to 10
pi <- Seq(1, 2)
ps <- Seq("foo", null.asInstanceOf[String])
} yield Row(i, i.toString, pi, ps))
checkAnswer(
sql("SELECT * FROM t WHERE ps IS NULL"),
for {
i <- 1 to 10
pi <- Seq(1, 2)
} yield Row(i, i.toString, pi, null))
}
}
}
test("SPARK-7749 Non-partitioned table should have empty partition spec") {
withTempPath { dir =>
(1 to 10).map(i => (i, i.toString)).toDF("a", "b").write.parquet(dir.getCanonicalPath)
val queryExecution = spark.read.parquet(dir.getCanonicalPath).queryExecution
queryExecution.analyzed.collectFirst {
case DataSourceV2Relation(fileTable: FileTable, _, _, _, _) =>
assert(fileTable.fileIndex.partitionSpec() === PartitionSpec.emptySpec)
}.getOrElse {
        fail(s"Expecting a matching DataSourceV2Relation, but got:\n$queryExecution")
}
}
}
test("SPARK-18108 Parquet reader fails when data column types conflict with partition ones") {
withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "true") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val df = Seq((1L, 2.0)).toDF("a", "b")
df.write.parquet(s"$path/a=1")
checkAnswer(spark.read.parquet(s"$path"), Seq(Row(2.0, 1)))
}
}
}
test("SPARK-21463: MetadataLogFileIndex should respect userSpecifiedSchema for partition cols") {
withTempDir { tempDir =>
val output = new File(tempDir, "output").toString
val checkpoint = new File(tempDir, "chkpoint").toString
try {
val stream = MemoryStream[(String, Int)]
val df = stream.toDS().toDF("time", "value")
val sq = df.writeStream
.option("checkpointLocation", checkpoint)
.format("parquet")
.partitionBy("time")
.start(output)
stream.addData(("2017-01-01-00", 1), ("2017-01-01-01", 2))
sq.processAllAvailable()
val schema = new StructType()
.add("time", StringType)
.add("value", IntegerType)
val readBack = spark.read.schema(schema).parquet(output)
assert(readBack.schema.toSet === schema.toSet)
checkAnswer(
readBack,
Seq(Row(1, "2017-01-01-00"), Row(2, "2017-01-01-01"))
)
} finally {
spark.streams.active.foreach(_.stop())
}
}
}
} | darionyaphet/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetPartitionDiscoverySuite.scala | Scala | apache-2.0 | 46,514 |
package ch.wsl.box.rest.metadata
import java.util.UUID
import akka.stream.Materializer
import ch.wsl.box.model.boxentities.{BoxForm, BoxFunction, BoxUser}
import ch.wsl.box.model.shared._
import ch.wsl.box.rest.routes.{Table, View}
import ch.wsl.box.rest.runtime.Registry
import ch.wsl.box.rest.utils.UserProfile
import ch.wsl.box.services.Services
import scribe.Logging
import scala.concurrent.{ExecutionContext, Future}
case class BoxFormMetadataFactory(implicit mat:Materializer, ec:ExecutionContext, services:Services) extends Logging with MetadataFactory {
import ch.wsl.box.jdbc.PostgresProfile.api._
import ch.wsl.box.rest.metadata.box.Constants._
import ch.wsl.box.rest.metadata.box._
val viewsOnly = Registry().fields.views.sorted
val tablesAndViews = (viewsOnly ++ Registry().fields.tables).sorted
def registry = for{
forms <- getForms()
users <- getUsers()
functions <- getFunctions()
} yield Seq(
FormUIDef.main(tablesAndViews,users.sortBy(_.username)),
FormUIDef.page(users.sortBy(_.username)),
FormUIDef.field(tablesAndViews),
FormUIDef.field_no_db(tablesAndViews),
FormUIDef.field_childs(forms.sortBy(_.name)),
FormUIDef.field_static(tablesAndViews,functions.map(_.name)),
FormUIDef.fieldI18n(services.config.langs),
FormUIDef.formI18n(viewsOnly,services.config.langs),
FormUIDef.form_actions(functions.map(_.name)),
FormUIDef.form_navigation_actions(functions.map(_.name)),
FunctionUIDef.main,
FunctionUIDef.field(tablesAndViews),
FunctionUIDef.fieldI18n(services.config.langs),
FunctionUIDef.functionI18n(services.config.langs),
NewsUIDef.main,
NewsUIDef.newsI18n(services.config.langs),
LabelUIDef.label(services.config.langs),
LabelUIDef.labelContainer
)
def getForms():DBIO[Seq[BoxForm.BoxForm_row]] = {
BoxForm.BoxFormTable.result
}
def getFunctions():DBIO[Seq[BoxFunction.BoxFunction_row]] = {
BoxFunction.BoxFunctionTable.result
}
def getUsers():DBIO[Seq[BoxUser.BoxUser_row]] = {
BoxUser.BoxUserTable.result
}
def fieldTypes = Registry().fields.tableFields.mapValues(_.mapValues{col =>
col.jsonType
})
val visibleAdmin = Seq(FUNCTION,FORM,PAGE,NEWS,LABEL)
override def list: DBIO[Seq[String]] = registry.map(_.filter(f => visibleAdmin.contains(f.objId)).map(_.name))
override def of(name: String, lang: String): DBIO[JSONMetadata] = registry.map(_.find(_.name == name).get)
override def of(id: UUID, lang: String): DBIO[JSONMetadata] = registry.map(_.find(_.objId == id).get)
override def children(form: JSONMetadata): DBIO[Seq[JSONMetadata]] = for{
forms <- getForms()
functions <- getFunctions()
} yield {
form match {
case f if f.objId == FORM => Seq(
FormUIDef.field(tablesAndViews),
FormUIDef.field_no_db(tablesAndViews),
FormUIDef.field_static(tablesAndViews,functions.map(_.name)),
FormUIDef.field_childs(forms),
FormUIDef.fieldI18n(services.config.langs),
FormUIDef.formI18n(viewsOnly,services.config.langs),
FormUIDef.form_actions(functions.map(_.name)),
FormUIDef.form_navigation_actions(functions.map(_.name))
)
case f if f.objId == PAGE => Seq(FormUIDef.field_static(tablesAndViews,functions.map(_.name)),FormUIDef.field_childs(forms),FormUIDef.fieldI18n(services.config.langs),FormUIDef.formI18n(viewsOnly,services.config.langs))
case f if f.objId == FORM_FIELD => Seq(FormUIDef.fieldI18n(services.config.langs))
case f if f.objId == FORM_FIELD_NOT_DB => Seq(FormUIDef.fieldI18n(services.config.langs))
case f if f.objId == FORM_FIELD_STATIC => Seq(FormUIDef.fieldI18n(services.config.langs))
case f if f.objId == FORM_FIELD_CHILDS => Seq(FormUIDef.fieldI18n(services.config.langs))
case f if f.objId == FUNCTION => Seq(FunctionUIDef.field(tablesAndViews),FunctionUIDef.fieldI18n(services.config.langs),FunctionUIDef.functionI18n(services.config.langs))
case f if f.objId == FUNCTION_FIELD => Seq(FunctionUIDef.fieldI18n(services.config.langs))
case f if f.objId == NEWS => Seq(NewsUIDef.newsI18n(services.config.langs))
case f if f.objId == LABEL_CONTAINER => Seq(LabelUIDef.label(services.config.langs))
case _ => Seq()
}
}
}
| Insubric/box | server/src/main/scala/ch/wsl/box/rest/metadata/BoxFormMetadataFactory.scala | Scala | apache-2.0 | 4,290 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import scala.language.implicitConversions
import org.apache.spark.annotation.InterfaceStability
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.encoders.{encoderFor, ExpressionEncoder}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression
import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
import org.apache.spark.sql.catalyst.util.usePrettyExpression
import org.apache.spark.sql.execution.aggregate.TypedAggregateExpression
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions.lit
import org.apache.spark.sql.types._
private[sql] object Column {
def apply(colName: String): Column = new Column(colName)
def apply(expr: Expression): Column = new Column(expr)
def unapply(col: Column): Option[Expression] = Some(col.expr)
private[sql] def generateAlias(e: Expression): String = {
e match {
case a: AggregateExpression if a.aggregateFunction.isInstanceOf[TypedAggregateExpression] =>
a.aggregateFunction.toString
case expr => usePrettyExpression(expr).sql
}
}
}
/**
* A [[Column]] where an [[Encoder]] has been given for the expected input and return type.
* To create a [[TypedColumn]], use the `as` function on a [[Column]].
*
* @tparam T The input type expected for this expression. Can be `Any` if the expression is type
* checked by the analyzer instead of the compiler (i.e. `expr("sum(...)")`).
* @tparam U The output type of this column.
*
* @since 1.6.0
*/
@InterfaceStability.Stable
class TypedColumn[-T, U](
expr: Expression,
private[sql] val encoder: ExpressionEncoder[U])
extends Column(expr) {
/**
* Inserts the specific input type and schema into any expressions that are expected to operate
* on a decoded object.
*/
private[sql] def withInputType(
inputEncoder: ExpressionEncoder[_],
inputAttributes: Seq[Attribute]): TypedColumn[T, U] = {
val unresolvedDeserializer = UnresolvedDeserializer(inputEncoder.deserializer, inputAttributes)
val newExpr = expr transform {
case ta: TypedAggregateExpression if ta.inputDeserializer.isEmpty =>
ta.withInputInfo(
deser = unresolvedDeserializer,
cls = inputEncoder.clsTag.runtimeClass,
schema = inputEncoder.schema)
}
new TypedColumn[T, U](newExpr, encoder)
}
/**
* Gives the [[TypedColumn]] a name (alias).
* If the current `TypedColumn` has metadata associated with it, this metadata will be propagated
* to the new column.
*
* @group expr_ops
* @since 2.0.0
*/
override def name(alias: String): TypedColumn[T, U] =
new TypedColumn[T, U](super.name(alias).expr, encoder)
}
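// A minimal usage sketch for the class above: `as` on a Column yields a TypedColumn
// once an Encoder is in scope. The case class, SparkSession `spark` and dataset used
// here are assumptions made purely for illustration.
//
//   case class Person(name: String, age: Long)
//   import spark.implicits._
//   val ds: Dataset[Person] = spark.createDataset(Seq(Person("Alice", 30L)))
//   val age: TypedColumn[Person, Long] = ds("age").as[Long]
//   val ages: Dataset[Long] = ds.select(age)   // results keep their JVM type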
/**
* A column that will be computed based on the data in a `DataFrame`.
*
* A new column can be constructed based on the input columns present in a DataFrame:
*
* {{{
* df("columnName") // On a specific `df` DataFrame.
 *   col("columnName")                // A generic column not yet associated with a DataFrame.
* col("columnName.field") // Extracting a struct field
* col("`a.column.with.dots`") // Escape `.` in column names.
* $"columnName" // Scala short hand for a named column.
* }}}
*
* [[Column]] objects can be composed to form complex expressions:
*
* {{{
* $"a" + 1
* $"a" === $"b"
* }}}
*
* @note The internal Catalyst expression can be accessed via [[expr]], but this method is for
* debugging purposes only and can change in any future Spark releases.
*
* @groupname java_expr_ops Java-specific expression operators
* @groupname expr_ops Expression operators
* @groupname df_ops DataFrame functions
* @groupname Ungrouped Support functions for DataFrames
*
* @since 1.3.0
*/
@InterfaceStability.Stable
class Column(val expr: Expression) extends Logging {
def this(name: String) = this(name match {
case "*" => UnresolvedStar(None)
case _ if name.endsWith(".*") =>
val parts = UnresolvedAttribute.parseAttributeName(name.substring(0, name.length - 2))
UnresolvedStar(Some(parts))
case _ => UnresolvedAttribute.quotedString(name)
})
override def toString: String = usePrettyExpression(expr).sql
override def equals(that: Any): Boolean = that match {
case that: Column => that.expr.equals(this.expr)
case _ => false
}
override def hashCode: Int = this.expr.hashCode()
/** Creates a column based on the given expression. */
private def withExpr(newExpr: Expression): Column = new Column(newExpr)
/**
* Returns the expression for this column either with an existing or auto assigned name.
*/
private[sql] def named: NamedExpression = expr match {
// Wrap UnresolvedAttribute with UnresolvedAlias, as when we resolve UnresolvedAttribute, we
// will remove intermediate Alias for ExtractValue chain, and we need to alias it again to
// make it a NamedExpression.
case u: UnresolvedAttribute => UnresolvedAlias(u)
case u: UnresolvedExtractValue => UnresolvedAlias(u)
case expr: NamedExpression => expr
// Leave an unaliased generator with an empty list of names since the analyzer will generate
// the correct defaults after the nested expression's type has been resolved.
case g: Generator => MultiAlias(g, Nil)
case func: UnresolvedFunction => UnresolvedAlias(func, Some(Column.generateAlias))
// If we have a top level Cast, there is a chance to give it a better alias, if there is a
// NamedExpression under this Cast.
case c: Cast =>
c.transformUp {
case c @ Cast(_: NamedExpression, _, _) => UnresolvedAlias(c)
} match {
case ne: NamedExpression => ne
case other => Alias(expr, usePrettyExpression(expr).sql)()
}
case a: AggregateExpression if a.aggregateFunction.isInstanceOf[TypedAggregateExpression] =>
UnresolvedAlias(a, Some(Column.generateAlias))
// Wait until the struct is resolved. This will generate a nicer looking alias.
case struct: CreateNamedStructLike => UnresolvedAlias(struct)
case expr: Expression => Alias(expr, usePrettyExpression(expr).sql)()
}
/**
* Provides a type hint about the expected return value of this column. This information can
* be used by operations such as `select` on a [[Dataset]] to automatically convert the
* results into the correct JVM types.
* @since 1.6.0
*/
def as[U : Encoder]: TypedColumn[Any, U] = new TypedColumn[Any, U](expr, encoderFor[U])
/**
* Extracts a value or values from a complex type.
* The following types of extraction are supported:
*
* - Given an Array, an integer ordinal can be used to retrieve a single value.
* - Given a Map, a key of the correct type can be used to retrieve an individual value.
* - Given a Struct, a string fieldName can be used to extract that field.
   *     - Given an Array of Structs, a string fieldName can be used to extract that field
   *       of every struct in that array, and return an Array of fields.
*
* @group expr_ops
* @since 1.4.0
*/
def apply(extraction: Any): Column = withExpr {
UnresolvedExtractValue(expr, lit(extraction).expr)
}
/**
* Unary minus, i.e. negate the expression.
* {{{
* // Scala: select the amount column and negates all values.
* df.select( -df("amount") )
*
* // Java:
* import static org.apache.spark.sql.functions.*;
* df.select( negate(col("amount") );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def unary_- : Column = withExpr { UnaryMinus(expr) }
/**
* Inversion of boolean expression, i.e. NOT.
* {{{
* // Scala: select rows that are not active (isActive === false)
* df.filter( !df("isActive") )
*
* // Java:
* import static org.apache.spark.sql.functions.*;
* df.filter( not(df.col("isActive")) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def unary_! : Column = withExpr { Not(expr) }
/**
* Equality test.
* {{{
* // Scala:
* df.filter( df("colA") === df("colB") )
*
* // Java
* import static org.apache.spark.sql.functions.*;
* df.filter( col("colA").equalTo(col("colB")) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def === (other: Any): Column = withExpr {
val right = lit(other).expr
if (this.expr == right) {
logWarning(
s"Constructing trivially true equals predicate, '${this.expr} = $right'. " +
"Perhaps you need to use aliases.")
}
EqualTo(expr, right)
}
/**
* Equality test.
* {{{
* // Scala:
* df.filter( df("colA") === df("colB") )
*
* // Java
* import static org.apache.spark.sql.functions.*;
* df.filter( col("colA").equalTo(col("colB")) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def equalTo(other: Any): Column = this === other
/**
* Inequality test.
* {{{
* // Scala:
* df.select( df("colA") =!= df("colB") )
* df.select( !(df("colA") === df("colB")) )
*
* // Java:
* import static org.apache.spark.sql.functions.*;
* df.filter( col("colA").notEqual(col("colB")) );
* }}}
*
* @group expr_ops
* @since 2.0.0
*/
def =!= (other: Any): Column = withExpr{ Not(EqualTo(expr, lit(other).expr)) }
/**
* Inequality test.
* {{{
* // Scala:
* df.select( df("colA") !== df("colB") )
* df.select( !(df("colA") === df("colB")) )
*
* // Java:
* import static org.apache.spark.sql.functions.*;
* df.filter( col("colA").notEqual(col("colB")) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
@deprecated("!== does not have the same precedence as ===, use =!= instead", "2.0.0")
def !== (other: Any): Column = this =!= other
/**
* Inequality test.
* {{{
* // Scala:
* df.select( df("colA") !== df("colB") )
* df.select( !(df("colA") === df("colB")) )
*
* // Java:
* import static org.apache.spark.sql.functions.*;
* df.filter( col("colA").notEqual(col("colB")) );
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def notEqual(other: Any): Column = withExpr { Not(EqualTo(expr, lit(other).expr)) }
/**
* Greater than.
* {{{
* // Scala: The following selects people older than 21.
* people.select( people("age") > 21 )
*
* // Java:
* import static org.apache.spark.sql.functions.*;
* people.select( people("age").gt(21) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def > (other: Any): Column = withExpr { GreaterThan(expr, lit(other).expr) }
/**
* Greater than.
* {{{
* // Scala: The following selects people older than 21.
* people.select( people("age") > lit(21) )
*
* // Java:
* import static org.apache.spark.sql.functions.*;
* people.select( people("age").gt(21) );
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def gt(other: Any): Column = this > other
/**
* Less than.
* {{{
* // Scala: The following selects people younger than 21.
* people.select( people("age") < 21 )
*
* // Java:
* people.select( people("age").lt(21) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def < (other: Any): Column = withExpr { LessThan(expr, lit(other).expr) }
/**
* Less than.
* {{{
* // Scala: The following selects people younger than 21.
* people.select( people("age") < 21 )
*
* // Java:
* people.select( people("age").lt(21) );
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def lt(other: Any): Column = this < other
/**
* Less than or equal to.
* {{{
   *   // Scala: The following selects people age 21 or younger.
* people.select( people("age") <= 21 )
*
* // Java:
* people.select( people("age").leq(21) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def <= (other: Any): Column = withExpr { LessThanOrEqual(expr, lit(other).expr) }
/**
* Less than or equal to.
* {{{
   *   // Scala: The following selects people age 21 or younger.
* people.select( people("age") <= 21 )
*
* // Java:
* people.select( people("age").leq(21) );
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def leq(other: Any): Column = this <= other
/**
* Greater than or equal to an expression.
* {{{
   *   // Scala: The following selects people age 21 or older.
* people.select( people("age") >= 21 )
*
* // Java:
* people.select( people("age").geq(21) )
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def >= (other: Any): Column = withExpr { GreaterThanOrEqual(expr, lit(other).expr) }
/**
* Greater than or equal to an expression.
* {{{
   *   // Scala: The following selects people age 21 or older.
* people.select( people("age") >= 21 )
*
* // Java:
* people.select( people("age").geq(21) )
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def geq(other: Any): Column = this >= other
/**
* Equality test that is safe for null values.
*
* @group expr_ops
* @since 1.3.0
*/
def <=> (other: Any): Column = withExpr { EqualNullSafe(expr, lit(other).expr) }
/**
* Equality test that is safe for null values.
*
* @group java_expr_ops
* @since 1.3.0
*/
def eqNullSafe(other: Any): Column = this <=> other
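  // A short sketch of the null semantics (the DataFrame `df` is an assumption for the
  // example): `<=>` evaluates to true when both sides are null, whereas `===` evaluates
  // to null in that case and the row is filtered out.
  //
  //   df.where($"a" <=> $"b")   // keeps rows where both a and b are null
  //   df.where($"a" === $"b")   // drops rows where either a or b is null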
/**
* Evaluates a list of conditions and returns one of multiple possible result expressions.
* If otherwise is not defined at the end, null is returned for unmatched conditions.
*
* {{{
* // Example: encoding gender string column into integer.
*
* // Scala:
* people.select(when(people("gender") === "male", 0)
* .when(people("gender") === "female", 1)
* .otherwise(2))
*
* // Java:
* people.select(when(col("gender").equalTo("male"), 0)
* .when(col("gender").equalTo("female"), 1)
* .otherwise(2))
* }}}
*
* @group expr_ops
* @since 1.4.0
*/
def when(condition: Column, value: Any): Column = this.expr match {
case CaseWhen(branches, None) =>
withExpr { CaseWhen(branches :+ (condition.expr, lit(value).expr)) }
case CaseWhen(branches, Some(_)) =>
throw new IllegalArgumentException(
"when() cannot be applied once otherwise() is applied")
case _ =>
throw new IllegalArgumentException(
"when() can only be applied on a Column previously generated by when() function")
}
/**
* Evaluates a list of conditions and returns one of multiple possible result expressions.
* If otherwise is not defined at the end, null is returned for unmatched conditions.
*
* {{{
* // Example: encoding gender string column into integer.
*
* // Scala:
* people.select(when(people("gender") === "male", 0)
* .when(people("gender") === "female", 1)
* .otherwise(2))
*
* // Java:
* people.select(when(col("gender").equalTo("male"), 0)
* .when(col("gender").equalTo("female"), 1)
* .otherwise(2))
* }}}
*
* @group expr_ops
* @since 1.4.0
*/
def otherwise(value: Any): Column = this.expr match {
case CaseWhen(branches, None) =>
withExpr { CaseWhen(branches, Option(lit(value).expr)) }
case CaseWhen(branches, Some(_)) =>
throw new IllegalArgumentException(
"otherwise() can only be applied once on a Column previously generated by when()")
case _ =>
throw new IllegalArgumentException(
"otherwise() can only be applied on a Column previously generated by when()")
}
/**
* True if the current column is between the lower bound and upper bound, inclusive.
*
* @group java_expr_ops
* @since 1.4.0
*/
def between(lowerBound: Any, upperBound: Any): Column = {
(this >= lowerBound) && (this <= upperBound)
}
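  // For example (an illustrative sketch; `df` is an assumed DataFrame), the following
  // two filters are equivalent, since `between` expands to the conjunction above:
  //
  //   df.filter($"age".between(18, 65))
  //   df.filter($"age" >= 18 && $"age" <= 65)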
/**
* True if the current expression is NaN.
*
* @group expr_ops
* @since 1.5.0
*/
def isNaN: Column = withExpr { IsNaN(expr) }
/**
* True if the current expression is null.
*
* @group expr_ops
* @since 1.3.0
*/
def isNull: Column = withExpr { IsNull(expr) }
/**
* True if the current expression is NOT null.
*
* @group expr_ops
* @since 1.3.0
*/
def isNotNull: Column = withExpr { IsNotNull(expr) }
/**
* Boolean OR.
* {{{
* // Scala: The following selects people that are in school or employed.
* people.filter( people("inSchool") || people("isEmployed") )
*
* // Java:
* people.filter( people("inSchool").or(people("isEmployed")) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def || (other: Any): Column = withExpr { Or(expr, lit(other).expr) }
/**
* Boolean OR.
* {{{
* // Scala: The following selects people that are in school or employed.
* people.filter( people("inSchool") || people("isEmployed") )
*
* // Java:
* people.filter( people("inSchool").or(people("isEmployed")) );
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def or(other: Column): Column = this || other
/**
* Boolean AND.
* {{{
* // Scala: The following selects people that are in school and employed at the same time.
* people.select( people("inSchool") && people("isEmployed") )
*
* // Java:
* people.select( people("inSchool").and(people("isEmployed")) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def && (other: Any): Column = withExpr { And(expr, lit(other).expr) }
/**
* Boolean AND.
* {{{
* // Scala: The following selects people that are in school and employed at the same time.
* people.select( people("inSchool") && people("isEmployed") )
*
* // Java:
* people.select( people("inSchool").and(people("isEmployed")) );
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def and(other: Column): Column = this && other
/**
* Sum of this expression and another expression.
* {{{
* // Scala: The following selects the sum of a person's height and weight.
* people.select( people("height") + people("weight") )
*
* // Java:
* people.select( people("height").plus(people("weight")) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def + (other: Any): Column = withExpr { Add(expr, lit(other).expr) }
/**
* Sum of this expression and another expression.
* {{{
* // Scala: The following selects the sum of a person's height and weight.
* people.select( people("height") + people("weight") )
*
* // Java:
* people.select( people("height").plus(people("weight")) );
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def plus(other: Any): Column = this + other
/**
* Subtraction. Subtract the other expression from this expression.
* {{{
* // Scala: The following selects the difference between people's height and their weight.
* people.select( people("height") - people("weight") )
*
* // Java:
* people.select( people("height").minus(people("weight")) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def - (other: Any): Column = withExpr { Subtract(expr, lit(other).expr) }
/**
* Subtraction. Subtract the other expression from this expression.
* {{{
* // Scala: The following selects the difference between people's height and their weight.
* people.select( people("height") - people("weight") )
*
* // Java:
* people.select( people("height").minus(people("weight")) );
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def minus(other: Any): Column = this - other
/**
* Multiplication of this expression and another expression.
* {{{
* // Scala: The following multiplies a person's height by their weight.
* people.select( people("height") * people("weight") )
*
* // Java:
* people.select( people("height").multiply(people("weight")) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def * (other: Any): Column = withExpr { Multiply(expr, lit(other).expr) }
/**
* Multiplication of this expression and another expression.
* {{{
* // Scala: The following multiplies a person's height by their weight.
* people.select( people("height") * people("weight") )
*
* // Java:
* people.select( people("height").multiply(people("weight")) );
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def multiply(other: Any): Column = this * other
/**
   * Division of this expression by another expression.
* {{{
* // Scala: The following divides a person's height by their weight.
* people.select( people("height") / people("weight") )
*
* // Java:
* people.select( people("height").divide(people("weight")) );
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def / (other: Any): Column = withExpr { Divide(expr, lit(other).expr) }
/**
   * Division of this expression by another expression.
* {{{
* // Scala: The following divides a person's height by their weight.
* people.select( people("height") / people("weight") )
*
* // Java:
* people.select( people("height").divide(people("weight")) );
* }}}
*
* @group java_expr_ops
* @since 1.3.0
*/
def divide(other: Any): Column = this / other
/**
* Modulo (a.k.a. remainder) expression.
*
* @group expr_ops
* @since 1.3.0
*/
def % (other: Any): Column = withExpr { Remainder(expr, lit(other).expr) }
/**
* Modulo (a.k.a. remainder) expression.
*
* @group java_expr_ops
* @since 1.3.0
*/
def mod(other: Any): Column = this % other
/**
   * A boolean expression that is evaluated to true if the value of this expression is contained
   * in the evaluated values of the arguments.
*
* @group expr_ops
* @since 1.5.0
*/
@scala.annotation.varargs
def isin(list: Any*): Column = withExpr { In(expr, list.map(lit(_).expr)) }
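  // Example sketch (the DataFrame `df` and the column name are assumptions):
  //
  //   // Scala:
  //   df.filter($"country".isin("CH", "DE", "FR"))
  //
  //   // Java:
  //   df.filter(col("country").isin("CH", "DE", "FR"));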
/**
* SQL like expression. Returns a boolean column based on a SQL LIKE match.
*
* @group expr_ops
* @since 1.3.0
*/
def like(literal: String): Column = withExpr { Like(expr, lit(literal).expr) }
/**
* SQL RLIKE expression (LIKE with Regex). Returns a boolean column based on a regex
* match.
*
* @group expr_ops
* @since 1.3.0
*/
def rlike(literal: String): Column = withExpr { RLike(expr, lit(literal).expr) }
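  // Example sketch (`df` and the column are assumptions; `like` uses SQL wildcards,
  // `rlike` takes a Java regular expression):
  //
  //   df.filter($"name".like("Al%"))
  //   df.filter($"name".rlike("^Al.*e$"))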
/**
* An expression that gets an item at position `ordinal` out of an array,
* or gets a value by key `key` in a `MapType`.
*
* @group expr_ops
* @since 1.3.0
*/
def getItem(key: Any): Column = withExpr { UnresolvedExtractValue(expr, Literal(key)) }
/**
* An expression that gets a field by name in a `StructType`.
*
* @group expr_ops
* @since 1.3.0
*/
def getField(fieldName: String): Column = withExpr {
UnresolvedExtractValue(expr, Literal(fieldName))
}
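  // Example sketch covering both extraction methods (a DataFrame `df` with an array,
  // a map and a struct column is assumed for the example):
  //
  //   df.select(
  //     $"letters".getItem(0),      // first element of an array column
  //     $"prices".getItem("milk"),  // value for a key in a map column
  //     $"address".getField("zip")  // field of a struct column
  //   )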
/**
* An expression that returns a substring.
* @param startPos expression for the starting position.
* @param len expression for the length of the substring.
*
* @group expr_ops
* @since 1.3.0
*/
def substr(startPos: Column, len: Column): Column = withExpr {
Substring(expr, startPos.expr, len.expr)
}
/**
* An expression that returns a substring.
* @param startPos starting position.
* @param len length of the substring.
*
* @group expr_ops
* @since 1.3.0
*/
def substr(startPos: Int, len: Int): Column = withExpr {
Substring(expr, lit(startPos).expr, lit(len).expr)
}
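  // Example sketch (assumes a string column `name`); positions are 1-based, as in SQL:
  //
  //   df.select($"name".substr(1, 3))             // first three characters
  //   df.select($"name".substr(lit(1), lit(3)))   // same, using Column arguments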
/**
* Contains the other element. Returns a boolean column based on a string match.
*
* @group expr_ops
* @since 1.3.0
*/
def contains(other: Any): Column = withExpr { Contains(expr, lit(other).expr) }
/**
* String starts with. Returns a boolean column based on a string match.
*
* @group expr_ops
* @since 1.3.0
*/
def startsWith(other: Column): Column = withExpr { StartsWith(expr, lit(other).expr) }
/**
* String starts with another string literal. Returns a boolean column based on a string match.
*
* @group expr_ops
* @since 1.3.0
*/
def startsWith(literal: String): Column = this.startsWith(lit(literal))
/**
* String ends with. Returns a boolean column based on a string match.
*
* @group expr_ops
* @since 1.3.0
*/
def endsWith(other: Column): Column = withExpr { EndsWith(expr, lit(other).expr) }
/**
* String ends with another string literal. Returns a boolean column based on a string match.
*
* @group expr_ops
* @since 1.3.0
*/
def endsWith(literal: String): Column = this.endsWith(lit(literal))
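  // Example sketch for the string-match helpers above (`df` and its columns are
  // assumptions for the example):
  //
  //   df.filter($"path".contains("tmp"))
  //   df.filter($"file".startsWith("part-") && $"file".endsWith(".gz"))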
/**
* Gives the column an alias. Same as `as`.
* {{{
* // Renames colA to colB in select output.
* df.select($"colA".alias("colB"))
* }}}
*
* @group expr_ops
* @since 1.4.0
*/
def alias(alias: String): Column = name(alias)
/**
* Gives the column an alias.
* {{{
* // Renames colA to colB in select output.
* df.select($"colA".as("colB"))
* }}}
*
* If the current column has metadata associated with it, this metadata will be propagated
   * to the new column. If this is not desired, use `as` with explicitly empty metadata.
*
* @group expr_ops
* @since 1.3.0
*/
def as(alias: String): Column = name(alias)
/**
* (Scala-specific) Assigns the given aliases to the results of a table generating function.
* {{{
   *   // Assigns the names `key` and `value` to the columns produced by explode.
* df.select(explode($"myMap").as("key" :: "value" :: Nil))
* }}}
*
* @group expr_ops
* @since 1.4.0
*/
def as(aliases: Seq[String]): Column = withExpr { MultiAlias(expr, aliases) }
/**
* Assigns the given aliases to the results of a table generating function.
* {{{
   *   // Assigns the names `key` and `value` to the columns produced by explode.
* df.select(explode($"myMap").as("key" :: "value" :: Nil))
* }}}
*
* @group expr_ops
* @since 1.4.0
*/
def as(aliases: Array[String]): Column = withExpr { MultiAlias(expr, aliases) }
/**
* Gives the column an alias.
* {{{
* // Renames colA to colB in select output.
* df.select($"colA".as('colB))
* }}}
*
* If the current column has metadata associated with it, this metadata will be propagated
   * to the new column. If this is not desired, use `as` with explicitly empty metadata.
*
* @group expr_ops
* @since 1.3.0
*/
def as(alias: Symbol): Column = name(alias.name)
/**
* Gives the column an alias with metadata.
* {{{
* val metadata: Metadata = ...
* df.select($"colA".as("colB", metadata))
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def as(alias: String, metadata: Metadata): Column = withExpr {
Alias(expr, alias)(explicitMetadata = Some(metadata))
}
/**
* Gives the column a name (alias).
* {{{
* // Renames colA to colB in select output.
* df.select($"colA".name("colB"))
* }}}
*
* If the current column has metadata associated with it, this metadata will be propagated
   * to the new column. If this is not desired, use `as` with explicitly empty metadata.
*
* @group expr_ops
* @since 2.0.0
*/
def name(alias: String): Column = withExpr {
expr match {
case ne: NamedExpression => Alias(expr, alias)(explicitMetadata = Some(ne.metadata))
case other => Alias(other, alias)()
}
}
/**
* Casts the column to a different data type.
* {{{
* // Casts colA to IntegerType.
* import org.apache.spark.sql.types.IntegerType
* df.select(df("colA").cast(IntegerType))
*
* // equivalent to
* df.select(df("colA").cast("int"))
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def cast(to: DataType): Column = withExpr { Cast(expr, to) }
/**
* Casts the column to a different data type, using the canonical string representation
* of the type. The supported types are: `string`, `boolean`, `byte`, `short`, `int`, `long`,
* `float`, `double`, `decimal`, `date`, `timestamp`.
* {{{
* // Casts colA to integer.
* df.select(df("colA").cast("int"))
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def cast(to: String): Column = cast(CatalystSqlParser.parseDataType(to))
/**
* Returns a sort expression based on the descending order of the column.
* {{{
* // Scala
* df.sort(df("age").desc)
*
* // Java
* df.sort(df.col("age").desc());
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def desc: Column = withExpr { SortOrder(expr, Descending) }
/**
* Returns a sort expression based on the descending order of the column,
* and null values appear before non-null values.
* {{{
* // Scala: sort a DataFrame by age column in descending order and null values appearing first.
* df.sort(df("age").desc_nulls_first)
*
* // Java
* df.sort(df.col("age").desc_nulls_first());
* }}}
*
* @group expr_ops
* @since 2.1.0
*/
def desc_nulls_first: Column = withExpr { SortOrder(expr, Descending, NullsFirst, Set.empty) }
/**
* Returns a sort expression based on the descending order of the column,
* and null values appear after non-null values.
* {{{
* // Scala: sort a DataFrame by age column in descending order and null values appearing last.
* df.sort(df("age").desc_nulls_last)
*
* // Java
* df.sort(df.col("age").desc_nulls_last());
* }}}
*
* @group expr_ops
* @since 2.1.0
*/
def desc_nulls_last: Column = withExpr { SortOrder(expr, Descending, NullsLast, Set.empty) }
/**
* Returns a sort expression based on ascending order of the column.
* {{{
* // Scala: sort a DataFrame by age column in ascending order.
* df.sort(df("age").asc)
*
* // Java
* df.sort(df.col("age").asc());
* }}}
*
* @group expr_ops
* @since 1.3.0
*/
def asc: Column = withExpr { SortOrder(expr, Ascending) }
/**
* Returns a sort expression based on ascending order of the column,
   * and null values appear before non-null values.
   * {{{
   *   // Scala: sort a DataFrame by age column in ascending order and null values appearing first.
   *   df.sort(df("age").asc_nulls_first)
   *
   *   // Java
   *   df.sort(df.col("age").asc_nulls_first());
* }}}
*
* @group expr_ops
* @since 2.1.0
*/
def asc_nulls_first: Column = withExpr { SortOrder(expr, Ascending, NullsFirst, Set.empty) }
/**
* Returns a sort expression based on ascending order of the column,
* and null values appear after non-null values.
* {{{
* // Scala: sort a DataFrame by age column in ascending order and null values appearing last.
* df.sort(df("age").asc_nulls_last)
*
* // Java
* df.sort(df.col("age").asc_nulls_last());
* }}}
*
* @group expr_ops
* @since 2.1.0
*/
def asc_nulls_last: Column = withExpr { SortOrder(expr, Ascending, NullsLast, Set.empty) }
/**
* Prints the expression to the console for debugging purposes.
*
* @group df_ops
* @since 1.3.0
*/
def explain(extended: Boolean): Unit = {
// scalastyle:off println
if (extended) {
println(expr)
} else {
println(expr.sql)
}
// scalastyle:on println
}
/**
* Compute bitwise OR of this expression with another expression.
* {{{
* df.select($"colA".bitwiseOR($"colB"))
* }}}
*
* @group expr_ops
* @since 1.4.0
*/
def bitwiseOR(other: Any): Column = withExpr { BitwiseOr(expr, lit(other).expr) }
/**
* Compute bitwise AND of this expression with another expression.
* {{{
* df.select($"colA".bitwiseAND($"colB"))
* }}}
*
* @group expr_ops
* @since 1.4.0
*/
def bitwiseAND(other: Any): Column = withExpr { BitwiseAnd(expr, lit(other).expr) }
/**
* Compute bitwise XOR of this expression with another expression.
* {{{
* df.select($"colA".bitwiseXOR($"colB"))
* }}}
*
* @group expr_ops
* @since 1.4.0
*/
def bitwiseXOR(other: Any): Column = withExpr { BitwiseXor(expr, lit(other).expr) }
/**
* Defines a windowing column.
*
* {{{
* val w = Window.partitionBy("name").orderBy("id")
* df.select(
* sum("price").over(w.rangeBetween(Window.unboundedPreceding, 2)),
* avg("price").over(w.rowsBetween(Window.currentRow, 4))
* )
* }}}
*
* @group expr_ops
* @since 1.4.0
*/
def over(window: expressions.WindowSpec): Column = window.withAggregate(this)
/**
* Defines an empty analytic clause. In this case the analytic function is applied
* and presented for all rows in the result set.
*
* {{{
* df.select(
* sum("price").over(),
* avg("price").over()
* )
* }}}
*
* @group expr_ops
* @since 2.0.0
*/
def over(): Column = over(Window.spec)
}
/**
* A convenient class used for constructing schema.
*
* @since 1.3.0
*/
@InterfaceStability.Stable
class ColumnName(name: String) extends Column(name) {
/**
* Creates a new `StructField` of type boolean.
* @since 1.3.0
*/
def boolean: StructField = StructField(name, BooleanType)
/**
* Creates a new `StructField` of type byte.
* @since 1.3.0
*/
def byte: StructField = StructField(name, ByteType)
/**
* Creates a new `StructField` of type short.
* @since 1.3.0
*/
def short: StructField = StructField(name, ShortType)
/**
* Creates a new `StructField` of type int.
* @since 1.3.0
*/
def int: StructField = StructField(name, IntegerType)
/**
* Creates a new `StructField` of type long.
* @since 1.3.0
*/
def long: StructField = StructField(name, LongType)
/**
* Creates a new `StructField` of type float.
* @since 1.3.0
*/
def float: StructField = StructField(name, FloatType)
/**
* Creates a new `StructField` of type double.
* @since 1.3.0
*/
def double: StructField = StructField(name, DoubleType)
/**
* Creates a new `StructField` of type string.
* @since 1.3.0
*/
def string: StructField = StructField(name, StringType)
/**
* Creates a new `StructField` of type date.
* @since 1.3.0
*/
def date: StructField = StructField(name, DateType)
/**
* Creates a new `StructField` of type decimal.
* @since 1.3.0
*/
def decimal: StructField = StructField(name, DecimalType.USER_DEFAULT)
/**
* Creates a new `StructField` of type decimal.
* @since 1.3.0
*/
def decimal(precision: Int, scale: Int): StructField =
StructField(name, DecimalType(precision, scale))
/**
* Creates a new `StructField` of type timestamp.
* @since 1.3.0
*/
def timestamp: StructField = StructField(name, TimestampType)
/**
* Creates a new `StructField` of type binary.
* @since 1.3.0
*/
def binary: StructField = StructField(name, BinaryType)
/**
* Creates a new `StructField` of type array.
* @since 1.3.0
*/
def array(dataType: DataType): StructField = StructField(name, ArrayType(dataType))
/**
* Creates a new `StructField` of type map.
* @since 1.3.0
*/
def map(keyType: DataType, valueType: DataType): StructField =
map(MapType(keyType, valueType))
def map(mapType: MapType): StructField = StructField(name, mapType)
/**
* Creates a new `StructField` of type struct.
* @since 1.3.0
*/
def struct(fields: StructField*): StructField = struct(StructType(fields))
/**
* Creates a new `StructField` of type struct.
* @since 1.3.0
*/
def struct(structType: StructType): StructField = StructField(name, structType)
}
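// A small sketch of how ColumnName is typically used: the `$"..."` interpolator from
// `spark.implicits._` returns a ColumnName, so a schema can be assembled inline. The
// column names below are assumptions for the example.
//
//   import spark.implicits._
//   val schema = StructType(
//     $"id".long :: $"name".string :: $"scores".array(DoubleType) :: Nil)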
| liutang123/spark | sql/core/src/main/scala/org/apache/spark/sql/Column.scala | Scala | apache-2.0 | 36,619 |
import blended.sbt.Dependencies
object BlendedUtil extends ProjectFactory {
private[this] val helper = new ProjectSettings(
projectName = "blended.util",
description = "Utility classes to use in other bundles",
deps = Seq(
Dependencies.akkaActor,
Dependencies.akkaSlf4j,
Dependencies.slf4j,
Dependencies.akkaTestkit % "test",
Dependencies.scalatest % "test",
Dependencies.junit % "test",
Dependencies.logbackClassic % "test",
Dependencies.logbackCore % "test"
),
adaptBundle = b => b.copy(
exportPackage = Seq(b.bundleSymbolicName, s"${b.bundleSymbolicName}.config")
)
)
override val project = helper.baseProject
}
| lefou/blended | project/BlendedUtil.scala | Scala | apache-2.0 | 703 |
package eventsource.slick
import java.time.Instant
import akka.actor.ActorSystem
import eventsource.models.{Event, EventVersion, SomeEntity, SomeEventType}
import org.scalamock.scalatest.MockFactory
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.SpanSugar._
import org.scalatest.{BeforeAndAfterEach, MustMatchers, WordSpec}
import play.api.libs.json.{JsNull, Json}
import slick.jdbc.H2Profile
import slick.jdbc.JdbcBackend
import scala.concurrent.Await
class CreateActionSpec
extends WordSpec
with MockFactory
with MustMatchers
with ScalaFutures
with BeforeAndAfterEach {
val system = ActorSystem("retrying")
implicit def ec = system.dispatcher
implicit def scheduler = system.scheduler
val db = JdbcBackend.Database
.forURL("jdbc:h2:mem:test1;DB_CLOSE_DELAY=-1", driver = "org.h2.Driver")
val dao = new SomeEntityDao(H2Profile, db)
override protected def beforeEach(): Unit = {
Await.ready(dao.createSchema(), 2.seconds)
}
override protected def afterEach(): Unit = {
Await.ready(dao.dropSchema(), 2.seconds)
}
"CreateAction" should {
"create a non existing entity from event" in {
val event = Event(
"some_id",
EventVersion(1, 100),
Instant.now(),
SomeEventType.Create,
None,
Some(
Json.obj(
"id" -> "some_id",
"anInt" -> 1,
"aString" -> "string",
"createdVersion" -> JsNull,
"lastModifiedVersion" -> JsNull
)),
None
)
val result =
Await.result(dao.createAction.processEvent(event), 2.seconds)
result.isDefined mustBe true
result.get.id mustBe "some_id"
result.get.createdVersion mustBe EventVersion(1, 100)
result.get.lastModifiedVersion mustBe EventVersion(1, 100)
val entities = Await.result(dao.findAll(), 2.seconds)
entities must have size (1)
entities must contain(
SomeEntity("some_id", 1, "string", EventVersion(1, 100), EventVersion(1, 100)))
}
"not replace an already existing entity" in {
val existingEntity =
SomeEntity("some_id", 1, "string", EventVersion(1, 100), EventVersion(1, 100))
Await.ready(dao.insert(existingEntity), 2.seconds)
val event = Event(
"some_id",
EventVersion(1, 101),
Instant.now(),
SomeEventType.Create,
None,
Some(
Json.obj(
"id" -> "some_id",
"anInt" -> 123,
"aString" -> "other string",
"createdVersion" -> JsNull,
"lastModifiedVersion" -> JsNull
)
),
None
)
val result =
Await.result(dao.createAction.processEvent(event), 2.seconds)
result.isEmpty mustBe true
val entities = Await.result(dao.findAll(), 2.seconds)
entities must have size (1)
entities must contain(existingEntity)
}
"override new entities (first create is the correct one)" in {
val existingEntity =
SomeEntity("some_id", 1, "string", EventVersion(1, 102), EventVersion(1, 102))
Await.ready(dao.insert(existingEntity), 2.seconds)
val event = Event(
"some_id",
EventVersion(1, 101),
Instant.now(),
SomeEventType.Create,
None,
Some(
Json.obj(
"id" -> "some_id",
"anInt" -> 123,
"aString" -> "other string",
"createdVersion" -> JsNull,
"lastModifiedVersion" -> JsNull
)
),
None
)
val result =
Await.result(dao.createAction.processEvent(event), 2.seconds)
result.isDefined mustBe true
result.get.id mustBe "some_id"
result.get.createdVersion mustBe EventVersion(1, 101)
result.get.lastModifiedVersion mustBe EventVersion(1, 101)
val entities = Await.result(dao.findAll(), 2.seconds)
entities must have size (1)
entities must contain(
SomeEntity("some_id", 123, "other string", EventVersion(1, 101), EventVersion(1, 101)))
}
}
}
| 21re/play-eventsource | src/test/scala/eventsource/slick/CreateActionSpec.scala | Scala | mit | 4,290 |
package ir.ast
import ir.interpreter.Interpreter._
import ir._
/**
* Head pattern.
* Code for this pattern can be generated.
*
* The head pattern has the following high-level semantics:
* `Head()( [x,,1,,, ..., x,,n,,] ) = x,,1,,`
*
* The head pattern has the following type:
* `Head() : [a],,I,, -> a`
*/
case class Head() extends Pattern(arity = 1) {
override def checkType(argType: Type,
setType: Boolean): Type = {
argType match {
case ArrayType(t) => ArrayTypeWSWC(t, 1)
case _ => throw new TypeException(argType, "ArrayType", this)
}
}
override def eval(valueMap: ValueMap, args: Any*): Any = {
assert(args.length == arity)
args.head match {
case a: Array[_] => a.head
}
}
}
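// A small sketch of the semantics stated above: eval simply returns the head of its
// array argument (the empty value map here is an assumption for the example).
//
//   Head().eval(Map.empty, Array(1, 2, 3))   // == 1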
| lift-project/lift | src/main/ir/ast/Head.scala | Scala | mit | 768 |
import sbt._
import Keys._
object BuildSettings {
lazy val basicSettings = Seq[Setting[_]](
organization := "com.github.splee",
version := "0.2-SNAPSHOT",
description := "Monitors consumer group offset lag in Burrow using InfluxDB",
resolvers ++= Dependencies.resolutionRepos
)
// sbt-assembly settings for building a fat jar
import sbtassembly.Plugin._
import AssemblyKeys._
lazy val sbtAssemblySettings = assemblySettings ++ Seq(
// Slightly cleaner jar name
jarName in assembly := {
name.value + "-" + version.value + ".jar"
},
// Drop these jars
excludedJars in assembly <<= (fullClasspath in assembly) map { cp =>
val excludes = Set(
"jsp-api-2.1-6.1.14.jar",
"jsp-2.1-6.1.14.jar",
"jasper-compiler-5.5.12.jar",
"commons-beanutils-core-1.8.0.jar",
"commons-beanutils-1.7.0.jar",
"servlet-api-2.5-20081211.jar",
"servlet-api-2.5.jar"
)
cp filter { jar => excludes(jar.data.getName) }
},
mergeStrategy in assembly <<= (mergeStrategy in assembly) {
(old) => {
// case "project.clj" => MergeStrategy.discard // Leiningen build files
case x if x.startsWith("META-INF") => MergeStrategy.discard // Bumf
case x if x.endsWith(".html") => MergeStrategy.discard // More bumf
case x if x.endsWith("UnusedStubClass.class") => MergeStrategy.first // really?
case PathList("com", "esotericsoftware", xs @ _*) => MergeStrategy.last // For Log$Logger.class
case x if x.endsWith("project.clj") => MergeStrategy.discard // throw it away.
case x => old(x)
}
}
)
// Leave this here for later so we can add sbtAssemblySettings if we want.
lazy val buildSettings = basicSettings ++ sbtAssemblySettings
}
| splee/burrower | project/BuildSettings.scala | Scala | mit | 1,807 |
package org.bfn.ninetynineprobs
import org.scalatest._
class P88Spec extends UnitSpec {
// TODO
}
| bfontaine/99Scala | src/test/scala/P88Spec.scala | Scala | mit | 105 |
package edu.berkeley.cs.amplab.sparkr
import java.io._
import java.net.ServerSocket
import java.util.{Map => JMap}
import scala.collection.JavaConversions._
import scala.io.Source
import scala.reflect.ClassTag
import scala.util.Try
import org.apache.spark.api.java.{JavaPairRDD, JavaRDD, JavaSparkContext}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.{Partition, SparkConf, SparkEnv, SparkException, TaskContext}
import edu.berkeley.cs.amplab.sparkr.SerDe._
private abstract class BaseRRDD[T: ClassTag, U: ClassTag](
parent: RDD[T],
numPartitions: Int,
func: Array[Byte],
deserializer: String,
serializer: String,
packageNames: Array[Byte],
rLibDir: String,
broadcastVars: Array[Broadcast[Object]])
extends RDD[U](parent) {
protected var dataStream: DataInputStream = _
private var bootTime: Double = _
override def getPartitions = parent.partitions
override def compute(split: Partition, context: TaskContext): Iterator[U] = {
// Timing start
bootTime = System.currentTimeMillis / 1000.0
    // The parent may also be an RRDD, so we should launch it first.
val parentIterator = firstParent[T].iterator(split, context)
// we expect two connections
val serverSocket = new ServerSocket(0, 2)
val listenPort = serverSocket.getLocalPort()
// The stdout/stderr is shared by multiple tasks, because we use one daemon
    // to launch child processes as workers.
val errThread = RRDD.createRWorker(rLibDir, listenPort)
// We use two sockets to separate input and output, then it's easy to manage
// the lifecycle of them to avoid deadlock.
// TODO: optimize it to use one socket
// the socket used to send out the input of task
serverSocket.setSoTimeout(10000)
val inSocket = serverSocket.accept()
startStdinThread(inSocket.getOutputStream(), parentIterator, split.index)
// the socket used to receive the output of task
val outSocket = serverSocket.accept()
val inputStream = new BufferedInputStream(outSocket.getInputStream)
dataStream = new DataInputStream(inputStream)
serverSocket.close()
try {
return new Iterator[U] {
def next(): U = {
val obj = _nextObj
if (hasNext) {
_nextObj = read()
}
obj
}
var _nextObj = read()
def hasNext(): Boolean = {
val hasMore = (_nextObj != null)
if (!hasMore) {
dataStream.close()
}
hasMore
}
}
} catch {
case e: Exception =>
        throw new SparkException("R computation failed with\n " + errThread.getLines())
}
}
/**
* Start a thread to write RDD data to the R process.
*/
private def startStdinThread[T](
output: OutputStream,
iter: Iterator[T],
splitIndex: Int) = {
val env = SparkEnv.get
val bufferSize = System.getProperty("spark.buffer.size", "65536").toInt
val stream = new BufferedOutputStream(output, bufferSize)
new Thread("writer for R") {
override def run() {
try {
SparkEnv.set(env)
val dataOut = new DataOutputStream(stream)
dataOut.writeInt(splitIndex)
SerDe.writeString(dataOut, deserializer)
SerDe.writeString(dataOut, serializer)
dataOut.writeInt(packageNames.length)
dataOut.write(packageNames)
dataOut.writeInt(func.length)
dataOut.write(func)
dataOut.writeInt(broadcastVars.length)
broadcastVars.foreach { broadcast =>
// TODO(shivaram): Read a Long in R to avoid this cast
dataOut.writeInt(broadcast.id.toInt)
// TODO: Pass a byte array from R to avoid this cast ?
val broadcastByteArr = broadcast.value.asInstanceOf[Array[Byte]]
dataOut.writeInt(broadcastByteArr.length)
dataOut.write(broadcastByteArr)
}
dataOut.writeInt(numPartitions)
if (!iter.hasNext) {
dataOut.writeInt(0)
} else {
dataOut.writeInt(1)
}
val printOut = new PrintStream(stream)
def writeElem(elem: Any): Unit = {
if (deserializer == SerializationFormats.BYTE) {
val elemArr = elem.asInstanceOf[Array[Byte]]
dataOut.writeInt(elemArr.length)
dataOut.write(elemArr)
} else if (deserializer == SerializationFormats.STRING) {
// write string(for StringRRDD)
printOut.println(elem)
}
}
for (elem <- iter) {
elem match {
case (key, value) =>
writeElem(key)
writeElem(value)
case _ =>
writeElem(elem)
}
}
stream.flush()
} catch {
        // TODO: We should propagate this error to the task thread
case e: Exception =>
System.err.println("R Writer thread got an exception " + e)
e.printStackTrace()
} finally {
Try(output.close())
}
}
}.start()
}
protected def readData(length: Int): U
protected def read(): U = {
try {
val length = dataStream.readInt()
length match {
case SpecialLengths.TIMING_DATA =>
// Timing data from R worker
val boot = dataStream.readDouble - bootTime
val init = dataStream.readDouble
val broadcast = dataStream.readDouble
val input = dataStream.readDouble
val compute = dataStream.readDouble
val output = dataStream.readDouble
logInfo(
("Times: boot = %.3f s, init = %.3f s, broadcast = %.3f s, " +
"read-input = %.3f s, compute = %.3f s, write-output = %.3f s, " +
"total = %.3f s").format(
boot,
init,
broadcast,
input,
compute,
output,
boot + init + broadcast + input + compute + output))
read()
case length if length >= 0 =>
readData(length)
}
} catch {
case eof: EOFException =>
        throw new SparkException("R worker exited unexpectedly (crashed)", eof)
}
}
}
/**
* Form an RDD[(Int, Array[Byte])] from key-value pairs returned from R.
* This is used by SparkR's shuffle operations.
*/
private class PairwiseRRDD[T: ClassTag](
parent: RDD[T],
numPartitions: Int,
hashFunc: Array[Byte],
deserializer: String,
packageNames: Array[Byte],
rLibDir: String,
broadcastVars: Array[Object])
extends BaseRRDD[T, (Int, Array[Byte])](parent, numPartitions, hashFunc, deserializer,
SerializationFormats.BYTE, packageNames, rLibDir,
broadcastVars.map(x => x.asInstanceOf[Broadcast[Object]])) {
override protected def readData(length: Int): (Int, Array[Byte]) = {
length match {
case length if length == 2 =>
val hashedKey = dataStream.readInt()
val contentPairsLength = dataStream.readInt()
val contentPairs = new Array[Byte](contentPairsLength)
dataStream.readFully(contentPairs)
(hashedKey, contentPairs)
case _ => null
}
}
lazy val asJavaPairRDD : JavaPairRDD[Int, Array[Byte]] = JavaPairRDD.fromRDD(this)
}
/**
* An RDD that stores serialized R objects as Array[Byte].
*/
private class RRDD[T: ClassTag](
parent: RDD[T],
func: Array[Byte],
deserializer: String,
serializer: String,
packageNames: Array[Byte],
rLibDir: String,
broadcastVars: Array[Object])
extends BaseRRDD[T, Array[Byte]](parent, -1, func, deserializer,
serializer, packageNames, rLibDir,
broadcastVars.map(x => x.asInstanceOf[Broadcast[Object]])) {
override protected def readData(length: Int): Array[Byte] = {
length match {
case length if length > 0 =>
val obj = new Array[Byte](length)
        dataStream.readFully(obj, 0, length)
obj
case _ => null
}
}
lazy val asJavaRDD : JavaRDD[Array[Byte]] = JavaRDD.fromRDD(this)
}
/**
* An RDD that stores R objects as Array[String].
*/
private class StringRRDD[T: ClassTag](
parent: RDD[T],
func: Array[Byte],
deserializer: String,
packageNames: Array[Byte],
rLibDir: String,
broadcastVars: Array[Object])
extends BaseRRDD[T, String](parent, -1, func, deserializer, SerializationFormats.STRING,
packageNames, rLibDir,
broadcastVars.map(x => x.asInstanceOf[Broadcast[Object]])) {
override protected def readData(length: Int): String = {
length match {
case length if length > 0 =>
readStringBytes(dataStream, length)
case _ => null
}
}
lazy val asJavaRDD : JavaRDD[String] = JavaRDD.fromRDD(this)
}
private object SpecialLengths {
val TIMING_DATA = -1
}
private[sparkr] class BufferedStreamThread(
in: InputStream,
name: String,
errBufferSize: Int) extends Thread(name) {
val lines = new Array[String](errBufferSize)
var lineIdx = 0
override def run() {
for (line <- Source.fromInputStream(in).getLines) {
synchronized {
lines(lineIdx) = line
lineIdx = (lineIdx + 1) % errBufferSize
}
      // TODO: use a logger
System.err.println(line)
}
}
def getLines(): String = synchronized {
(0 until errBufferSize).filter { x =>
lines((x + lineIdx) % errBufferSize) != null
}.map { x =>
lines((x + lineIdx) % errBufferSize)
    }.mkString("\n")
}
}
object RRDD {
// Because forking processes from Java is expensive, we prefer to launch
// a single R daemon (daemon.R) and tell it to fork new workers for our tasks.
  // This daemon currently only works on UNIX-based systems, so on other platforms
  // we fall back to launching workers (worker.R) directly.
val inWindows = System.getProperty("os.name").startsWith("Windows")
private[this] var errThread: BufferedStreamThread = _
private[this] var daemonChannel: DataOutputStream = _
def createSparkContext(
master: String,
appName: String,
sparkHome: String,
jars: Array[String],
sparkEnvirMap: JMap[Object, Object],
sparkExecutorEnvMap: JMap[Object, Object]): JavaSparkContext = {
val sparkConf = new SparkConf().setAppName(appName)
.setSparkHome(sparkHome)
.setJars(jars)
// Override `master` if we have a user-specified value
if (master != "") {
sparkConf.setMaster(master)
} else {
// If conf has no master set it to "local" to maintain
// backwards compatibility
sparkConf.setIfMissing("spark.master", "local")
}
for ((name, value) <- sparkEnvirMap) {
sparkConf.set(name.asInstanceOf[String], value.asInstanceOf[String])
}
for ((name, value) <- sparkExecutorEnvMap) {
sparkConf.setExecutorEnv(name.asInstanceOf[String], value.asInstanceOf[String])
}
new JavaSparkContext(sparkConf)
}
/**
* Start a thread to print the process's stderr to ours
*/
private def startStdoutThread(proc: Process): BufferedStreamThread = {
val BUFFER_SIZE = 100
val thread = new BufferedStreamThread(proc.getInputStream, "stdout reader for R", BUFFER_SIZE)
thread.setDaemon(true)
thread.start()
thread
}
private def createRProcess(rLibDir: String, port: Int, script: String) = {
val rCommand = "Rscript"
val rOptions = "--vanilla"
val rExecScript = rLibDir + "/SparkR/worker/" + script
val pb = new ProcessBuilder(List(rCommand, rOptions, rExecScript))
// Unset the R_TESTS environment variable for workers.
// This is set by R CMD check as startup.Rs
// (http://svn.r-project.org/R/trunk/src/library/tools/R/testing.R)
// and confuses worker script which tries to load a non-existent file
pb.environment().put("R_TESTS", "")
pb.environment().put("SPARKR_RLIBDIR", rLibDir)
pb.environment().put("SPARKR_WORKER_PORT", port.toString)
pb.redirectErrorStream(true) // redirect stderr into stdout
val proc = pb.start()
val errThread = startStdoutThread(proc)
errThread
}
/**
* ProcessBuilder used to launch worker R processes.
*/
def createRWorker(rLibDir: String, port: Int): BufferedStreamThread = {
val useDaemon = SparkEnv.get.conf.getBoolean("spark.sparkr.use.daemon", true)
if (!inWindows && useDaemon) {
synchronized {
if (daemonChannel == null) {
        // we expect one connection
val serverSocket = new ServerSocket(0, 1)
val daemonPort = serverSocket.getLocalPort
errThread = createRProcess(rLibDir, daemonPort, "daemon.R")
// the socket used to send out the input of task
serverSocket.setSoTimeout(10000)
val sock = serverSocket.accept()
daemonChannel = new DataOutputStream(new BufferedOutputStream(sock.getOutputStream))
serverSocket.close()
}
try {
daemonChannel.writeInt(port)
daemonChannel.flush()
} catch {
case e: IOException =>
// daemon process died
daemonChannel.close()
daemonChannel = null
errThread = null
// fail the current task, retry by scheduler
throw e
}
errThread
}
} else {
createRProcess(rLibDir, port, "worker.R")
}
}
/**
* Create an RRDD given a sequence of byte arrays. Used to create RRDD when `parallelize` is
* called from R.
*/
def createRDDFromArray(jsc: JavaSparkContext, arr: Array[Array[Byte]]): JavaRDD[Array[Byte]] = {
JavaRDD.fromRDD(jsc.sc.parallelize(arr, arr.length))
}
def isRunningInYarnContainer(conf: SparkConf): Boolean = {
// These environment variables are set by YARN.
// For Hadoop 0.23.X, we check for YARN_LOCAL_DIRS (we use this below in getYarnLocalDirs())
// For Hadoop 2.X, we check for CONTAINER_ID.
System.getenv("CONTAINER_ID") != null || System.getenv("YARN_LOCAL_DIRS") != null
}
/**
* Get the path of a temporary directory. Spark's local directories can be configured through
* multiple settings, which are used with the following precedence:
*
* - If called from inside of a YARN container, this will return a directory chosen by YARN.
* - If the SPARK_LOCAL_DIRS environment variable is set, this will return a directory from it.
* - Otherwise, if the spark.local.dir is set, this will return a directory from it.
* - Otherwise, this will return java.io.tmpdir.
*
* Some of these configuration options might be lists of multiple paths, but this method will
* always return a single directory.
*/
def getLocalDir(conf: SparkConf): String = {
getOrCreateLocalRootDirs(conf)(0)
}
/**
* Gets or creates the directories listed in spark.local.dir or SPARK_LOCAL_DIRS,
* and returns only the directories that exist / could be created.
*
* If no directories could be created, this will return an empty list.
*/
def getOrCreateLocalRootDirs(conf: SparkConf): Array[String] = {
val confValue = if (isRunningInYarnContainer(conf)) {
// If we are in yarn mode, systems can have different disk layouts so we must set it
// to what Yarn on this system said was available.
getYarnLocalDirs(conf)
} else {
Option(System.getenv("SPARK_LOCAL_DIRS")).getOrElse(
conf.get("spark.local.dir", System.getProperty("java.io.tmpdir")))
}
val rootDirs = confValue.split(',')
rootDirs.flatMap { rootDir =>
val localDir: File = new File(rootDir)
val foundLocalDir = localDir.exists || localDir.mkdirs()
if (!foundLocalDir) {
None
} else {
Some(rootDir)
}
}
}
/** Get the Yarn approved local directories. */
def getYarnLocalDirs(conf: SparkConf): String = {
// Hadoop 0.23 and 2.x have different Environment variable names for the
// local dirs, so lets check both. We assume one of the 2 is set.
// LOCAL_DIRS => 2.X, YARN_LOCAL_DIRS => 0.23.X
val localDirs = Option(System.getenv("YARN_LOCAL_DIRS"))
.getOrElse(Option(System.getenv("LOCAL_DIRS"))
.getOrElse(""))
if (localDirs.isEmpty) {
throw new Exception("Yarn Local dirs can't be empty")
}
localDirs
}
}
| nvoron23/SparkR-pkg | pkg/src/src/main/scala/edu/berkeley/cs/amplab/sparkr/RRDD.scala | Scala | apache-2.0 | 16,618 |
// scalac -language:implicitConversions -language:postfixOps *.scala
import scala.language.implicitConversions
import scala.language.postfixOps
object Main {
implicit class MyInt(i:Int) {
def squared = i*i
}
implicit val implicitInt = 2
implicit def intPair2int(p:(Int,Int)): Int = p._1 + p._2
def mult(a:Int,b:Int)(implicit scalingFactor:Int) = a * b * scalingFactor
def implicitParam = mult(3,3)
def extensionMethod = 2 squared
def typeConversion = {
val p = (1,2)
mult(p, 2)
}
}
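// Worked results for the definitions above (a sketch, assuming the implicits resolve
// as written): implicitParam == 3 * 3 * 2 == 18, extensionMethod == 2.squared == 4,
// and typeConversion converts (1, 2) to 3 via intPair2int and then computes
// mult(3, 2)(2) == 12.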
| littlenag/scala-enthusiasts | scala-decompiled-pt-2/10_implicits/10_implicits.scala | Scala | mit | 520 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.master.ui
import javax.servlet.http.HttpServletRequest
import scala.xml.Node
import org.json4s.JValue
import org.apache.spark.deploy.JsonProtocol
import org.apache.spark.deploy.DeployMessages.{KillDriverResponse, RequestKillDriver, MasterStateResponse, RequestMasterState}
import org.apache.spark.deploy.master._
import org.apache.spark.ui.{WebUIPage, UIUtils}
import org.apache.spark.util.Utils
private[ui] class MasterPage(parent: MasterWebUI) extends WebUIPage("") {
private val master = parent.masterEndpointRef
def getMasterState: MasterStateResponse = {
master.askWithRetry[MasterStateResponse](RequestMasterState)
}
override def renderJson(request: HttpServletRequest): JValue = {
JsonProtocol.writeMasterState(getMasterState)
}
def handleAppKillRequest(request: HttpServletRequest): Unit = {
handleKillRequest(request, id => {
parent.master.idToApp.get(id).foreach { app =>
parent.master.removeApplication(app, ApplicationState.KILLED)
}
})
}
def handleDriverKillRequest(request: HttpServletRequest): Unit = {
handleKillRequest(request, id => {
master.ask[KillDriverResponse](RequestKillDriver(id))
})
}
private def handleKillRequest(request: HttpServletRequest, action: String => Unit): Unit = {
if (parent.killEnabled &&
parent.master.securityMgr.checkModifyPermissions(request.getRemoteUser)) {
val killFlag = Option(request.getParameter("terminate")).getOrElse("false").toBoolean
val id = Option(request.getParameter("id"))
if (id.isDefined && killFlag) {
action(id.get)
}
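// Brief pause before the page is rendered again, presumably to give the master a moment to
// process the kill so the refreshed listing reflects the new state (the reason for the
// 100ms value is not documented here).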
Thread.sleep(100)
}
}
/** Index view listing applications and executors. */
def render(request: HttpServletRequest): Seq[Node] = {
val state = getMasterState
val workerHeaders = Seq("Worker Id", "Address", "State", "Cores", "Memory")
val workers = state.workers.sortBy(_.id)
val aliveWorkers = state.workers.filter(_.state == WorkerState.ALIVE)
val workerTable = UIUtils.listingTable(workerHeaders, workerRow, workers)
val appHeaders = Seq("Application ID", "Name", "Cores", "Memory per Node", "Submitted Time",
"User", "State", "Duration")
val activeApps = state.activeApps.sortBy(_.startTime).reverse
val activeAppsTable = UIUtils.listingTable(appHeaders, appRow, activeApps)
val completedApps = state.completedApps.sortBy(_.endTime).reverse
val completedAppsTable = UIUtils.listingTable(appHeaders, appRow, completedApps)
val driverHeaders = Seq("Submission ID", "Submitted Time", "Worker", "State", "Cores",
"Memory", "Main Class")
val activeDrivers = state.activeDrivers.sortBy(_.startTime).reverse
val activeDriversTable = UIUtils.listingTable(driverHeaders, driverRow, activeDrivers)
val completedDrivers = state.completedDrivers.sortBy(_.startTime).reverse
val completedDriversTable = UIUtils.listingTable(driverHeaders, driverRow, completedDrivers)
// For now we only show driver information if the user has submitted drivers to the cluster.
// This is until we integrate the notion of drivers and applications in the UI.
def hasDrivers: Boolean = activeDrivers.length > 0 || completedDrivers.length > 0
val content =
<div class="row-fluid">
<div class="span12">
<ul class="unstyled">
<li><strong>URL:</strong> {state.uri}</li>
{
state.restUri.map { uri =>
<li>
<strong>REST URL:</strong> {uri}
<span class="rest-uri"> (cluster mode)</span>
</li>
}.getOrElse { Seq.empty }
}
<li><strong>Alive Workers:</strong> {aliveWorkers.size}</li>
<li><strong>Cores in use:</strong> {aliveWorkers.map(_.cores).sum} Total,
{aliveWorkers.map(_.coresUsed).sum} Used</li>
<li><strong>Memory in use:</strong>
{Utils.megabytesToString(aliveWorkers.map(_.memory).sum)} Total,
{Utils.megabytesToString(aliveWorkers.map(_.memoryUsed).sum)} Used</li>
<li><strong>Applications:</strong>
{state.activeApps.size} Running,
{state.completedApps.size} Completed </li>
<li><strong>Drivers:</strong>
{state.activeDrivers.size} Running,
{state.completedDrivers.size} Completed </li>
<li><strong>Status:</strong> {state.status}</li>
</ul>
</div>
</div>
<div class="row-fluid">
<div class="span12">
<h4> Workers </h4>
{workerTable}
</div>
</div>
<div class="row-fluid">
<div class="span12">
<h4> Running Applications </h4>
{activeAppsTable}
</div>
</div>
<div>
{if (hasDrivers) {
<div class="row-fluid">
<div class="span12">
<h4> Running Drivers </h4>
{activeDriversTable}
</div>
</div>
}
}
</div>
<div class="row-fluid">
<div class="span12">
<h4> Completed Applications </h4>
{completedAppsTable}
</div>
</div>
<div>
{
if (hasDrivers) {
<div class="row-fluid">
<div class="span12">
<h4> Completed Drivers </h4>
{completedDriversTable}
</div>
</div>
}
}
</div>;
UIUtils.basicSparkPage(content, "Spark Master at " + state.uri)
}
private def workerRow(worker: WorkerInfo): Seq[Node] = {
<tr>
<td>
<a href={worker.webUiAddress}>{worker.id}</a>
</td>
<td>{worker.host}:{worker.port}</td>
<td>{worker.state}</td>
<td>{worker.cores} ({worker.coresUsed} Used)</td>
<td sorttable_customkey={"%s.%s".format(worker.memory, worker.memoryUsed)}>
{Utils.megabytesToString(worker.memory)}
({Utils.megabytesToString(worker.memoryUsed)} Used)
</td>
</tr>
}
private def appRow(app: ApplicationInfo): Seq[Node] = {
val killLink = if (parent.killEnabled &&
(app.state == ApplicationState.RUNNING || app.state == ApplicationState.WAITING)) {
val confirm =
s"if (window.confirm('Are you sure you want to kill application ${app.id} ?')) " +
"{ this.parentNode.submit(); return true; } else { return false; }"
<form action="app/kill/" method="POST" style="display:inline">
<input type="hidden" name="id" value={app.id.toString}/>
<input type="hidden" name="terminate" value="true"/>
<a href="#" onclick={confirm} class="kill-link">(kill)</a>
</form>
}
<tr>
<td>
<a href={"app?appId=" + app.id}>{app.id}</a>
{killLink}
</td>
<td>
<a href={app.desc.appUiUrl}>{app.desc.name}</a>
</td>
<td>
{app.coresGranted}
</td>
<td sorttable_customkey={app.desc.memoryPerExecutorMB.toString}>
{Utils.megabytesToString(app.desc.memoryPerExecutorMB)}
</td>
<td>{UIUtils.formatDate(app.submitDate)}</td>
<td>{app.desc.user}</td>
<td>{app.state.toString}</td>
<td>{UIUtils.formatDuration(app.duration)}</td>
</tr>
}
private def driverRow(driver: DriverInfo): Seq[Node] = {
val killLink = if (parent.killEnabled &&
(driver.state == DriverState.RUNNING ||
driver.state == DriverState.SUBMITTED ||
driver.state == DriverState.RELAUNCHING)) {
val confirm =
s"if (window.confirm('Are you sure you want to kill driver ${driver.id} ?')) " +
"{ this.parentNode.submit(); return true; } else { return false; }"
<form action="driver/kill/" method="POST" style="display:inline">
<input type="hidden" name="id" value={driver.id.toString}/>
<input type="hidden" name="terminate" value="true"/>
<a href="#" onclick={confirm} class="kill-link">(kill)</a>
</form>
}
<tr>
<td>{driver.id} {killLink}</td>
<td>{driver.submitDate}</td>
<td>{driver.worker.map(w => <a href={w.webUiAddress}>{w.id.toString}</a>).getOrElse("None")}
</td>
<td>{driver.state}</td>
<td sorttable_customkey={driver.desc.cores.toString}>
{driver.desc.cores}
</td>
<td sorttable_customkey={driver.desc.mem.toString}>
{Utils.megabytesToString(driver.desc.mem.toLong)}
</td>
<td>{driver.desc.command.arguments(2)}</td>
</tr>
}
}
| tophua/spark1.52 | core/src/main/scala/org/apache/spark/deploy/master/ui/MasterPage.scala | Scala | apache-2.0 | 9,745 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import org.apache.spark.sql.catalyst.expressions.{BoundReference, UnsafeRow}
import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, CodeGenerator, ExprCode}
import org.apache.spark.sql.execution.metric.SQLMetrics
import org.apache.spark.sql.types.DataType
import org.apache.spark.sql.vectorized.{ColumnarBatch, ColumnVector}
/**
* Helper trait for abstracting scan functionality using [[ColumnarBatch]]es.
*/
private[sql] trait ColumnarBatchScan extends CodegenSupport {
def vectorTypes: Option[Seq[String]] = None
protected def supportsBatch: Boolean = true
protected def needsUnsafeRowConversion: Boolean = true
override lazy val metrics = Map(
"numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"),
"scanTime" -> SQLMetrics.createTimingMetric(sparkContext, "scan time"))
/**
* Generate [[ColumnVector]] expressions for our parent to consume as rows.
* This is called once per [[ColumnarBatch]].
*/
private def genCodeColumnVector(
ctx: CodegenContext,
columnVar: String,
ordinal: String,
dataType: DataType,
nullable: Boolean): ExprCode = {
val javaType = CodeGenerator.javaType(dataType)
val value = CodeGenerator.getValueFromVector(columnVar, dataType, ordinal)
val isNullVar = if (nullable) { ctx.freshName("isNull") } else { "false" }
val valueVar = ctx.freshName("value")
val str = s"columnVector[$columnVar, $ordinal, ${dataType.simpleString}]"
val code = s"${ctx.registerComment(str)}\\n" + (if (nullable) {
s"""
boolean $isNullVar = $columnVar.isNullAt($ordinal);
$javaType $valueVar = $isNullVar ? ${CodeGenerator.defaultValue(dataType)} : ($value);
"""
} else {
s"$javaType $valueVar = $value;"
}).trim
ExprCode(code, isNullVar, valueVar)
}
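// Illustrative sketch only (variable names are made up, not generated verbatim): for a
// nullable IntegerType column the code built above expands to roughly
//   boolean isNull_0 = colInstance0.isNullAt(rowIdx);
//   int value_0 = isNull_0 ? -1 : (colInstance0.getInt(rowIdx));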
/**
* Produce code to process the input iterator as [[ColumnarBatch]]es.
* This produces an [[UnsafeRow]] for each row in each batch.
*/
// TODO: return ColumnarBatch.Rows instead
override protected def doProduce(ctx: CodegenContext): String = {
// PhysicalRDD always just has one input
val input = ctx.addMutableState("scala.collection.Iterator", "input",
v => s"$v = inputs[0];")
if (supportsBatch) {
produceBatches(ctx, input)
} else {
produceRows(ctx, input)
}
}
private def produceBatches(ctx: CodegenContext, input: String): String = {
// metrics
val numOutputRows = metricTerm(ctx, "numOutputRows")
val scanTimeMetric = metricTerm(ctx, "scanTime")
val scanTimeTotalNs =
ctx.addMutableState(CodeGenerator.JAVA_LONG, "scanTime") // init as scanTime = 0
val columnarBatchClz = classOf[ColumnarBatch].getName
val batch = ctx.addMutableState(columnarBatchClz, "batch")
val idx = ctx.addMutableState(CodeGenerator.JAVA_INT, "batchIdx") // init as batchIdx = 0
val columnVectorClzs = vectorTypes.getOrElse(
Seq.fill(output.indices.size)(classOf[ColumnVector].getName))
val (colVars, columnAssigns) = columnVectorClzs.zipWithIndex.map {
case (columnVectorClz, i) =>
val name = ctx.addMutableState(columnVectorClz, s"colInstance$i")
(name, s"$name = ($columnVectorClz) $batch.column($i);")
}.unzip
val nextBatch = ctx.freshName("nextBatch")
val nextBatchFuncName = ctx.addNewFunction(nextBatch,
s"""
|private void $nextBatch() throws java.io.IOException {
| long getBatchStart = System.nanoTime();
| if ($input.hasNext()) {
| $batch = ($columnarBatchClz)$input.next();
| $numOutputRows.add($batch.numRows());
| $idx = 0;
|    ${columnAssigns.mkString("", "\n", "\n")}
| }
| $scanTimeTotalNs += System.nanoTime() - getBatchStart;
|}""".stripMargin)
ctx.currentVars = null
val rowidx = ctx.freshName("rowIdx")
val columnsBatchInput = (output zip colVars).map { case (attr, colVar) =>
genCodeColumnVector(ctx, colVar, rowidx, attr.dataType, attr.nullable)
}
val localIdx = ctx.freshName("localIdx")
val localEnd = ctx.freshName("localEnd")
val numRows = ctx.freshName("numRows")
val shouldStop = if (parent.needStopCheck) {
s"if (shouldStop()) { $idx = $rowidx + 1; return; }"
} else {
"// shouldStop check is eliminated"
}
s"""
|if ($batch == null) {
| $nextBatchFuncName();
|}
|while ($batch != null) {
| int $numRows = $batch.numRows();
| int $localEnd = $numRows - $idx;
| for (int $localIdx = 0; $localIdx < $localEnd; $localIdx++) {
| int $rowidx = $idx + $localIdx;
| ${consume(ctx, columnsBatchInput).trim}
| $shouldStop
| }
| $idx = $numRows;
| $batch = null;
| $nextBatchFuncName();
|}
|$scanTimeMetric.add($scanTimeTotalNs / (1000 * 1000));
|$scanTimeTotalNs = 0;
""".stripMargin
}
private def produceRows(ctx: CodegenContext, input: String): String = {
val numOutputRows = metricTerm(ctx, "numOutputRows")
val row = ctx.freshName("row")
ctx.INPUT_ROW = row
ctx.currentVars = null
// Always provide `outputVars`, so that the framework can help us build unsafe row if the input
// row is not unsafe row, i.e. `needsUnsafeRowConversion` is true.
val outputVars = output.zipWithIndex.map { case (a, i) =>
BoundReference(i, a.dataType, a.nullable).genCode(ctx)
}
val inputRow = if (needsUnsafeRowConversion) null else row
s"""
|while ($input.hasNext()) {
| InternalRow $row = (InternalRow) $input.next();
| $numOutputRows.add(1);
| ${consume(ctx, outputVars, inputRow).trim}
| if (shouldStop()) return;
|}
""".stripMargin
}
}
| ioana-delaney/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/ColumnarBatchScan.scala | Scala | apache-2.0 | 6,844 |
package io.flow.reference
import io.flow.reference.data.Currencies.{Eek, Ltl, Lvl}
import io.flow.reference.v0.models.CurrencySymbols
import org.scalatest.funspec.AnyFunSpec
import org.scalatest.matchers.should.Matchers
class CurrenciesSpec extends AnyFunSpec with Matchers {
it("unsupported") {
data.Currencies.unsupported.map(_.iso42173).toSeq.sorted should be(Seq(Ltl, Lvl, Eek).map(_.iso42173).sorted)
}
it("supported") {
data.Currencies.supported.nonEmpty should be(true)
data.Currencies.supported.intersect(data.Currencies.unsupported.toSeq) should be(Nil)
}
it("have unique fields") {
data.Currencies.all.map(_.name) should be(data.Currencies.all.map(_.name).distinct)
data.Currencies.all.map(_.iso42173) should be(data.Currencies.all.map(_.iso42173).distinct)
}
it("be sorted") {
data.Currencies.all.map(_.name) should be(data.Currencies.all.map(_.name).sortBy { _.toLowerCase })
}
it("have codes be in upper case") {
data.Currencies.all.map(_.iso42173) should be(data.Currencies.all.map(_.iso42173.toUpperCase))
}
it("have no blanks") {
data.Currencies.all.find(_.name.trim.isEmpty) should be(None)
data.Currencies.all.find(_.iso42173.trim.isEmpty) should be(None)
}
it("codes in use are defined") {
val all = Seq(
"AED", "AMD", "ARS", "AUD", "BAM", "BGN", "BRL", "BSD", "CAD", "CHF", "CLP", "CNY", "CRC",
"DKK", "DOP", "DZD", "EUR", "FJD", "GBP", "HKD", "ILS", "INR", "JPY", "KRW", "KWD", "MXN", "NOK",
"NZD", "PHP", "PLN", "RUB", "SAR", "SEK", "SGD", "THB", "TND", "TWD", "USD", "XCD", "XOF", "ZAR"
)
all.filter { code =>
Currencies.find(code).isEmpty
} should be(Nil)
}
it("have common currencies defined") {
val usd = data.Currencies.all.find(_.iso42173 == "USD").getOrElse {
sys.error("USD missing")
}
usd.name should be("US Dollars")
usd.numberDecimals should be(2)
val canada = data.Currencies.all.find(_.iso42173 == "CAD").getOrElse {
sys.error("CAD missing")
}
canada.name should be("Canadian Dollar")
canada.numberDecimals should be(2)
val japan = data.Currencies.all.find(_.iso42173 == "JPY").getOrElse {
sys.error("JPY missing")
}
japan.name should be("Japanese Yen")
japan.numberDecimals should be(0)
}
it("find") {
Seq("usd", "USD", " usd ", "us dollars").foreach { name =>
Currencies.find(name).getOrElse {
sys.error(s"$name missing")
}
}
Currencies.find("other") should be(None)
}
it("mustFind") {
Seq("usd", "USD", " usd ", "us dollars").foreach { name =>
Currencies.mustFind(name).iso42173 should be("USD")
}
intercept[Throwable] {
Currencies.mustFind("other")
}.getMessage should be("The following currency is invalid: [other]. See https://api.flow.io/reference/currencies for a list of all valid currencies.")
}
it("should successfully validate an empty list of currencies") {
Currencies.validate(Nil) should be(Right(Nil))
}
it("should successfully validate all valid currencies") {
Currencies.validate(data.Currencies.all.map(_.iso42173)) should be(Right(data.Currencies.all))
}
it("should return a grammatically correct error for a single invalid currency") {
Currencies.validate(Seq("invalid")) should be(Left(List("The following currency is invalid: [invalid]. See https://api.flow.io/reference/currencies for a list of all valid currencies.")))
Currencies.validateSingle("invalid") should be(Left("The following currency is invalid: [invalid]. See https://api.flow.io/reference/currencies for a list of all valid currencies."))
}
it("should return a grammatically correct error for multiple invalid currencies") {
Currencies.validate(Seq("totally invalid", "seriously bad", "not a currency")) should be(Left(List("The following currencies are invalid: [totally invalid], [seriously bad], [not a currency]. See https://api.flow.io/reference/currencies for a list of all valid currencies.")))
}
it("should return an error validating a list containing both valid and invalid currencies") {
Currencies.validate(Seq("USD", "invalid")) should be(Left(List("The following currency is invalid: [invalid]. See https://api.flow.io/reference/currencies for a list of all valid currencies.")))
}
it("numbers") {
data.Currencies.Usd.symbols should be(
Some(CurrencySymbols(primary = "US$", narrow = Some("$")))
)
data.Currencies.Aud.symbols should be(
Some(CurrencySymbols(primary = "A$", narrow = Some("$")))
)
data.Currencies.Eur.symbols should be(
Some(CurrencySymbols(primary = "โฌ", narrow = Some("โฌ")))
)
data.Currencies.Gbp.symbols should be(
Some(CurrencySymbols(primary = "ยฃ", narrow = Some("ยฃ")))
)
data.Currencies.Jpy.symbols should be(
Some(CurrencySymbols(primary = "ยฅ", narrow = Some("ยฅ")))
)
}
it("defaultLocale") {
data.Currencies.Usd.defaultLocale should be(Some("en-US"))
data.Currencies.Aud.defaultLocale should be(Some("en-AU"))
data.Currencies.Eur.defaultLocale should be(Some("de"))
data.Currencies.Gbp.defaultLocale should be(Some("en-GB"))
data.Currencies.Jpy.defaultLocale should be(Some("ja-JP"))
}
}
| flowcommerce/lib-reference-scala | src/test/scala/io/flow/reference/CurrenciesSpec.scala | Scala | mit | 5,274 |
package org.pigsaw.ccpm
/* Copyright Nik Silver 2015.
*
* This file is part of CCPM.
*
* CCPM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* CCPM is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with CCPM. If not, see <http://www.gnu.org/licenses/>.
*/
import org.scalatest.FlatSpec
import org.scalatest.Matchers
class RippleAdjusterTest extends FlatSpec with Matchers {
// The "linear shunt" problem is this:
// There is a string of letters and "."s. A letter represents a piece, and
// a "." represents a space. We want to move an letter along the string
// a given number of places rightwards. But it can only move into a space.
// However, we can move another letter along if it's in the way.
// No letter can move beyond the end of the string.
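// For example, asking to move the 'a' in "..ab.." two steps to the right first requires
// 'b' to shunt right, giving "....ab" (see the solve tests below).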
/**
* Description of the desire to move the letter at index `index`
* a given number of `steps`.
*/
case class LinShMove(index: Int, steps: Int) extends RippleMove[LinShMove] {
def samePiece(m2: LinShMove): Boolean = (index == m2.index)
def max(m2: LinShMove): LinShMove = Seq(this, m2).maxBy(_.steps)
}
/**
* A `RippleAdjuster` describing the linear shunt problem.
*/
class LinShRippleAdjuster extends RippleAdjuster[String,LinShMove] {
def attempt(board: String, m: LinShMove): Seq[Attempt[LinShMove]] = {
val maxIndex = board.size - 1
val availableSteps = ((board drop (m.index+1)) takeWhile { _ == '.' }).length
val availableIndex = m.index + availableSteps
if (m.steps <= availableSteps) {
// We can comfortably make the move
Seq(Actual(LinShMove(m.index, m.steps)))
} else if (m.index == maxIndex) {
// We cannot move
Seq()
} else if (availableIndex == maxIndex) {
// We can move, but only to the end of the board
Seq(Actual(LinShMove(m.index, availableSteps)))
} else {
// We're stopped from moving all the way by another piece
// so we have a prerequisite of moving that the remaining number of steps
val prereqLetterIdx = availableIndex + 1
val prereqSteps = Math.min(maxIndex - prereqLetterIdx, m.steps - availableSteps)
Seq(Prerequisite(LinShMove(prereqLetterIdx, prereqSteps)))
}
}
def move(board: String, m: LinShMove): (String, LinShMove) = {
val letter = board(m.index)
val availableSteps = ((board drop (m.index+1)) takeWhile { _ == '.' }).length
val actualSteps = Math.min(m.steps, availableSteps)
val result = board.updated(m.index, '.').updated(m.index + actualSteps, letter)
(result.mkString, LinShMove(m.index, actualSteps))
}
}
"solve" should "solve a simple one-step problem (1)" in {
val ra = new LinShRippleAdjuster
val move = LinShMove(0, 1)
ra.solve("x.", move) should equal (".x")
}
it should "solve a simple one-step problem (2 - to avoid faking)" in {
val ra = new LinShRippleAdjuster
val move = LinShMove(0, 1)
ra.solve("x..", move) should equal (".x.")
}
it should "solve a simple one-step problem with a different letter" in {
val ra = new LinShRippleAdjuster
val move = LinShMove(0, 1)
ra.solve("y..", move) should equal (".y.")
}
it should "solve a simple one-step problem with a different kind of move" in {
val ra = new LinShRippleAdjuster
val move = LinShMove(0, 2)
ra.solve("x...", move) should equal ("..x.")
}
it should "complete even if move is impossible" in {
val ra = new LinShRippleAdjuster
val move = LinShMove(3, 1)
ra.solve("...x", move) should equal ("...x")
}
it should "ripple prerequisites once" in {
val ra = new LinShRippleAdjuster
val move = LinShMove(2, 2)
ra.solve("..ab..", move) should equal ("....ab")
}
it should "ripple prerequisites twice" in {
val ra = new LinShRippleAdjuster
val move = LinShMove(2, 1)
ra.solve("..abc.", move) should equal ("...abc")
}
it should "ripple prerequisites many times" in {
val ra = new LinShRippleAdjuster
val move = LinShMove(2, 2)
ra.solve("..abc.d..", move) should equal ("....abcd.")
}
it should "solve with a partial solution, with rippling, if necessary" in {
val ra = new LinShRippleAdjuster
val move = LinShMove(2, 4)
ra.solve("..abc.d..", move) should equal (".....abcd")
}
it should "solve with a partial solution, even if one end move is impossible" in {
val ra = new LinShRippleAdjuster
val move = LinShMove(2, 3)
ra.solve("..abc.d", move) should equal ("...abcd")
}
it should "return the same state if any rippling of moves is impossible" in {
val ra = new LinShRippleAdjuster
val move = LinShMove(3, 2)
ra.solve("..abcde", move) should equal ("..abcde")
}
"LinShRippleAdjuster.attempt" should "return a prerequisite if necessary (1)" in {
val ra = new LinShRippleAdjuster
val move = LinShMove(1, 1)
ra.attempt(".ab.", move) should equal (Seq(Prerequisite(LinShMove(2, 1))))
}
it should "return a prerequisite if necessary (2 - to avoid faking)" in {
val ra = new LinShRippleAdjuster
val move = LinShMove(2, 1)
ra.attempt("..ab..", move) should equal (Seq(Prerequisite(LinShMove(3, 1))))
}
it should "return a prerequisite if necessary (3 - to avoid faking again)" in {
val ra = new LinShRippleAdjuster
val move = LinShMove(2, 2)
ra.attempt("..ab..", move) should equal (Seq(Prerequisite(LinShMove(3, 2))))
}
it should "require later pieces move forward if current piece can only make it part-way" in {
val ra = new LinShRippleAdjuster
val move = LinShMove(1, 2)
ra.attempt(".a.b.", move) should equal (Seq(Prerequisite(LinShMove(3, 1))))
}
it should "allow the last piece to move only part-way if necessary" in {
val ra = new LinShRippleAdjuster
val move = LinShMove(1, 3)
ra.attempt(".a.b.", move) should equal (Seq(Prerequisite(LinShMove(3, 1))))
}
it should "not require a letter to exist beyond the end of the board" in {
val ra = new LinShRippleAdjuster
val move = LinShMove(1, 3)
ra.attempt(".a.", move) should equal (Seq(Actual(LinShMove(1,1))))
}
it should "return an empty sequence if no move is possible" in {
val ra = new LinShRippleAdjuster
val move = LinShMove(2, 1)
ra.attempt("..a", move) should equal (Seq())
}
}
| niksilver/ccpm | src/test/scala/org/pigsaw/ccpm/RippleAdjusterTest.scala | Scala | gpl-3.0 | 6,835 |
package epam.bdcc_app.json
import com.fasterxml.jackson.annotation.JsonIgnoreProperties
import com.fasterxml.jackson.annotation.JsonProperty
import lombok.Data
@Data
@JsonIgnoreProperties(ignoreUnknown = true)
class Vk_follows {
@JsonProperty("count")
val count = 0L
@JsonProperty("items")
val items: List[Long] = List.empty
}
| mkasatkin/bdcc_app.vk_samza | src/main/scala/epam/bdcc_app/json/Vk_follows.scala | Scala | apache-2.0 | 349 |
/*
* Copyright 2014-2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.poller
import akka.actor.Actor
import akka.actor.ActorRef
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpEntity
import akka.http.scaladsl.model.HttpMethods
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.model.HttpResponse
import akka.http.scaladsl.model.MediaTypes
import akka.http.scaladsl.model.headers._
import akka.stream.Materializer
import com.netflix.atlas.akka.AccessLogger
import com.netflix.atlas.akka.CustomMediaTypes
import com.netflix.atlas.core.model.Datapoint
import com.netflix.atlas.json.Json
import com.netflix.atlas.poller.Messages.MetricsPayload
import com.netflix.spectator.api.Id
import com.netflix.spectator.api.Registry
import com.netflix.spectator.impl.AsciiSet
import com.typesafe.config.Config
import org.slf4j.LoggerFactory
import scala.concurrent.Future
import scala.util.Failure
import scala.util.Success
/**
* Sink for the poller data that publishes the metrics to Atlas. The actor expects
* [[Messages.MetricsPayload]] messages and does not send a response. Failures will
* be logged and reflected in the `atlas.client.dropped` counter as well as standard
* client access logging.
*/
class ClientActor(registry: Registry, config: Config, implicit val materializer: Materializer)
extends Actor {
private implicit val xc = scala.concurrent.ExecutionContext.global
private val logger = LoggerFactory.getLogger(getClass)
private val uri = config.getString("uri")
private val batchSize = config.getInt("batch-size")
private val shouldSendAck = config.getBoolean("send-ack")
private val validTagChars = AsciiSet.fromPattern(config.getString("valid-tag-characters"))
private val validTagValueChars = {
import scala.jdk.CollectionConverters._
config
.getConfigList("valid-tag-value-characters")
.asScala
.map { cfg =>
cfg.getString("key") -> AsciiSet.fromPattern(cfg.getString("value"))
}
.toMap
}
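// The config keys read above suggest a block shaped roughly like this (values are examples
// only, not the library's defaults):
//   uri = "https://atlas.example.org/api/v1/publish"
//   batch-size = 10000
//   send-ack = true
//   valid-tag-characters = "-._A-Za-z0-9"
//   valid-tag-value-characters = [ { key = "nf.cluster", value = "-._A-Za-z0-9^~" } ]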
private val datapointsSent = registry.counter("atlas.client.sent")
private val datapointsDropped = registry.createId("atlas.client.dropped")
def receive: Receive = {
case MetricsPayload(_, ms) =>
val responder = sender()
datapointsSent.increment(ms.size)
ms.grouped(batchSize).foreach { batch =>
val msg = MetricsPayload(metrics = batch.map(fixTags))
post(msg).onComplete {
case Success(response) => handleResponse(responder, response, batch.size)
case Failure(t) => handleFailure(responder, t, batch.size)
}
}
}
private def fixTags(d: Datapoint): Datapoint = {
val tags = d.tags.map {
case (k, v) =>
val nk = validTagChars.replaceNonMembers(k, '_')
val nv = validTagValueChars.getOrElse(nk, validTagChars).replaceNonMembers(v, '_')
nk -> nv
}
d.copy(tags = tags)
}
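// For example, with a pattern that only allows alphanumerics plus '.', '-' and '_', a tag
// such as "cluster name" -> "my/app" would be rewritten to "cluster_name" -> "my_app"
// (illustrative; the characters actually kept depend on the configured patterns).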
private def post(data: MetricsPayload): Future[HttpResponse] = {
post(Json.smileEncode(data))
}
/**
* Encode the data and start post to atlas. Method is protected to allow for
* easier testing.
*/
protected def post(data: Array[Byte]): Future[HttpResponse] = {
val request = HttpRequest(
HttpMethods.POST,
uri = uri,
headers = ClientActor.headers,
entity = HttpEntity(CustomMediaTypes.`application/x-jackson-smile`, data)
)
val accessLogger = AccessLogger.newClientLogger("atlas_publish", request)
Http()(context.system).singleRequest(request).andThen { case t => accessLogger.complete(t) }
}
private def handleResponse(responder: ActorRef, response: HttpResponse, size: Int): Unit = {
response.status.intValue match {
case 200 => // All is well
response.discardEntityBytes()
case 202 => // Partial failure
val id = datapointsDropped.withTag("id", "PartialFailure")
incrementFailureCount(id, response, size)
case 400 => // Bad message, all data dropped
val id = datapointsDropped.withTag("id", "CompleteFailure")
incrementFailureCount(id, response, size)
case v => // Unexpected, assume all dropped
response.discardEntityBytes()
val id = datapointsDropped.withTag("id", s"Status_$v")
registry.counter(id).increment(size)
}
if (shouldSendAck) responder ! Messages.Ack
}
private def incrementFailureCount(id: Id, response: HttpResponse, size: Int): Unit = {
response.entity.dataBytes.runReduce(_ ++ _).onComplete {
case Success(bs) =>
val msg = Json.decode[Messages.FailureResponse](bs.toArray)
msg.message.headOption.foreach { reason =>
logger.warn("failed to validate some datapoints, first reason: {}", reason)
}
registry.counter(id).increment(msg.errorCount)
case Failure(_) =>
registry.counter(id).increment(size)
}
}
private def handleFailure(responder: ActorRef, t: Throwable, size: Int): Unit = {
val id = datapointsDropped.withTag("id", t.getClass.getSimpleName)
registry.counter(id).increment(size)
if (shouldSendAck) responder ! Messages.Ack
}
}
object ClientActor {
private val gzip = HttpEncodingRange(HttpEncodings.gzip)
private val headers = List(`Accept-Encoding`(gzip), Accept(MediaTypes.`application/json`))
}
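// Usage sketch (not part of the original source): assuming an ActorSystem `system`, a
// Spectator `registry`, the corresponding Config block and an implicit Materializer are in
// scope, the actor can be created and fed batches of datapoints like:
//   val client = system.actorOf(Props(new ClientActor(registry, config, materializer)))
//   client ! Messages.MetricsPayload(metrics = datapoints)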
| Netflix-Skunkworks/iep-apps | atlas-cloudwatch/src/main/scala/com/netflix/atlas/poller/ClientActor.scala | Scala | apache-2.0 | 5,876 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.vectorized
import java.nio.ByteBuffer
import java.nio.ByteOrder
import java.nio.charset.StandardCharsets
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.util.Random
import org.apache.arrow.vector.IntVector
import org.apache.spark.SparkFunSuite
import org.apache.spark.memory.MemoryMode
import org.apache.spark.sql.{RandomDataGenerator, Row}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.arrow.ArrowUtils
import org.apache.spark.sql.types._
import org.apache.spark.sql.vectorized.{ArrowColumnVector, ColumnarBatch, ColumnVector}
import org.apache.spark.unsafe.Platform
import org.apache.spark.unsafe.types.CalendarInterval
class ColumnarBatchSuite extends SparkFunSuite {
private def allocate(capacity: Int, dt: DataType, memMode: MemoryMode): WritableColumnVector = {
if (memMode == MemoryMode.OFF_HEAP) {
new OffHeapColumnVector(capacity, dt)
} else {
new OnHeapColumnVector(capacity, dt)
}
}
private def testVector(
name: String,
size: Int,
dt: DataType)(
block: WritableColumnVector => Unit): Unit = {
test(name) {
Seq(MemoryMode.ON_HEAP, MemoryMode.OFF_HEAP).foreach { mode =>
val vector = allocate(size, dt, mode)
try block(vector) finally {
vector.close()
}
}
}
}
testVector("Null APIs", 1024, IntegerType) {
column =>
val reference = mutable.ArrayBuffer.empty[Boolean]
var idx = 0
assert(!column.hasNull)
assert(column.numNulls() == 0)
column.appendNotNull()
reference += false
assert(!column.hasNull)
assert(column.numNulls() == 0)
column.appendNotNulls(3)
(1 to 3).foreach(_ => reference += false)
assert(!column.hasNull)
assert(column.numNulls() == 0)
column.appendNull()
reference += true
assert(column.hasNull)
assert(column.numNulls() == 1)
column.appendNulls(3)
(1 to 3).foreach(_ => reference += true)
assert(column.hasNull)
assert(column.numNulls() == 4)
idx = column.elementsAppended
column.putNotNull(idx)
reference += false
idx += 1
assert(column.hasNull)
assert(column.numNulls() == 4)
column.putNull(idx)
reference += true
idx += 1
assert(column.hasNull)
assert(column.numNulls() == 5)
column.putNulls(idx, 3)
reference += true
reference += true
reference += true
idx += 3
assert(column.hasNull)
assert(column.numNulls() == 8)
column.putNotNulls(idx, 4)
reference += false
reference += false
reference += false
reference += false
idx += 4
assert(column.hasNull)
assert(column.numNulls() == 8)
reference.zipWithIndex.foreach { v =>
assert(v._1 == column.isNullAt(v._2))
}
}
testVector("Byte APIs", 1024, ByteType) {
column =>
val reference = mutable.ArrayBuffer.empty[Byte]
var values = (10 :: 20 :: 30 :: 40 :: 50 :: Nil).map(_.toByte).toArray
column.appendBytes(2, values, 0)
reference += 10.toByte
reference += 20.toByte
column.appendBytes(3, values, 2)
reference += 30.toByte
reference += 40.toByte
reference += 50.toByte
column.appendBytes(6, 60.toByte)
(1 to 6).foreach(_ => reference += 60.toByte)
column.appendByte(70.toByte)
reference += 70.toByte
var idx = column.elementsAppended
values = (1 :: 2 :: 3 :: 4 :: 5 :: Nil).map(_.toByte).toArray
column.putBytes(idx, 2, values, 0)
reference += 1
reference += 2
idx += 2
column.putBytes(idx, 3, values, 2)
reference += 3
reference += 4
reference += 5
idx += 3
column.putByte(idx, 9)
reference += 9
idx += 1
column.putBytes(idx, 3, 4)
reference += 4
reference += 4
reference += 4
idx += 3
reference.zipWithIndex.foreach { v =>
assert(v._1 == column.getByte(v._2), "VectorType=" + column.getClass.getSimpleName)
}
}
testVector("Short APIs", 1024, ShortType) {
column =>
val seed = System.currentTimeMillis()
val random = new Random(seed)
val reference = mutable.ArrayBuffer.empty[Short]
var values = (10 :: 20 :: 30 :: 40 :: 50 :: Nil).map(_.toShort).toArray
column.appendShorts(2, values, 0)
reference += 10.toShort
reference += 20.toShort
column.appendShorts(3, values, 2)
reference += 30.toShort
reference += 40.toShort
reference += 50.toShort
column.appendShorts(6, 60.toShort)
(1 to 6).foreach(_ => reference += 60.toShort)
column.appendShort(70.toShort)
reference += 70.toShort
var idx = column.elementsAppended
values = (1 :: 2 :: 3 :: 4 :: 5 :: Nil).map(_.toShort).toArray
column.putShorts(idx, 2, values, 0)
reference += 1
reference += 2
idx += 2
column.putShorts(idx, 3, values, 2)
reference += 3
reference += 4
reference += 5
idx += 3
column.putShort(idx, 9)
reference += 9
idx += 1
column.putShorts(idx, 3, 4)
reference += 4
reference += 4
reference += 4
idx += 3
while (idx < column.capacity) {
val single = random.nextBoolean()
if (single) {
val v = random.nextInt().toShort
column.putShort(idx, v)
reference += v
idx += 1
} else {
val n = math.min(random.nextInt(column.capacity / 20), column.capacity - idx)
val v = (n + 1).toShort
column.putShorts(idx, n, v)
var i = 0
while (i < n) {
reference += v
i += 1
}
idx += n
}
}
reference.zipWithIndex.foreach { v =>
assert(v._1 == column.getShort(v._2),
"Seed = " + seed + " VectorType=" + column.getClass.getSimpleName)
}
}
testVector("Int APIs", 1024, IntegerType) {
column =>
val seed = System.currentTimeMillis()
val random = new Random(seed)
val reference = mutable.ArrayBuffer.empty[Int]
var values = (10 :: 20 :: 30 :: 40 :: 50 :: Nil).toArray
column.appendInts(2, values, 0)
reference += 10
reference += 20
column.appendInts(3, values, 2)
reference += 30
reference += 40
reference += 50
column.appendInts(6, 60)
(1 to 6).foreach(_ => reference += 60)
column.appendInt(70)
reference += 70
var idx = column.elementsAppended
values = (1 :: 2 :: 3 :: 4 :: 5 :: Nil).toArray
column.putInts(idx, 2, values, 0)
reference += 1
reference += 2
idx += 2
column.putInts(idx, 3, values, 2)
reference += 3
reference += 4
reference += 5
idx += 3
val littleEndian = new Array[Byte](8)
littleEndian(0) = 7
littleEndian(1) = 1
littleEndian(4) = 6
littleEndian(6) = 1
column.putIntsLittleEndian(idx, 1, littleEndian, 4)
column.putIntsLittleEndian(idx + 1, 1, littleEndian, 0)
reference += 6 + (1 << 16)
reference += 7 + (1 << 8)
idx += 2
column.putIntsLittleEndian(idx, 2, littleEndian, 0)
reference += 7 + (1 << 8)
reference += 6 + (1 << 16)
idx += 2
while (idx < column.capacity) {
val single = random.nextBoolean()
if (single) {
val v = random.nextInt()
column.putInt(idx, v)
reference += v
idx += 1
} else {
val n = math.min(random.nextInt(column.capacity / 20), column.capacity - idx)
column.putInts(idx, n, n + 1)
var i = 0
while (i < n) {
reference += (n + 1)
i += 1
}
idx += n
}
}
reference.zipWithIndex.foreach { v =>
assert(v._1 == column.getInt(v._2),
"Seed = " + seed + " VectorType=" + column.getClass.getSimpleName)
}
}
testVector("Long APIs", 1024, LongType) {
column =>
val seed = System.currentTimeMillis()
val random = new Random(seed)
val reference = mutable.ArrayBuffer.empty[Long]
var values = (10L :: 20L :: 30L :: 40L :: 50L :: Nil).toArray
column.appendLongs(2, values, 0)
reference += 10L
reference += 20L
column.appendLongs(3, values, 2)
reference += 30L
reference += 40L
reference += 50L
column.appendLongs(6, 60L)
(1 to 6).foreach(_ => reference += 60L)
column.appendLong(70L)
reference += 70L
var idx = column.elementsAppended
values = (1L :: 2L :: 3L :: 4L :: 5L :: Nil).toArray
column.putLongs(idx, 2, values, 0)
reference += 1
reference += 2
idx += 2
column.putLongs(idx, 3, values, 2)
reference += 3
reference += 4
reference += 5
idx += 3
val littleEndian = new Array[Byte](16)
littleEndian(0) = 7
littleEndian(1) = 1
littleEndian(8) = 6
littleEndian(10) = 1
column.putLongsLittleEndian(idx, 1, littleEndian, 8)
column.putLongsLittleEndian(idx + 1, 1, littleEndian, 0)
reference += 6 + (1 << 16)
reference += 7 + (1 << 8)
idx += 2
column.putLongsLittleEndian(idx, 2, littleEndian, 0)
reference += 7 + (1 << 8)
reference += 6 + (1 << 16)
idx += 2
while (idx < column.capacity) {
val single = random.nextBoolean()
if (single) {
val v = random.nextLong()
column.putLong(idx, v)
reference += v
idx += 1
} else {
val n = math.min(random.nextInt(column.capacity / 20), column.capacity - idx)
column.putLongs(idx, n, n + 1)
var i = 0
while (i < n) {
reference += (n + 1)
i += 1
}
idx += n
}
}
reference.zipWithIndex.foreach { v =>
assert(v._1 == column.getLong(v._2), "idx=" + v._2 +
" Seed = " + seed + " VectorType=" + column.getClass.getSimpleName)
}
}
testVector("Float APIs", 1024, FloatType) {
column =>
val seed = System.currentTimeMillis()
val random = new Random(seed)
val reference = mutable.ArrayBuffer.empty[Float]
var values = (.1f :: .2f :: .3f :: .4f :: .5f :: Nil).toArray
column.appendFloats(2, values, 0)
reference += .1f
reference += .2f
column.appendFloats(3, values, 2)
reference += .3f
reference += .4f
reference += .5f
column.appendFloats(6, .6f)
(1 to 6).foreach(_ => reference += .6f)
column.appendFloat(.7f)
reference += .7f
var idx = column.elementsAppended
values = (1.0f :: 2.0f :: 3.0f :: 4.0f :: 5.0f :: Nil).toArray
column.putFloats(idx, 2, values, 0)
reference += 1.0f
reference += 2.0f
idx += 2
column.putFloats(idx, 3, values, 2)
reference += 3.0f
reference += 4.0f
reference += 5.0f
idx += 3
val buffer = new Array[Byte](8)
Platform.putFloat(buffer, Platform.BYTE_ARRAY_OFFSET, 2.234f)
Platform.putFloat(buffer, Platform.BYTE_ARRAY_OFFSET + 4, 1.123f)
if (ByteOrder.nativeOrder().equals(ByteOrder.BIG_ENDIAN)) {
// Ensure array contains Little Endian floats
val bb = ByteBuffer.wrap(buffer).order(ByteOrder.LITTLE_ENDIAN)
Platform.putFloat(buffer, Platform.BYTE_ARRAY_OFFSET, bb.getFloat(0))
Platform.putFloat(buffer, Platform.BYTE_ARRAY_OFFSET + 4, bb.getFloat(4))
}
column.putFloats(idx, 1, buffer, 4)
column.putFloats(idx + 1, 1, buffer, 0)
reference += 1.123f
reference += 2.234f
idx += 2
column.putFloats(idx, 2, buffer, 0)
reference += 2.234f
reference += 1.123f
idx += 2
while (idx < column.capacity) {
val single = random.nextBoolean()
if (single) {
val v = random.nextFloat()
column.putFloat(idx, v)
reference += v
idx += 1
} else {
val n = math.min(random.nextInt(column.capacity / 20), column.capacity - idx)
val v = random.nextFloat()
column.putFloats(idx, n, v)
var i = 0
while (i < n) {
reference += v
i += 1
}
idx += n
}
}
reference.zipWithIndex.foreach { v =>
assert(v._1 == column.getFloat(v._2),
"Seed = " + seed + " VectorType=" + column.getClass.getSimpleName)
}
}
testVector("Double APIs", 1024, DoubleType) {
column =>
val seed = System.currentTimeMillis()
val random = new Random(seed)
val reference = mutable.ArrayBuffer.empty[Double]
var values = (.1 :: .2 :: .3 :: .4 :: .5 :: Nil).toArray
column.appendDoubles(2, values, 0)
reference += .1
reference += .2
column.appendDoubles(3, values, 2)
reference += .3
reference += .4
reference += .5
column.appendDoubles(6, .6)
(1 to 6).foreach(_ => reference += .6)
column.appendDouble(.7)
reference += .7
var idx = column.elementsAppended
values = (1.0 :: 2.0 :: 3.0 :: 4.0 :: 5.0 :: Nil).toArray
column.putDoubles(idx, 2, values, 0)
reference += 1.0
reference += 2.0
idx += 2
column.putDoubles(idx, 3, values, 2)
reference += 3.0
reference += 4.0
reference += 5.0
idx += 3
val buffer = new Array[Byte](16)
Platform.putDouble(buffer, Platform.BYTE_ARRAY_OFFSET, 2.234)
Platform.putDouble(buffer, Platform.BYTE_ARRAY_OFFSET + 8, 1.123)
if (ByteOrder.nativeOrder().equals(ByteOrder.BIG_ENDIAN)) {
// Ensure array contains Little Endian doubles
val bb = ByteBuffer.wrap(buffer).order(ByteOrder.LITTLE_ENDIAN)
Platform.putDouble(buffer, Platform.BYTE_ARRAY_OFFSET, bb.getDouble(0))
Platform.putDouble(buffer, Platform.BYTE_ARRAY_OFFSET + 8, bb.getDouble(8))
}
column.putDoubles(idx, 1, buffer, 8)
column.putDoubles(idx + 1, 1, buffer, 0)
reference += 1.123
reference += 2.234
idx += 2
column.putDoubles(idx, 2, buffer, 0)
reference += 2.234
reference += 1.123
idx += 2
while (idx < column.capacity) {
val single = random.nextBoolean()
if (single) {
val v = random.nextDouble()
column.putDouble(idx, v)
reference += v
idx += 1
} else {
val n = math.min(random.nextInt(column.capacity / 20), column.capacity - idx)
val v = random.nextDouble()
column.putDoubles(idx, n, v)
var i = 0
while (i < n) {
reference += v
i += 1
}
idx += n
}
}
reference.zipWithIndex.foreach { v =>
assert(v._1 == column.getDouble(v._2),
"Seed = " + seed + " VectorType=" + column.getClass.getSimpleName)
}
}
testVector("String APIs", 7, StringType) {
column =>
val reference = mutable.ArrayBuffer.empty[String]
assert(column.arrayData().elementsAppended == 0)
val str = "string"
column.appendByteArray(str.getBytes(StandardCharsets.UTF_8),
0, str.getBytes(StandardCharsets.UTF_8).length)
reference += str
assert(column.arrayData().elementsAppended == 6)
var idx = column.elementsAppended
val values = ("Hello" :: "abc" :: Nil).toArray
column.putByteArray(idx, values(0).getBytes(StandardCharsets.UTF_8),
0, values(0).getBytes(StandardCharsets.UTF_8).length)
reference += values(0)
idx += 1
assert(column.arrayData().elementsAppended == 11)
column.putByteArray(idx, values(1).getBytes(StandardCharsets.UTF_8),
0, values(1).getBytes(StandardCharsets.UTF_8).length)
reference += values(1)
idx += 1
assert(column.arrayData().elementsAppended == 14)
// Just put llo
val offset = column.putByteArray(idx, values(0).getBytes(StandardCharsets.UTF_8),
2, values(0).getBytes(StandardCharsets.UTF_8).length - 2)
reference += "llo"
idx += 1
assert(column.arrayData().elementsAppended == 17)
// Put the same "ll" at offset. This should not allocate more memory in the column.
column.putArray(idx, offset, 2)
reference += "ll"
idx += 1
assert(column.arrayData().elementsAppended == 17)
// Put a long string
val s = "abcdefghijklmnopqrstuvwxyz"
column.putByteArray(idx, (s + s).getBytes(StandardCharsets.UTF_8))
reference += (s + s)
idx += 1
assert(column.arrayData().elementsAppended == 17 + (s + s).length)
column.putNull(idx)
assert(column.getUTF8String(idx) == null)
idx += 1
reference.zipWithIndex.foreach { v =>
val errMsg = "VectorType=" + column.getClass.getSimpleName
assert(v._1.length == column.getArrayLength(v._2), errMsg)
assert(v._1 == column.getUTF8String(v._2).toString, errMsg)
}
column.reset()
assert(column.arrayData().elementsAppended == 0)
}
testVector("CalendarInterval APIs", 4, CalendarIntervalType) {
column =>
val reference = mutable.ArrayBuffer.empty[CalendarInterval]
val months = column.getChild(0)
val microseconds = column.getChild(1)
assert(months.dataType() == IntegerType)
assert(microseconds.dataType() == LongType)
months.putInt(0, 1)
microseconds.putLong(0, 100)
reference += new CalendarInterval(1, 100)
months.putInt(1, 0)
microseconds.putLong(1, 2000)
reference += new CalendarInterval(0, 2000)
column.putNull(2)
assert(column.getInterval(2) == null)
reference += null
months.putInt(3, 20)
microseconds.putLong(3, 0)
reference += new CalendarInterval(20, 0)
reference.zipWithIndex.foreach { case (v, i) =>
val errMsg = "VectorType=" + column.getClass.getSimpleName
assert(v == column.getInterval(i), errMsg)
if (v == null) assert(column.isNullAt(i), errMsg)
}
column.close()
}
testVector("Int Array", 10, new ArrayType(IntegerType, true)) {
column =>
// Fill the underlying data with all the arrays back to back.
val data = column.arrayData()
var i = 0
while (i < 6) {
data.putInt(i, i)
i += 1
}
// Populate it with arrays [0], [1, 2], null, [], [3, 4, 5]
column.putArray(0, 0, 1)
column.putArray(1, 1, 2)
column.putNull(2)
column.putArray(3, 3, 0)
column.putArray(4, 3, 3)
assert(column.getArray(0).numElements == 1)
assert(column.getArray(1).numElements == 2)
assert(column.isNullAt(2))
assert(column.getArray(2) == null)
assert(column.getArray(3).numElements == 0)
assert(column.getArray(4).numElements == 3)
val a1 = ColumnVectorUtils.toJavaIntArray(column.getArray(0))
val a2 = ColumnVectorUtils.toJavaIntArray(column.getArray(1))
val a3 = ColumnVectorUtils.toJavaIntArray(column.getArray(3))
val a4 = ColumnVectorUtils.toJavaIntArray(column.getArray(4))
assert(a1 === Array(0))
assert(a2 === Array(1, 2))
assert(a3 === Array.empty[Int])
assert(a4 === Array(3, 4, 5))
// Verify the ArrayData get APIs
assert(column.getArray(0).getInt(0) == 0)
assert(column.getArray(1).getInt(0) == 1)
assert(column.getArray(1).getInt(1) == 2)
assert(column.getArray(4).getInt(0) == 3)
assert(column.getArray(4).getInt(1) == 4)
assert(column.getArray(4).getInt(2) == 5)
// Add a longer array which requires resizing
column.reset()
val array = Array(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12)
assert(data.capacity == 10)
data.reserve(array.length)
assert(data.capacity == array.length * 2)
data.putInts(0, array.length, array, 0)
column.putArray(0, 0, array.length)
assert(ColumnVectorUtils.toJavaIntArray(column.getArray(0)) === array)
}
test("toArray for primitive types") {
(MemoryMode.ON_HEAP :: MemoryMode.OFF_HEAP :: Nil).foreach { memMode =>
val len = 4
val columnBool = allocate(len, new ArrayType(BooleanType, false), memMode)
val boolArray = Array(false, true, false, true)
boolArray.zipWithIndex.foreach { case (v, i) => columnBool.arrayData.putBoolean(i, v) }
columnBool.putArray(0, 0, len)
assert(columnBool.getArray(0).toBooleanArray === boolArray)
columnBool.close()
val columnByte = allocate(len, new ArrayType(ByteType, false), memMode)
val byteArray = Array[Byte](0, 1, 2, 3)
byteArray.zipWithIndex.foreach { case (v, i) => columnByte.arrayData.putByte(i, v) }
columnByte.putArray(0, 0, len)
assert(columnByte.getArray(0).toByteArray === byteArray)
columnByte.close()
val columnShort = allocate(len, new ArrayType(ShortType, false), memMode)
val shortArray = Array[Short](0, 1, 2, 3)
shortArray.zipWithIndex.foreach { case (v, i) => columnShort.arrayData.putShort(i, v) }
columnShort.putArray(0, 0, len)
assert(columnShort.getArray(0).toShortArray === shortArray)
columnShort.close()
val columnInt = allocate(len, new ArrayType(IntegerType, false), memMode)
val intArray = Array(0, 1, 2, 3)
intArray.zipWithIndex.foreach { case (v, i) => columnInt.arrayData.putInt(i, v) }
columnInt.putArray(0, 0, len)
assert(columnInt.getArray(0).toIntArray === intArray)
columnInt.close()
val columnLong = allocate(len, new ArrayType(LongType, false), memMode)
val longArray = Array[Long](0, 1, 2, 3)
longArray.zipWithIndex.foreach { case (v, i) => columnLong.arrayData.putLong(i, v) }
columnLong.putArray(0, 0, len)
assert(columnLong.getArray(0).toLongArray === longArray)
columnLong.close()
val columnFloat = allocate(len, new ArrayType(FloatType, false), memMode)
val floatArray = Array(0.0F, 1.1F, 2.2F, 3.3F)
floatArray.zipWithIndex.foreach { case (v, i) => columnFloat.arrayData.putFloat(i, v) }
columnFloat.putArray(0, 0, len)
assert(columnFloat.getArray(0).toFloatArray === floatArray)
columnFloat.close()
val columnDouble = allocate(len, new ArrayType(DoubleType, false), memMode)
val doubleArray = Array(0.0, 1.1, 2.2, 3.3)
doubleArray.zipWithIndex.foreach { case (v, i) => columnDouble.arrayData.putDouble(i, v) }
columnDouble.putArray(0, 0, len)
assert(columnDouble.getArray(0).toDoubleArray === doubleArray)
columnDouble.close()
}
}
test("Int Map") {
(MemoryMode.ON_HEAP :: MemoryMode.OFF_HEAP :: Nil).foreach { memMode =>
val column = allocate(10, new MapType(IntegerType, IntegerType, false), memMode)
(0 to 1).foreach { colIndex =>
val data = column.getChild(colIndex)
(0 to 5).foreach {i =>
data.putInt(i, i * (colIndex + 1))
}
}
// Populate it with maps [0->0], [1->2, 2->4], null, [], [3->6, 4->8, 5->10]
column.putArray(0, 0, 1)
column.putArray(1, 1, 2)
column.putNull(2)
assert(column.getMap(2) == null)
column.putArray(3, 3, 0)
column.putArray(4, 3, 3)
assert(column.getMap(0).numElements == 1)
assert(column.getMap(1).numElements == 2)
assert(column.isNullAt(2))
assert(column.getMap(3).numElements == 0)
assert(column.getMap(4).numElements == 3)
val a1 = ColumnVectorUtils.toJavaIntMap(column.getMap(0))
val a2 = ColumnVectorUtils.toJavaIntMap(column.getMap(1))
val a4 = ColumnVectorUtils.toJavaIntMap(column.getMap(3))
val a5 = ColumnVectorUtils.toJavaIntMap(column.getMap(4))
assert(a1.asScala == Map(0 -> 0))
assert(a2.asScala == Map(1 -> 2, 2 -> 4))
assert(a4.asScala == Map())
assert(a5.asScala == Map(3 -> 6, 4 -> 8, 5 -> 10))
column.close()
}
}
testVector(
"Struct Column",
10,
new StructType().add("int", IntegerType).add("double", DoubleType)) { column =>
val c1 = column.getChild(0)
val c2 = column.getChild(1)
assert(c1.dataType() == IntegerType)
assert(c2.dataType() == DoubleType)
c1.putInt(0, 123)
c2.putDouble(0, 3.45)
column.putNull(1)
assert(column.getStruct(1) == null)
c1.putInt(2, 456)
c2.putDouble(2, 5.67)
val s = column.getStruct(0)
assert(s.getInt(0) == 123)
assert(s.getDouble(1) == 3.45)
assert(column.isNullAt(1))
assert(column.getStruct(1) == null)
val s2 = column.getStruct(2)
assert(s2.getInt(0) == 456)
assert(s2.getDouble(1) == 5.67)
}
testVector("Nest Array in Array", 10, new ArrayType(new ArrayType(IntegerType, true), true)) {
column =>
val childColumn = column.arrayData()
val data = column.arrayData().arrayData()
(0 until 6).foreach {
case 3 => data.putNull(3)
case i => data.putInt(i, i)
}
// Arrays in child column: [0], [1, 2], [], [null, 4, 5]
childColumn.putArray(0, 0, 1)
childColumn.putArray(1, 1, 2)
childColumn.putArray(2, 2, 0)
childColumn.putArray(3, 3, 3)
// Arrays in column: [[0]], [[1, 2], []], [[], [null, 4, 5]], null
column.putArray(0, 0, 1)
column.putArray(1, 1, 2)
column.putArray(2, 2, 2)
column.putNull(3)
assert(column.getArray(0).getArray(0).toIntArray() === Array(0))
assert(column.getArray(1).getArray(0).toIntArray() === Array(1, 2))
assert(column.getArray(1).getArray(1).toIntArray() === Array())
assert(column.getArray(2).getArray(0).toIntArray() === Array())
assert(column.getArray(2).getArray(1).isNullAt(0))
assert(column.getArray(2).getArray(1).getInt(1) === 4)
assert(column.getArray(2).getArray(1).getInt(2) === 5)
assert(column.isNullAt(3))
}
private val structType: StructType = new StructType().add("i", IntegerType).add("l", LongType)
testVector(
"Nest Struct in Array",
10,
new ArrayType(structType, true)) { column =>
val data = column.arrayData()
val c0 = data.getChild(0)
val c1 = data.getChild(1)
// Structs in child column: (0, 0), (1, 10), (2, 20), (3, 30), (4, 40), (5, 50)
(0 until 6).foreach { i =>
c0.putInt(i, i)
c1.putLong(i, i * 10)
}
// Arrays in column: [(0, 0), (1, 10)], [(1, 10), (2, 20), (3, 30)],
// [(4, 40), (5, 50)]
column.putArray(0, 0, 2)
column.putArray(1, 1, 3)
column.putArray(2, 4, 2)
assert(column.getArray(0).getStruct(0, 2).toSeq(structType) === Seq(0, 0))
assert(column.getArray(0).getStruct(1, 2).toSeq(structType) === Seq(1, 10))
assert(column.getArray(1).getStruct(0, 2).toSeq(structType) === Seq(1, 10))
assert(column.getArray(1).getStruct(1, 2).toSeq(structType) === Seq(2, 20))
assert(column.getArray(1).getStruct(2, 2).toSeq(structType) === Seq(3, 30))
assert(column.getArray(2).getStruct(0, 2).toSeq(structType) === Seq(4, 40))
assert(column.getArray(2).getStruct(1, 2).toSeq(structType) === Seq(5, 50))
}
testVector(
"Nest Array in Struct",
10,
new StructType()
.add("int", IntegerType)
.add("array", new ArrayType(IntegerType, true))) { column =>
val c0 = column.getChild(0)
val c1 = column.getChild(1)
c0.putInt(0, 0)
c0.putInt(1, 1)
c0.putInt(2, 2)
val c1Child = c1.arrayData()
(0 until 6).foreach { i =>
c1Child.putInt(i, i)
}
// Arrays in c1: [0, 1], [2], [3, 4, 5]
c1.putArray(0, 0, 2)
c1.putArray(1, 2, 1)
c1.putArray(2, 3, 3)
assert(column.getStruct(0).getInt(0) === 0)
assert(column.getStruct(0).getArray(1).toIntArray() === Array(0, 1))
assert(column.getStruct(1).getInt(0) === 1)
assert(column.getStruct(1).getArray(1).toIntArray() === Array(2))
assert(column.getStruct(2).getInt(0) === 2)
assert(column.getStruct(2).getArray(1).toIntArray() === Array(3, 4, 5))
}
private val subSchema: StructType = new StructType()
.add("int", IntegerType)
.add("int", IntegerType)
testVector(
"Nest Struct in Struct",
10,
new StructType().add("int", IntegerType).add("struct", subSchema)) { column =>
val c0 = column.getChild(0)
val c1 = column.getChild(1)
c0.putInt(0, 0)
c0.putInt(1, 1)
c0.putInt(2, 2)
val c1c0 = c1.getChild(0)
val c1c1 = c1.getChild(1)
// Structs in c1: (7, 70), (8, 80), (9, 90)
c1c0.putInt(0, 7)
c1c0.putInt(1, 8)
c1c0.putInt(2, 9)
c1c1.putInt(0, 70)
c1c1.putInt(1, 80)
c1c1.putInt(2, 90)
assert(column.getStruct(0).getInt(0) === 0)
assert(column.getStruct(0).getStruct(1, 2).toSeq(subSchema) === Seq(7, 70))
assert(column.getStruct(1).getInt(0) === 1)
assert(column.getStruct(1).getStruct(1, 2).toSeq(subSchema) === Seq(8, 80))
assert(column.getStruct(2).getInt(0) === 2)
assert(column.getStruct(2).getStruct(1, 2).toSeq(subSchema) === Seq(9, 90))
}
test("ColumnarBatch basic") {
(MemoryMode.ON_HEAP :: MemoryMode.OFF_HEAP :: Nil).foreach { memMode => {
val schema = new StructType()
.add("intCol", IntegerType)
.add("doubleCol", DoubleType)
.add("intCol2", IntegerType)
.add("string", BinaryType)
val capacity = 4 * 1024
val columns = schema.fields.map { field =>
allocate(capacity, field.dataType, memMode)
}
val batch = new ColumnarBatch(columns.toArray)
assert(batch.numCols() == 4)
assert(batch.numRows() == 0)
assert(batch.rowIterator().hasNext == false)
// Add a row [1, 1.1, NULL, "Hello"]
columns(0).putInt(0, 1)
columns(1).putDouble(0, 1.1)
columns(2).putNull(0)
columns(3).putByteArray(0, "Hello".getBytes(StandardCharsets.UTF_8))
batch.setNumRows(1)
// Verify the results of the row.
assert(batch.numCols() == 4)
assert(batch.numRows() == 1)
assert(batch.rowIterator().hasNext == true)
assert(batch.rowIterator().hasNext == true)
assert(columns(0).getInt(0) == 1)
assert(columns(0).isNullAt(0) == false)
assert(columns(1).getDouble(0) == 1.1)
assert(columns(1).isNullAt(0) == false)
assert(columns(2).isNullAt(0) == true)
assert(columns(3).getUTF8String(0).toString == "Hello")
// Verify the iterator works correctly.
val it = batch.rowIterator()
assert(it.hasNext())
val row = it.next()
assert(row.getInt(0) == 1)
assert(row.isNullAt(0) == false)
assert(row.getDouble(1) == 1.1)
assert(row.isNullAt(1) == false)
assert(row.isNullAt(2) == true)
assert(columns(3).getUTF8String(0).toString == "Hello")
assert(it.hasNext == false)
assert(it.hasNext == false)
// Reset and add 3 rows
columns.foreach(_.reset())
// Add rows [NULL, 2.2, 2, "abc"], [3, NULL, 3, ""], [4, 4.4, 4, "world]
columns(0).putNull(0)
columns(1).putDouble(0, 2.2)
columns(2).putInt(0, 2)
columns(3).putByteArray(0, "abc".getBytes(StandardCharsets.UTF_8))
columns(0).putInt(1, 3)
columns(1).putNull(1)
columns(2).putInt(1, 3)
columns(3).putByteArray(1, "".getBytes(StandardCharsets.UTF_8))
columns(0).putInt(2, 4)
columns(1).putDouble(2, 4.4)
columns(2).putInt(2, 4)
columns(3).putByteArray(2, "world".getBytes(StandardCharsets.UTF_8))
batch.setNumRows(3)
def rowEquals(x: InternalRow, y: Row): Unit = {
assert(x.isNullAt(0) == y.isNullAt(0))
if (!x.isNullAt(0)) assert(x.getInt(0) == y.getInt(0))
assert(x.isNullAt(1) == y.isNullAt(1))
if (!x.isNullAt(1)) assert(x.getDouble(1) == y.getDouble(1))
assert(x.isNullAt(2) == y.isNullAt(2))
if (!x.isNullAt(2)) assert(x.getInt(2) == y.getInt(2))
assert(x.isNullAt(3) == y.isNullAt(3))
if (!x.isNullAt(3)) assert(x.getString(3) == y.getString(3))
}
// Verify
assert(batch.numRows() == 3)
val it2 = batch.rowIterator()
rowEquals(it2.next(), Row(null, 2.2, 2, "abc"))
rowEquals(it2.next(), Row(3, null, 3, ""))
rowEquals(it2.next(), Row(4, 4.4, 4, "world"))
      assert(!it2.hasNext)
batch.close()
}}
}
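  // Helpers used by the random-data round-trip tests below: doubleEquals treats two NaNs as
  // equal, and compareStruct recursively checks every field of a row read back from a column
  // batch against the reference Row, tagging assertion failures with the random seed so that
  // failing runs can be reproduced.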
private def doubleEquals(d1: Double, d2: Double): Boolean = {
if (d1.isNaN && d2.isNaN) {
true
} else {
d1 == d2
}
}
  private def compareStruct(
      fields: Seq[StructField], r1: InternalRow, r2: Row, seed: Long): Unit = {
fields.zipWithIndex.foreach { case (field: StructField, ordinal: Int) =>
assert(r1.isNullAt(ordinal) == r2.isNullAt(ordinal), "Seed = " + seed)
if (!r1.isNullAt(ordinal)) {
field.dataType match {
case BooleanType => assert(r1.getBoolean(ordinal) == r2.getBoolean(ordinal),
"Seed = " + seed)
case ByteType => assert(r1.getByte(ordinal) == r2.getByte(ordinal), "Seed = " + seed)
case ShortType => assert(r1.getShort(ordinal) == r2.getShort(ordinal), "Seed = " + seed)
case IntegerType => assert(r1.getInt(ordinal) == r2.getInt(ordinal), "Seed = " + seed)
case LongType => assert(r1.getLong(ordinal) == r2.getLong(ordinal), "Seed = " + seed)
case FloatType => assert(doubleEquals(r1.getFloat(ordinal), r2.getFloat(ordinal)),
"Seed = " + seed)
case DoubleType => assert(doubleEquals(r1.getDouble(ordinal), r2.getDouble(ordinal)),
"Seed = " + seed)
case t: DecimalType =>
val d1 = r1.getDecimal(ordinal, t.precision, t.scale).toBigDecimal
val d2 = r2.getDecimal(ordinal)
assert(d1.compare(d2) == 0, "Seed = " + seed)
case StringType =>
assert(r1.getString(ordinal) == r2.getString(ordinal), "Seed = " + seed)
case CalendarIntervalType =>
assert(r1.getInterval(ordinal) === r2.get(ordinal).asInstanceOf[CalendarInterval])
case ArrayType(childType, n) =>
val a1 = r1.getArray(ordinal).array
val a2 = r2.getList(ordinal).toArray
assert(a1.length == a2.length, "Seed = " + seed)
childType match {
case DoubleType =>
var i = 0
while (i < a1.length) {
assert(doubleEquals(a1(i).asInstanceOf[Double], a2(i).asInstanceOf[Double]),
"Seed = " + seed)
i += 1
}
case FloatType =>
var i = 0
while (i < a1.length) {
assert(doubleEquals(a1(i).asInstanceOf[Float], a2(i).asInstanceOf[Float]),
"Seed = " + seed)
i += 1
}
case t: DecimalType =>
var i = 0
while (i < a1.length) {
assert((a1(i) == null) == (a2(i) == null), "Seed = " + seed)
if (a1(i) != null) {
val d1 = a1(i).asInstanceOf[Decimal].toBigDecimal
val d2 = a2(i).asInstanceOf[java.math.BigDecimal]
assert(d1.compare(d2) == 0, "Seed = " + seed)
}
i += 1
}
case _ => assert(a1 === a2, "Seed = " + seed)
}
case StructType(childFields) =>
compareStruct(childFields, r1.getStruct(ordinal, fields.length),
r2.getStruct(ordinal), seed)
case _ =>
throw new UnsupportedOperationException("Not implemented " + field.dataType)
}
}
}
}
test("Convert rows") {
(MemoryMode.ON_HEAP :: MemoryMode.OFF_HEAP :: Nil).foreach { memMode => {
val rows = Row(1, 2L, "a", 1.2, 'b'.toByte) :: Row(4, 5L, "cd", 2.3, 'a'.toByte) :: Nil
val schema = new StructType()
.add("i1", IntegerType)
.add("l2", LongType)
.add("string", StringType)
.add("d", DoubleType)
.add("b", ByteType)
val batch = ColumnVectorUtils.toBatch(schema, memMode, rows.iterator.asJava)
assert(batch.numRows() == 2)
assert(batch.numCols() == 5)
val it = batch.rowIterator()
val referenceIt = rows.iterator
while (it.hasNext) {
compareStruct(schema, it.next(), referenceIt.next(), 0)
}
batch.close()
}
}}
/**
   * This test generates a random schema and random rows for it, converts them into column
   * batches, and verifies the results.
*/
  def testRandomRows(flatSchema: Boolean, numFields: Int): Unit = {
// TODO: Figure out why StringType doesn't work on jenkins.
val types = Array(
BooleanType, ByteType, FloatType, DoubleType, IntegerType, LongType, ShortType,
DecimalType.ShortDecimal, DecimalType.IntDecimal, DecimalType.ByteDecimal,
DecimalType.FloatDecimal, DecimalType.LongDecimal, new DecimalType(5, 2),
new DecimalType(12, 2), new DecimalType(30, 10), CalendarIntervalType)
val seed = System.nanoTime()
val NUM_ROWS = 200
val NUM_ITERS = 1000
val random = new Random(seed)
var i = 0
while (i < NUM_ITERS) {
val schema = if (flatSchema) {
RandomDataGenerator.randomSchema(random, numFields, types)
} else {
RandomDataGenerator.randomNestedSchema(random, numFields, types)
}
val rows = mutable.ArrayBuffer.empty[Row]
var j = 0
while (j < NUM_ROWS) {
val row = RandomDataGenerator.randomRow(random, schema)
rows += row
j += 1
}
(MemoryMode.ON_HEAP :: MemoryMode.OFF_HEAP :: Nil).foreach { memMode => {
val batch = ColumnVectorUtils.toBatch(schema, memMode, rows.iterator.asJava)
assert(batch.numRows() == NUM_ROWS)
val it = batch.rowIterator()
val referenceIt = rows.iterator
var k = 0
while (it.hasNext) {
compareStruct(schema, it.next(), referenceIt.next(), seed)
k += 1
}
batch.close()
}}
i += 1
}
}
test("Random flat schema") {
testRandomRows(true, 15)
}
test("Random nested schema") {
testRandomRows(false, 30)
}
test("exceeding maximum capacity should throw an error") {
(MemoryMode.ON_HEAP :: MemoryMode.OFF_HEAP :: Nil).foreach { memMode =>
val column = allocate(1, ByteType, memMode)
column.MAX_CAPACITY = 15
column.appendBytes(5, 0.toByte)
// Successfully allocate twice the requested capacity
assert(column.capacity == 10)
column.appendBytes(10, 0.toByte)
// Allocated capacity doesn't exceed MAX_CAPACITY
assert(column.capacity == 15)
val ex = intercept[RuntimeException] {
// Over-allocating beyond MAX_CAPACITY throws an exception
column.appendBytes(10, 0.toByte)
}
assert(ex.getMessage.contains(s"Cannot reserve additional contiguous bytes in the " +
s"vectorized reader"))
}
}
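  // The next test builds a ColumnarBatch directly on top of Arrow vectors: two IntVectors are
  // populated with offset value patterns (one null each), wrapped in ArrowColumnVector, and then
  // read back through the batch's row iterator.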
test("create columnar batch from Arrow column vectors") {
val allocator = ArrowUtils.rootAllocator.newChildAllocator("int", 0, Long.MaxValue)
val vector1 = ArrowUtils.toArrowField("int1", IntegerType, nullable = true, null)
.createVector(allocator).asInstanceOf[IntVector]
vector1.allocateNew()
val vector2 = ArrowUtils.toArrowField("int2", IntegerType, nullable = true, null)
.createVector(allocator).asInstanceOf[IntVector]
vector2.allocateNew()
(0 until 10).foreach { i =>
vector1.setSafe(i, i)
vector2.setSafe(i + 1, i)
}
vector1.setNull(10)
vector1.setValueCount(11)
vector2.setNull(0)
vector2.setValueCount(11)
val columnVectors = Seq(new ArrowColumnVector(vector1), new ArrowColumnVector(vector2))
val schema = StructType(Seq(StructField("int1", IntegerType), StructField("int2", IntegerType)))
val batch = new ColumnarBatch(columnVectors.toArray)
batch.setNumRows(11)
assert(batch.numCols() == 2)
assert(batch.numRows() == 11)
val rowIter = batch.rowIterator().asScala
rowIter.zipWithIndex.foreach { case (row, i) =>
if (i == 10) {
assert(row.isNullAt(0))
} else {
assert(row.getInt(0) == i)
}
if (i == 0) {
assert(row.isNullAt(1))
} else {
assert(row.getInt(1) == i - 1)
}
}
batch.close()
allocator.close()
}
testVector("Decimal API", 4, DecimalType.IntDecimal) {
column =>
val reference = mutable.ArrayBuffer.empty[Decimal]
var idx = 0
column.putDecimal(idx, new Decimal().set(10), 10)
reference += new Decimal().set(10)
idx += 1
column.putDecimal(idx, new Decimal().set(20), 10)
reference += new Decimal().set(20)
idx += 1
column.putNull(idx)
assert(column.getDecimal(idx, 10, 0) == null)
reference += null
idx += 1
column.putDecimal(idx, new Decimal().set(30), 10)
reference += new Decimal().set(30)
reference.zipWithIndex.foreach { case (v, i) =>
val errMsg = "VectorType=" + column.getClass.getSimpleName
assert(v == column.getDecimal(i, 10, 0), errMsg)
if (v == null) assert(column.isNullAt(i), errMsg)
}
column.close()
}
testVector("Binary APIs", 4, BinaryType) {
column =>
val reference = mutable.ArrayBuffer.empty[String]
var idx = 0
column.putByteArray(idx, "Hello".getBytes(StandardCharsets.UTF_8))
reference += "Hello"
idx += 1
column.putByteArray(idx, "World".getBytes(StandardCharsets.UTF_8))
reference += "World"
idx += 1
column.putNull(idx)
reference += null
idx += 1
column.putByteArray(idx, "abc".getBytes(StandardCharsets.UTF_8))
reference += "abc"
reference.zipWithIndex.foreach { case (v, i) =>
val errMsg = "VectorType=" + column.getClass.getSimpleName
if (v != null) {
assert(v == new String(column.getBinary(i)), errMsg)
} else {
assert(column.isNullAt(i), errMsg)
assert(column.getBinary(i) == null, errMsg)
}
}
column.close()
}
testVector("WritableColumnVector.reserve(): requested capacity is negative", 1024, ByteType) {
column =>
val ex = intercept[RuntimeException] { column.reserve(-1) }
assert(ex.getMessage.contains(
"Cannot reserve additional contiguous bytes in the vectorized reader (integer overflow)"))
}
}
| WindCanDie/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/vectorized/ColumnarBatchSuite.scala | Scala | apache-2.0 | 43,513 |
/*
* Copyright 2015 Databricks Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.databricks.spark.sql.perf.tpcds
import com.databricks.spark.sql.perf.{ExecutionMode, Benchmark}
trait ImpalaKitQueries extends Benchmark {
import ExecutionMode._
// Queries are from
// https://github.com/cloudera/impala-tpcds-kit/tree/master/queries-sql92-modified/queries
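  // Compared to the stock TPC-DS templates, these variants spell out joins explicitly, keep
  // window functions and rollups only as SQL comments, and add partition key filters on
  // ss_sold_date_sk in place of (or alongside) the original date predicates -- see the inline
  // "-- partition key filter" comments below.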
val queries = Seq(
("q19", """
|-- start query 1 in stream 0 using template query19.tpl
|select
| i_brand_id,
| i_brand,
| i_manufact_id,
| i_manufact,
| sum(ss_ext_sales_price) ext_price
|from
| store_sales
| join item on (store_sales.ss_item_sk = item.i_item_sk)
| join customer on (store_sales.ss_customer_sk = customer.c_customer_sk)
| join customer_address on (customer.c_current_addr_sk = customer_address.ca_address_sk)
| join store on (store_sales.ss_store_sk = store.s_store_sk)
| join date_dim on (store_sales.ss_sold_date_sk = date_dim.d_date_sk)
|where
| --ss_date between '1999-11-01' and '1999-11-30'
| ss_sold_date_sk between 2451484 and 2451513
| and d_moy = 11
| and d_year = 1999
| and i_manager_id = 7
| and substr(ca_zip, 1, 5) <> substr(s_zip, 1, 5)
|group by
| i_brand,
| i_brand_id,
| i_manufact_id,
| i_manufact
|order by
| ext_price desc,
| i_brand,
| i_brand_id,
| i_manufact_id,
| i_manufact
|limit 100
|-- end query 1 in stream 0 using template query19.tpl
""".stripMargin),
("q27", """
|-- start query 1 in stream 0 using template query27.tpl
|select
| i_item_id,
| s_state,
| -- grouping(s_state) g_state,
| avg(ss_quantity) agg1,
| avg(ss_list_price) agg2,
| avg(ss_coupon_amt) agg3,
| avg(ss_sales_price) agg4
|from
| store_sales
| join store on (store_sales.ss_store_sk = store.s_store_sk)
| join customer_demographics on (store_sales.ss_cdemo_sk = customer_demographics.cd_demo_sk)
| join item on (store_sales.ss_item_sk = item.i_item_sk)
| join date_dim on (store_sales.ss_sold_date_sk = date_dim.d_date_sk)
|where
| -- ss_date between '1998-01-01' and '1998-12-31'
| ss_sold_date_sk between 2450815 and 2451179 -- partition key filter
| and d_year = 1998
| and s_state in ('WI', 'CA', 'TX', 'FL', 'WA', 'TN')
| and cd_gender = 'F'
| and cd_marital_status = 'W'
| and cd_education_status = 'Primary'
|group by
| -- rollup(i_item_id, s_state)
| i_item_id,
| s_state
|order by
| i_item_id,
| s_state
|limit 100
|-- end query 1 in stream 0 using template query27.tpl
""".stripMargin),
("q3", """
|-- start query 1 in stream 0 using template query3.tpl
|select
| dt.d_year,
| -- year(ss_date) as d_year,
| -- case
| -- when ss_sold_date_sk between 2451149 and 2451179 then 1998
| -- when ss_sold_date_sk between 2451514 and 2451544 then 1999
| -- when ss_sold_date_sk between 2451880 and 2451910 then 2000
| -- when ss_sold_date_sk between 2452245 and 2452275 then 2001
| -- when ss_sold_date_sk between 2452610 and 2452640 then 2002
| -- end as d_year,
| item.i_brand_id brand_id,
| item.i_brand brand,
| sum(ss_ext_sales_price) sum_agg
|from
| store_sales
| join item on (store_sales.ss_item_sk = item.i_item_sk)
| join date_dim dt on (dt.d_date_sk = store_sales.ss_sold_date_sk)
|where
| item.i_manufact_id = 436
| and dt.d_moy = 12
| -- and (ss_date between '1998-12-01' and '1998-12-31'
| -- or ss_date between '1999-12-01' and '1999-12-31'
| -- or ss_date between '2000-12-01' and '2000-12-31'
| -- or ss_date between '2001-12-01' and '2001-12-31'
| -- or ss_date between '2002-12-01' and '2002-12-31')
| and (ss_sold_date_sk between 2451149 and 2451179
| or ss_sold_date_sk between 2451514 and 2451544
| or ss_sold_date_sk between 2451880 and 2451910
| or ss_sold_date_sk between 2452245 and 2452275
| or ss_sold_date_sk between 2452610 and 2452640)
|group by
| d_year,
| item.i_brand,
| item.i_brand_id
|order by
| d_year,
| sum_agg desc,
| brand_id
|-- end query 1 in stream 0 using template query3.tpl
|limit 100
""".stripMargin),
("q34", """
|-- start query 1 in stream 0 using template query34.tpl
|select
| c_last_name,
| c_first_name,
| c_salutation,
| c_preferred_cust_flag,
| ss_ticket_number,
| cnt
|from
| (select
| ss_ticket_number,
| ss_customer_sk,
| count(*) cnt
| from
| store_sales
| join household_demographics on (store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk)
| join store on (store_sales.ss_store_sk = store.s_store_sk)
| join date_dim on (store_sales.ss_sold_date_sk = date_dim.d_date_sk)
| where
| date_dim.d_year in (1998, 1998 + 1, 1998 + 2)
| and (date_dim.d_dom between 1 and 3
| or date_dim.d_dom between 25 and 28)
| and (household_demographics.hd_buy_potential = '>10000'
| or household_demographics.hd_buy_potential = 'unknown')
| and household_demographics.hd_vehicle_count > 0
| and (case when household_demographics.hd_vehicle_count > 0 then household_demographics.hd_dep_count / household_demographics.hd_vehicle_count else null end) > 1.2
| and store.s_county in ('Saginaw County', 'Sumner County', 'Appanoose County', 'Daviess County', 'Fairfield County', 'Raleigh County', 'Ziebach County', 'Williamson County')
| and ss_sold_date_sk between 2450816 and 2451910 -- partition key filter
| group by
| ss_ticket_number,
| ss_customer_sk
| ) dn
|join customer on (dn.ss_customer_sk = customer.c_customer_sk)
|where
| cnt between 15 and 20
|order by
| c_last_name,
| c_first_name,
| c_salutation,
| c_preferred_cust_flag desc,
| ss_ticket_number,
| cnt
|limit 1000
|-- end query 1 in stream 0 using template query34.tpl
""".stripMargin),
("q42", """
|-- start query 1 in stream 0 using template query42.tpl
|select
| d_year,
| i_category_id,
| i_category,
| sum(ss_ext_sales_price) as total_price
|from
| store_sales
| join item on (store_sales.ss_item_sk = item.i_item_sk)
| join date_dim dt on (dt.d_date_sk = store_sales.ss_sold_date_sk)
|where
| item.i_manager_id = 1
| and dt.d_moy = 12
| and dt.d_year = 1998
| -- and ss_date between '1998-12-01' and '1998-12-31'
| and ss_sold_date_sk between 2451149 and 2451179 -- partition key filter
|group by
| d_year,
| i_category_id,
| i_category
|order by
| -- sum(ss_ext_sales_price) desc,
| total_price desc,
| d_year,
| i_category_id,
| i_category
|limit 100
|-- end query 1 in stream 0 using template query42.tpl
""".stripMargin),
("q43", """
|-- start query 1 in stream 0 using template query43.tpl
|select
| s_store_name,
| s_store_id,
| sum(case when (d_day_name = 'Sunday') then ss_sales_price else null end) sun_sales,
| sum(case when (d_day_name = 'Monday') then ss_sales_price else null end) mon_sales,
| sum(case when (d_day_name = 'Tuesday') then ss_sales_price else null end) tue_sales,
| sum(case when (d_day_name = 'Wednesday') then ss_sales_price else null end) wed_sales,
| sum(case when (d_day_name = 'Thursday') then ss_sales_price else null end) thu_sales,
| sum(case when (d_day_name = 'Friday') then ss_sales_price else null end) fri_sales,
| sum(case when (d_day_name = 'Saturday') then ss_sales_price else null end) sat_sales
|from
| store_sales
| join store on (store_sales.ss_store_sk = store.s_store_sk)
| join date_dim on (store_sales.ss_sold_date_sk = date_dim.d_date_sk)
|where
| s_gmt_offset = -5
| and d_year = 1998
| -- and ss_date between '1998-01-01' and '1998-12-31'
| and ss_sold_date_sk between 2450816 and 2451179 -- partition key filter
|group by
| s_store_name,
| s_store_id
|order by
| s_store_name,
| s_store_id,
| sun_sales,
| mon_sales,
| tue_sales,
| wed_sales,
| thu_sales,
| fri_sales,
| sat_sales
|limit 100
|-- end query 1 in stream 0 using template query43.tpl
""".stripMargin),
("q46", """
|-- start query 1 in stream 0 using template query46.tpl
|select
| c_last_name,
| c_first_name,
| ca_city,
| bought_city,
| ss_ticket_number,
| amt,
| profit
|from
| (select
| ss_ticket_number,
| ss_customer_sk,
| ca_city bought_city,
| sum(ss_coupon_amt) amt,
| sum(ss_net_profit) profit
| from
| store_sales
| join store on (store_sales.ss_store_sk = store.s_store_sk)
| join household_demographics on (store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk)
| join date_dim on (store_sales.ss_sold_date_sk = date_dim.d_date_sk)
| join customer_address on (store_sales.ss_addr_sk = customer_address.ca_address_sk)
| where
| store.s_city in ('Midway', 'Concord', 'Spring Hill', 'Brownsville', 'Greenville')
| and (household_demographics.hd_dep_count = 5
| or household_demographics.hd_vehicle_count = 3)
| and date_dim.d_dow in (6, 0)
| and date_dim.d_year in (1999, 1999 + 1, 1999 + 2)
| -- and ss_date between '1999-01-01' and '2001-12-31'
| -- and ss_sold_date_sk between 2451180 and 2452275 -- partition key filter
| and ss_sold_date_sk in (2451181, 2451182, 2451188, 2451189, 2451195, 2451196, 2451202, 2451203, 2451209, 2451210, 2451216, 2451217,
| 2451223, 2451224, 2451230, 2451231, 2451237, 2451238, 2451244, 2451245, 2451251, 2451252, 2451258, 2451259,
| 2451265, 2451266, 2451272, 2451273, 2451279, 2451280, 2451286, 2451287, 2451293, 2451294, 2451300, 2451301,
| 2451307, 2451308, 2451314, 2451315, 2451321, 2451322, 2451328, 2451329, 2451335, 2451336, 2451342, 2451343,
| 2451349, 2451350, 2451356, 2451357, 2451363, 2451364, 2451370, 2451371, 2451377, 2451378, 2451384, 2451385,
| 2451391, 2451392, 2451398, 2451399, 2451405, 2451406, 2451412, 2451413, 2451419, 2451420, 2451426, 2451427,
| 2451433, 2451434, 2451440, 2451441, 2451447, 2451448, 2451454, 2451455, 2451461, 2451462, 2451468, 2451469,
| 2451475, 2451476, 2451482, 2451483, 2451489, 2451490, 2451496, 2451497, 2451503, 2451504, 2451510, 2451511,
| 2451517, 2451518, 2451524, 2451525, 2451531, 2451532, 2451538, 2451539, 2451545, 2451546, 2451552, 2451553,
| 2451559, 2451560, 2451566, 2451567, 2451573, 2451574, 2451580, 2451581, 2451587, 2451588, 2451594, 2451595,
| 2451601, 2451602, 2451608, 2451609, 2451615, 2451616, 2451622, 2451623, 2451629, 2451630, 2451636, 2451637,
| 2451643, 2451644, 2451650, 2451651, 2451657, 2451658, 2451664, 2451665, 2451671, 2451672, 2451678, 2451679,
| 2451685, 2451686, 2451692, 2451693, 2451699, 2451700, 2451706, 2451707, 2451713, 2451714, 2451720, 2451721,
| 2451727, 2451728, 2451734, 2451735, 2451741, 2451742, 2451748, 2451749, 2451755, 2451756, 2451762, 2451763,
| 2451769, 2451770, 2451776, 2451777, 2451783, 2451784, 2451790, 2451791, 2451797, 2451798, 2451804, 2451805,
| 2451811, 2451812, 2451818, 2451819, 2451825, 2451826, 2451832, 2451833, 2451839, 2451840, 2451846, 2451847,
| 2451853, 2451854, 2451860, 2451861, 2451867, 2451868, 2451874, 2451875, 2451881, 2451882, 2451888, 2451889,
| 2451895, 2451896, 2451902, 2451903, 2451909, 2451910, 2451916, 2451917, 2451923, 2451924, 2451930, 2451931,
| 2451937, 2451938, 2451944, 2451945, 2451951, 2451952, 2451958, 2451959, 2451965, 2451966, 2451972, 2451973,
| 2451979, 2451980, 2451986, 2451987, 2451993, 2451994, 2452000, 2452001, 2452007, 2452008, 2452014, 2452015,
| 2452021, 2452022, 2452028, 2452029, 2452035, 2452036, 2452042, 2452043, 2452049, 2452050, 2452056, 2452057,
| 2452063, 2452064, 2452070, 2452071, 2452077, 2452078, 2452084, 2452085, 2452091, 2452092, 2452098, 2452099,
| 2452105, 2452106, 2452112, 2452113, 2452119, 2452120, 2452126, 2452127, 2452133, 2452134, 2452140, 2452141,
| 2452147, 2452148, 2452154, 2452155, 2452161, 2452162, 2452168, 2452169, 2452175, 2452176, 2452182, 2452183,
| 2452189, 2452190, 2452196, 2452197, 2452203, 2452204, 2452210, 2452211, 2452217, 2452218, 2452224, 2452225,
| 2452231, 2452232, 2452238, 2452239, 2452245, 2452246, 2452252, 2452253, 2452259, 2452260, 2452266, 2452267,
| 2452273, 2452274)
| group by
| ss_ticket_number,
| ss_customer_sk,
| ss_addr_sk,
| ca_city
| ) dn
| join customer on (dn.ss_customer_sk = customer.c_customer_sk)
| join customer_address current_addr on (customer.c_current_addr_sk = current_addr.ca_address_sk)
|where
| current_addr.ca_city <> bought_city
|order by
| c_last_name,
| c_first_name,
| ca_city,
| bought_city,
| ss_ticket_number
|limit 100
|-- end query 1 in stream 0 using template query46.tpl
""".stripMargin),
("q52", """
|-- start query 1 in stream 0 using template query52.tpl
|select
| d_year,
| i_brand_id,
| i_brand,
| sum(ss_ext_sales_price) ext_price
|from
| store_sales
| join item on (store_sales.ss_item_sk = item.i_item_sk)
| join date_dim dt on (store_sales.ss_sold_date_sk = dt.d_date_sk)
|where
| i_manager_id = 1
| and d_moy = 12
| and d_year = 1998
| -- and ss_date between '1998-12-01' and '1998-12-31'
| and ss_sold_date_sk between 2451149 and 2451179 -- partition key filter
|group by
| d_year,
| i_brand,
| i_brand_id
|order by
| d_year,
| ext_price desc,
| i_brand_id
|limit 100
|-- end query 1 in stream 0 using template query52.tpl
""".stripMargin),
("q53", """
|-- start query 1 in stream 0 using template query53.tpl
|select
| *
|from
| (select
| i_manufact_id,
| sum(ss_sales_price) sum_sales
| -- avg(sum(ss_sales_price)) over(partition by i_manufact_id) avg_quarterly_sales
| from
| store_sales
| join item on (store_sales.ss_item_sk = item.i_item_sk)
| join store on (store_sales.ss_store_sk = store.s_store_sk)
| join date_dim on (store_sales.ss_sold_date_sk = date_dim.d_date_sk)
| where
| ss_sold_date_sk between 2451911 and 2452275 -- partition key filter
| -- ss_date between '2001-01-01' and '2001-12-31'
| and d_month_seq in(1212, 1212 + 1, 1212 + 2, 1212 + 3, 1212 + 4, 1212 + 5, 1212 + 6, 1212 + 7, 1212 + 8, 1212 + 9, 1212 + 10, 1212 + 11)
| and (
| (i_category in('Books', 'Children', 'Electronics')
| and i_class in('personal', 'portable', 'reference', 'self-help')
| and i_brand in('scholaramalgamalg #14', 'scholaramalgamalg #7', 'exportiunivamalg #9', 'scholaramalgamalg #9')
| )
| or
| (i_category in('Women', 'Music', 'Men')
| and i_class in('accessories', 'classical', 'fragrances', 'pants')
| and i_brand in('amalgimporto #1', 'edu packscholar #1', 'exportiimporto #1', 'importoamalg #1')
| )
| )
| group by
| i_manufact_id,
| d_qoy
| ) tmp1
|-- where
|-- case when avg_quarterly_sales > 0 then abs(sum_sales - avg_quarterly_sales) / avg_quarterly_sales else null end > 0.1
|order by
| -- avg_quarterly_sales,
| sum_sales,
| i_manufact_id
|limit 100
|-- end query 1 in stream 0 using template query53.tpl
""".stripMargin),
("q55", """
|-- start query 1 in stream 0 using template query55.tpl
|select
| i_brand_id,
| i_brand,
| sum(ss_ext_sales_price) ext_price
|from
| store_sales
| join item on (store_sales.ss_item_sk = item.i_item_sk)
| join date_dim on (store_sales.ss_sold_date_sk = date_dim.d_date_sk)
|where
| i_manager_id = 36
| and d_moy = 12
| and d_year = 2001
| -- and ss_date between '2001-12-01' and '2001-12-31'
| and ss_sold_date_sk between 2452245 and 2452275 -- partition key filter
|group by
| i_brand,
| i_brand_id
|order by
| ext_price desc,
| i_brand_id
|limit 100
|-- end query 1 in stream 0 using template query55.tpl
""".stripMargin),
("q59", """
|-- start query 1 in stream 0 using template query59.tpl
|select
| s_store_name1,
| s_store_id1,
| d_week_seq1,
| sun_sales1 / sun_sales2,
| mon_sales1 / mon_sales2,
| tue_sales1 / tue_sales2,
| wed_sales1 / wed_sales2,
| thu_sales1 / thu_sales2,
| fri_sales1 / fri_sales2,
| sat_sales1 / sat_sales2
|from
| (select
| s_store_name s_store_name1,
| wss.d_week_seq d_week_seq1,
| s_store_id s_store_id1,
| sun_sales sun_sales1,
| mon_sales mon_sales1,
| tue_sales tue_sales1,
| wed_sales wed_sales1,
| thu_sales thu_sales1,
| fri_sales fri_sales1,
| sat_sales sat_sales1
| from
| (select
| d_week_seq,
| ss_store_sk,
| sum(case when(d_day_name = 'Sunday') then ss_sales_price else null end) sun_sales,
| sum(case when(d_day_name = 'Monday') then ss_sales_price else null end) mon_sales,
| sum(case when(d_day_name = 'Tuesday') then ss_sales_price else null end) tue_sales,
| sum(case when(d_day_name = 'Wednesday') then ss_sales_price else null end) wed_sales,
| sum(case when(d_day_name = 'Thursday') then ss_sales_price else null end) thu_sales,
| sum(case when(d_day_name = 'Friday') then ss_sales_price else null end) fri_sales,
| sum(case when(d_day_name = 'Saturday') then ss_sales_price else null end) sat_sales
| from
| store_sales
| join date_dim on (store_sales.ss_sold_date_sk = date_dim.d_date_sk)
| where
| -- ss_date between '1998-10-01' and '1999-09-30'
| ss_sold_date_sk between 2451088 and 2451452
| group by
| d_week_seq,
| ss_store_sk
| ) wss
| join store on (wss.ss_store_sk = store.s_store_sk)
| join date_dim d on (wss.d_week_seq = d.d_week_seq)
| where
| d_month_seq between 1185 and 1185 + 11
| ) y
| join
| (select
| s_store_name s_store_name2,
| wss.d_week_seq d_week_seq2,
| s_store_id s_store_id2,
| sun_sales sun_sales2,
| mon_sales mon_sales2,
| tue_sales tue_sales2,
| wed_sales wed_sales2,
| thu_sales thu_sales2,
| fri_sales fri_sales2,
| sat_sales sat_sales2
| from
| (select
| d_week_seq,
| ss_store_sk,
| sum(case when(d_day_name = 'Sunday') then ss_sales_price else null end) sun_sales,
| sum(case when(d_day_name = 'Monday') then ss_sales_price else null end) mon_sales,
| sum(case when(d_day_name = 'Tuesday') then ss_sales_price else null end) tue_sales,
| sum(case when(d_day_name = 'Wednesday') then ss_sales_price else null end) wed_sales,
| sum(case when(d_day_name = 'Thursday') then ss_sales_price else null end) thu_sales,
| sum(case when(d_day_name = 'Friday') then ss_sales_price else null end) fri_sales,
| sum(case when(d_day_name = 'Saturday') then ss_sales_price else null end) sat_sales
| from
| store_sales
| join date_dim on (store_sales.ss_sold_date_sk = date_dim.d_date_sk)
| where
| -- ss_date between '1999-10-01' and '2000-09-30'
| ss_sold_date_sk between 2451088 and 2451452
| group by
| d_week_seq,
| ss_store_sk
| ) wss
| join store on (wss.ss_store_sk = store.s_store_sk)
| join date_dim d on (wss.d_week_seq = d.d_week_seq)
| where
| d_month_seq between 1185 + 12 and 1185 + 23
| ) x
| on (y.s_store_id1 = x.s_store_id2)
|where
| d_week_seq1 = d_week_seq2 - 52
|order by
| s_store_name1,
| s_store_id1,
| d_week_seq1
|limit 100
|-- end query 1 in stream 0 using template query59.tpl
""".stripMargin),
("q63", """
|-- start query 1 in stream 0 using template query63.tpl
|select
| *
|from
| (select
| i_manager_id,
| sum(ss_sales_price) sum_sales
| -- avg(sum(ss_sales_price)) over(partition by i_manager_id) avg_monthly_sales
| from
| store_sales
| join item on (store_sales.ss_item_sk = item.i_item_sk)
| join store on (store_sales.ss_store_sk = store.s_store_sk)
| join date_dim on (store_sales.ss_sold_date_sk = date_dim.d_date_sk)
| where
| ss_sold_date_sk between 2451911 and 2452275 -- partition key filter
| -- ss_date between '2001-01-01' and '2001-12-31'
| and d_month_seq in (1212, 1212 + 1, 1212 + 2, 1212 + 3, 1212 + 4, 1212 + 5, 1212 + 6, 1212 + 7, 1212 + 8, 1212 + 9, 1212 + 10, 1212 + 11)
| and (
| (i_category in('Books', 'Children', 'Electronics')
| and i_class in('personal', 'portable', 'refernece', 'self-help')
| and i_brand in('scholaramalgamalg #14', 'scholaramalgamalg #7', 'exportiunivamalg #9', 'scholaramalgamalg #9')
| )
| or
| (i_category in('Women', 'Music', 'Men')
| and i_class in('accessories', 'classical', 'fragrances', 'pants')
| and i_brand in('amalgimporto #1', 'edu packscholar #1', 'exportiimporto #1', 'importoamalg #1')
| )
| )
| group by
| i_manager_id,
| d_moy
| ) tmp1
|-- where
|-- case when avg_monthly_sales > 0 then abs(sum_sales - avg_monthly_sales) / avg_monthly_sales else null end > 0.1
|order by
| i_manager_id,
| -- avg_monthly_sales,
| sum_sales
|limit 100
|-- end query 1 in stream 0 using template query63.tpl
""".stripMargin),
("q65", """
|--q65
|-- start query 1 in stream 0 using template query65.tpl
|select
| s_store_name,
| i_item_desc,
| sc.revenue,
| i_current_price,
| i_wholesale_cost,
| i_brand
|from
| (select
| ss_store_sk,
| ss_item_sk,
| sum(ss_sales_price) as revenue
| from
| store_sales
| join date_dim on (store_sales.ss_sold_date_sk = date_dim.d_date_sk)
| where
| -- ss_date between '2001-01-01' and '2001-12-31'
| ss_sold_date_sk between 2451911 and 2452275 -- partition key filter
| and d_month_seq between 1212 and 1212 + 11
| group by
| ss_store_sk,
| ss_item_sk
| ) sc
| join item on (sc.ss_item_sk = item.i_item_sk)
| join store on (sc.ss_store_sk = store.s_store_sk)
| join
| (select
| ss_store_sk,
| avg(revenue) as ave
| from
| (select
| ss_store_sk,
| ss_item_sk,
| sum(ss_sales_price) as revenue
| from
| store_sales
| join date_dim on (store_sales.ss_sold_date_sk = date_dim.d_date_sk)
| where
| -- ss_date between '2001-01-01' and '2001-12-31'
| ss_sold_date_sk between 2451911 and 2452275 -- partition key filter
| and d_month_seq between 1212 and 1212 + 11
| group by
| ss_store_sk,
| ss_item_sk
| ) sa
| group by
| ss_store_sk
| ) sb on (sc.ss_store_sk = sb.ss_store_sk) -- 676 rows
|where
| sc.revenue <= 0.1 * sb.ave
|order by
| s_store_name,
| i_item_desc
|limit 100
|-- end query 1 in stream 0 using template query65.tpl
""".stripMargin),
("q68", """
|-- start query 1 in stream 0 using template query68.tpl
|select
| c_last_name,
| c_first_name,
| ca_city,
| bought_city,
| ss_ticket_number,
| extended_price,
| extended_tax,
| list_price
|from
| (select
| ss_ticket_number,
| ss_customer_sk,
| ca_city bought_city,
| sum(ss_ext_sales_price) extended_price,
| sum(ss_ext_list_price) list_price,
| sum(ss_ext_tax) extended_tax
| from
| store_sales
| join store on (store_sales.ss_store_sk = store.s_store_sk)
| join household_demographics on (store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk)
| join date_dim on (store_sales.ss_sold_date_sk = date_dim.d_date_sk)
| join customer_address on (store_sales.ss_addr_sk = customer_address.ca_address_sk)
| where
| store.s_city in('Midway', 'Fairview')
| --and date_dim.d_dom between 1 and 2
| --and date_dim.d_year in(1999, 1999 + 1, 1999 + 2)
| -- and ss_date between '1999-01-01' and '2001-12-31'
| -- and dayofmonth(ss_date) in (1,2)
| -- and ss_sold_date_sk in (2451180, 2451181, 2451211, 2451212, 2451239, 2451240, 2451270, 2451271, 2451300, 2451301, 2451331,
| -- 2451332, 2451361, 2451362, 2451392, 2451393, 2451423, 2451424, 2451453, 2451454, 2451484, 2451485,
| -- 2451514, 2451515, 2451545, 2451546, 2451576, 2451577, 2451605, 2451606, 2451636, 2451637, 2451666,
| -- 2451667, 2451697, 2451698, 2451727, 2451728, 2451758, 2451759, 2451789, 2451790, 2451819, 2451820,
| -- 2451850, 2451851, 2451880, 2451881, 2451911, 2451912, 2451942, 2451943, 2451970, 2451971, 2452001,
| -- 2452002, 2452031, 2452032, 2452062, 2452063, 2452092, 2452093, 2452123, 2452124, 2452154, 2452155,
| -- 2452184, 2452185, 2452215, 2452216, 2452245, 2452246)
| and (household_demographics.hd_dep_count = 5
| or household_demographics.hd_vehicle_count = 3)
| and d_date between '1999-01-01' and '1999-03-31'
| and ss_sold_date_sk between 2451180 and 2451269 -- partition key filter (3 months)
| group by
| ss_ticket_number,
| ss_customer_sk,
| ss_addr_sk,
| ca_city
| ) dn
| join customer on (dn.ss_customer_sk = customer.c_customer_sk)
| join customer_address current_addr on (customer.c_current_addr_sk = current_addr.ca_address_sk)
|where
| current_addr.ca_city <> bought_city
|order by
| c_last_name,
| ss_ticket_number
|limit 100
|-- end query 1 in stream 0 using template query68.tpl
""".stripMargin),
("q7", """
|-- start query 1 in stream 0 using template query7.tpl
|select
| i_item_id,
| avg(ss_quantity) agg1,
| avg(ss_list_price) agg2,
| avg(ss_coupon_amt) agg3,
| avg(ss_sales_price) agg4
|from
| store_sales
| join customer_demographics on (store_sales.ss_cdemo_sk = customer_demographics.cd_demo_sk)
| join item on (store_sales.ss_item_sk = item.i_item_sk)
| join promotion on (store_sales.ss_promo_sk = promotion.p_promo_sk)
| join date_dim on (ss_sold_date_sk = d_date_sk)
|where
| cd_gender = 'F'
| and cd_marital_status = 'W'
| and cd_education_status = 'Primary'
| and (p_channel_email = 'N'
| or p_channel_event = 'N')
| and d_year = 1998
| -- and ss_date between '1998-01-01' and '1998-12-31'
| and ss_sold_date_sk between 2450815 and 2451179 -- partition key filter
|group by
| i_item_id
|order by
| i_item_id
|limit 100
|-- end query 1 in stream 0 using template query7.tpl
""".stripMargin),
("q73", """
|-- start query 1 in stream 0 using template query73.tpl
|select
| c_last_name,
| c_first_name,
| c_salutation,
| c_preferred_cust_flag,
| ss_ticket_number,
| cnt
|from
| (select
| ss_ticket_number,
| ss_customer_sk,
| count(*) cnt
| from
| store_sales
| join household_demographics on (store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk)
| join store on (store_sales.ss_store_sk = store.s_store_sk)
| -- join date_dim on (store_sales.ss_sold_date_sk = date_dim.d_date_sk)
| where
| store.s_county in ('Williamson County','Franklin Parish','Bronx County','Orange County')
| -- and date_dim.d_dom between 1 and 2
| -- and date_dim.d_year in(1998, 1998 + 1, 1998 + 2)
| -- and ss_date between '1999-01-01' and '2001-12-02'
| -- and dayofmonth(ss_date) in (1,2)
| -- partition key filter
| -- and ss_sold_date_sk in (2450816, 2450846, 2450847, 2450874, 2450875, 2450905, 2450906, 2450935, 2450936, 2450966, 2450967,
| -- 2450996, 2450997, 2451027, 2451028, 2451058, 2451059, 2451088, 2451089, 2451119, 2451120, 2451149,
| -- 2451150, 2451180, 2451181, 2451211, 2451212, 2451239, 2451240, 2451270, 2451271, 2451300, 2451301,
| -- 2451331, 2451332, 2451361, 2451362, 2451392, 2451393, 2451423, 2451424, 2451453, 2451454, 2451484,
| -- 2451485, 2451514, 2451515, 2451545, 2451546, 2451576, 2451577, 2451605, 2451606, 2451636, 2451637,
| -- 2451666, 2451667, 2451697, 2451698, 2451727, 2451728, 2451758, 2451759, 2451789, 2451790, 2451819,
| -- 2451820, 2451850, 2451851, 2451880, 2451881)
| and (household_demographics.hd_buy_potential = '>10000'
| or household_demographics.hd_buy_potential = 'unknown')
| and household_demographics.hd_vehicle_count > 0
| and case when household_demographics.hd_vehicle_count > 0 then household_demographics.hd_dep_count / household_demographics.hd_vehicle_count else null end > 1
| and ss_sold_date_sk between 2451180 and 2451269 -- partition key filter (3 months)
| group by
| ss_ticket_number,
| ss_customer_sk
| ) dj
| join customer on (dj.ss_customer_sk = customer.c_customer_sk)
|where
| cnt between 1 and 5
|order by
| cnt desc
|limit 1000
|-- end query 1 in stream 0 using template query73.tpl
""".stripMargin),
("q79", """
|-- start query 1 in stream 0 using template query79.tpl
|select
| c_last_name,
| c_first_name,
| substr(s_city, 1, 30) as city,
| ss_ticket_number,
| amt,
| profit
|from
| (select
| ss_ticket_number,
| ss_customer_sk,
| s_city,
| sum(ss_coupon_amt) amt,
| sum(ss_net_profit) profit
| from
| store_sales
| join household_demographics on (store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk)
| join date_dim on (store_sales.ss_sold_date_sk = date_dim.d_date_sk)
| join store on (store_sales.ss_store_sk = store.s_store_sk)
| where
| store.s_number_employees between 200 and 295
| and (household_demographics.hd_dep_count = 8
| or household_demographics.hd_vehicle_count > 0)
| and date_dim.d_dow = 1
| and date_dim.d_year in (1998, 1998 + 1, 1998 + 2)
| -- and ss_date between '1998-01-01' and '2000-12-25'
| -- 156 days
| and d_date between '1999-01-01' and '1999-03-31'
| and ss_sold_date_sk between 2451180 and 2451269 -- partition key filter
| group by
| ss_ticket_number,
| ss_customer_sk,
| ss_addr_sk,
| s_city
| ) ms
| join customer on (ms.ss_customer_sk = customer.c_customer_sk)
|order by
| c_last_name,
| c_first_name,
| -- substr(s_city, 1, 30),
| city,
| profit
|limit 100
|-- end query 1 in stream 0 using template query79.tpl
""".stripMargin),
("q8", """
|-- start query 8 in stream 0 using template query8.tpl
|select s_store_name
| ,sum(ss_net_profit)
| from store_sales
| ,date_dim
| ,store,
| (select distinct a01.ca_zip
| from
| (SELECT substr(ca_zip,1,5) ca_zip
| FROM customer_address
| WHERE substr(ca_zip,1,5) IN ('89436', '30868', '65085', '22977', '83927', '77557', '58429', '40697', '80614', '10502', '32779',
| '91137', '61265', '98294', '17921', '18427', '21203', '59362', '87291', '84093', '21505', '17184', '10866', '67898', '25797',
| '28055', '18377', '80332', '74535', '21757', '29742', '90885', '29898', '17819', '40811', '25990', '47513', '89531', '91068',
| '10391', '18846', '99223', '82637', '41368', '83658', '86199', '81625', '26696', '89338', '88425', '32200', '81427', '19053',
| '77471', '36610', '99823', '43276', '41249', '48584', '83550', '82276', '18842', '78890', '14090', '38123', '40936', '34425',
| '19850', '43286', '80072', '79188', '54191', '11395', '50497', '84861', '90733', '21068', '57666', '37119', '25004', '57835',
| '70067', '62878', '95806', '19303', '18840', '19124', '29785', '16737', '16022', '49613', '89977', '68310', '60069', '98360',
| '48649', '39050', '41793', '25002', '27413', '39736', '47208', '16515', '94808', '57648', '15009', '80015', '42961', '63982',
| '21744', '71853', '81087', '67468', '34175', '64008', '20261', '11201', '51799', '48043', '45645', '61163', '48375', '36447',
| '57042', '21218', '41100', '89951', '22745', '35851', '83326', '61125', '78298', '80752', '49858', '52940', '96976', '63792',
| '11376', '53582', '18717', '90226', '50530', '94203', '99447', '27670', '96577', '57856', '56372', '16165', '23427', '54561',
| '28806', '44439', '22926', '30123', '61451', '92397', '56979', '92309', '70873', '13355', '21801', '46346', '37562', '56458',
| '28286', '47306', '99555', '69399', '26234', '47546', '49661', '88601', '35943', '39936', '25632', '24611', '44166', '56648',
| '30379', '59785', '11110', '14329', '93815', '52226', '71381', '13842', '25612', '63294', '14664', '21077', '82626', '18799',
| '60915', '81020', '56447', '76619', '11433', '13414', '42548', '92713', '70467', '30884', '47484', '16072', '38936', '13036',
| '88376', '45539', '35901', '19506', '65690', '73957', '71850', '49231', '14276', '20005', '18384', '76615', '11635', '38177',
| '55607', '41369', '95447', '58581', '58149', '91946', '33790', '76232', '75692', '95464', '22246', '51061', '56692', '53121',
| '77209', '15482', '10688', '14868', '45907', '73520', '72666', '25734', '17959', '24677', '66446', '94627', '53535', '15560',
| '41967', '69297', '11929', '59403', '33283', '52232', '57350', '43933', '40921', '36635', '10827', '71286', '19736', '80619',
| '25251', '95042', '15526', '36496', '55854', '49124', '81980', '35375', '49157', '63512', '28944', '14946', '36503', '54010',
| '18767', '23969', '43905', '66979', '33113', '21286', '58471', '59080', '13395', '79144', '70373', '67031', '38360', '26705',
| '50906', '52406', '26066', '73146', '15884', '31897', '30045', '61068', '45550', '92454', '13376', '14354', '19770', '22928',
| '97790', '50723', '46081', '30202', '14410', '20223', '88500', '67298', '13261', '14172', '81410', '93578', '83583', '46047',
| '94167', '82564', '21156', '15799', '86709', '37931', '74703', '83103', '23054', '70470', '72008', '35709', '91911', '69998',
| '20961', '70070', '63197', '54853', '88191', '91830', '49521', '19454', '81450', '89091', '62378', '31904', '61869', '51744',
| '36580', '85778', '36871', '48121', '28810', '83712', '45486', '67393', '26935', '42393', '20132', '55349', '86057', '21309',
| '80218', '10094', '11357', '48819', '39734', '40758', '30432', '21204', '29467', '30214', '61024', '55307', '74621', '11622',
| '68908', '33032', '52868', '99194', '99900', '84936', '69036', '99149', '45013', '32895', '59004', '32322', '14933', '32936',
| '33562', '72550', '27385', '58049', '58200', '16808', '21360', '32961', '18586', '79307', '15492'
| )) a01
| inner join
| (select ca_zip
| from (SELECT substr(ca_zip,1,5) ca_zip,count(*) cnt
| FROM customer_address, customer
| WHERE ca_address_sk = c_current_addr_sk and
| c_preferred_cust_flag='Y'
| group by ca_zip
| having count(*) > 10)A1
| ) b11
| on (a01.ca_zip = b11.ca_zip )) A2
| where ss_store_sk = s_store_sk
| and ss_sold_date_sk = d_date_sk
| and ss_sold_date_sk between 2451271 and 2451361
| and d_qoy = 2 and d_year = 1999
| and (substr(s_zip,1,2) = substr(a2.ca_zip,1,2))
| group by s_store_name
| order by s_store_name
|limit 100
|-- end query 8 in stream 0 using template query8.tpl
""".stripMargin),
("q82", """
|-- start query 1 in stream 0 using template query82.tpl
|select
| i_item_id,
| i_item_desc,
| i_current_price
|from
| store_sales
| join item on (store_sales.ss_item_sk = item.i_item_sk)
| join inventory on (item.i_item_sk = inventory.inv_item_sk)
| -- join date_dim on (inventory.inv_date_sk = date_dim.d_date_sk)
|where
| i_current_price between 30 and 30 + 30
| and i_manufact_id in (437, 129, 727, 663)
| and inv_quantity_on_hand between 100 and 500
| and inv_date between '2002-05-30' and '2002-07-29'
| -- and d_date between cast('2002-05-30' as date) and (cast('2002-05-30' as date) + 60)
|group by
| i_item_id,
| i_item_desc,
| i_current_price
|order by
| i_item_id
|limit 100
|-- end query 1 in stream 0 using template query82.tpl
""".stripMargin),
("q89", """
|-- start query 1 in stream 0 using template query89.tpl
|select
| *
|from
| (select
| i_category,
| i_class,
| i_brand,
| s_store_name,
| s_company_name,
| d_moy,
| sum(ss_sales_price) sum_sales
| -- avg(sum(ss_sales_price)) over (partition by i_category, i_brand, s_store_name, s_company_name) avg_monthly_sales
| from
| store_sales
| join item on (store_sales.ss_item_sk = item.i_item_sk)
| join store on (store_sales.ss_store_sk = store.s_store_sk)
| join date_dim on (store_sales.ss_sold_date_sk = date_dim.d_date_sk)
| where
| -- ss_date between '2000-01-01' and '2000-12-31'
| ss_sold_date_sk between 2451545 and 2451910 -- partition key filter
| and d_year in (2000)
| and ((i_category in('Home', 'Books', 'Electronics')
| and i_class in('wallpaper', 'parenting', 'musical'))
| or (i_category in('Shoes', 'Jewelry', 'Men')
| and i_class in('womens', 'birdal', 'pants'))
| )
| group by
| i_category,
| i_class,
| i_brand,
| s_store_name,
| s_company_name,
| d_moy
| ) tmp1
|-- where
|-- case when(avg_monthly_sales <> 0) then(abs(sum_sales - avg_monthly_sales) / avg_monthly_sales) else null end > 0.1
|order by
| -- sum_sales - avg_monthly_sales,
| sum_sales,
| s_store_name
|limit 100
|-- end query 1 in stream 0 using template query89.tpl
""".stripMargin),
("q98", """
|-- start query 1 in stream 0 using template query98.tpl
|select
| i_item_desc,
| i_category,
| i_class,
| i_current_price,
| sum(ss_ext_sales_price) as itemrevenue
| -- sum(ss_ext_sales_price) * 100 / sum(sum(ss_ext_sales_price)) over (partition by i_class) as revenueratio
|from
| store_sales
| join item on (store_sales.ss_item_sk = item.i_item_sk)
| join date_dim on (store_sales.ss_sold_date_sk = date_dim.d_date_sk)
|where
| i_category in('Jewelry', 'Sports', 'Books')
| -- and d_date between cast('2001-01-12' as date) and (cast('2001-01-12' as date) + 30)
| -- and d_date between '2001-01-12' and '2001-02-11'
| -- and ss_date between '2001-01-12' and '2001-02-11'
| -- and ss_sold_date_sk between 2451922 and 2451952 -- partition key filter
| and ss_sold_date_sk between 2451911 and 2451941 -- partition key filter (1 calendar month)
| and d_date between '2001-01-01' and '2001-01-31'
|group by
| i_item_id,
| i_item_desc,
| i_category,
| i_class,
| i_current_price
|order by
| i_category,
| i_class,
| i_item_id,
| i_item_desc
| -- revenueratio
|limit 1000
|-- end query 1 in stream 0 using template query98.tpl
""".stripMargin),
("ss_max", """
|select
| count(*) as total,
| count(ss_sold_date_sk) as not_null_total,
| count(distinct ss_sold_date_sk) as unique_days,
| max(ss_sold_date_sk) as max_ss_sold_date_sk,
| max(ss_sold_time_sk) as max_ss_sold_time_sk,
| max(ss_item_sk) as max_ss_item_sk,
| max(ss_customer_sk) as max_ss_customer_sk,
| max(ss_cdemo_sk) as max_ss_cdemo_sk,
| max(ss_hdemo_sk) as max_ss_hdemo_sk,
| max(ss_addr_sk) as max_ss_addr_sk,
| max(ss_store_sk) as max_ss_store_sk,
| max(ss_promo_sk) as max_ss_promo_sk
|from store_sales
""".stripMargin)
).map {
case (name, sqlText) => Query(name, sqlText, description = "", executionMode = CollectResults)
}
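  // Index the queries by name so the interactive/reporting/deep-analytic groupings at the end
  // of this trait can be assembled by simple lookup.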
val queriesMap = queries.map(q => q.name -> q).toMap
val originalQueries = Seq(
("q3", """
select d_year
,item.i_brand_id brand_id
,item.i_brand brand
,sum(ss_ext_sales_price) sum_agg
from date_dim dt
JOIN store_sales on dt.d_date_sk = store_sales.ss_sold_date_sk
JOIN item on store_sales.ss_item_sk = item.i_item_sk
where
item.i_manufact_id = 436
and dt.d_moy=12
group by d_year
,item.i_brand
,item.i_brand_id
order by d_year
,sum_agg desc
,brand_id
limit 100"""),
("q7", """
select i_item_id,
avg(ss_quantity) agg1,
avg(ss_list_price) agg2,
avg(ss_coupon_amt) agg3,
avg(ss_sales_price) agg4
from store_sales
JOIN customer_demographics ON store_sales.ss_cdemo_sk = customer_demographics.cd_demo_sk
JOIN date_dim ON store_sales.ss_sold_date_sk = date_dim.d_date_sk
JOIN item ON store_sales.ss_item_sk = item.i_item_sk
JOIN promotion ON store_sales.ss_promo_sk = promotion.p_promo_sk
where
cd_gender = 'F' and
cd_marital_status = 'W' and
cd_education_status = 'Primary' and
(p_channel_email = 'N' or p_channel_event = 'N') and
d_year = 1998
group by i_item_id
order by i_item_id
limit 100"""),
("q19", """
select i_brand_id, i_brand, i_manufact_id, i_manufact,
sum(ss_ext_sales_price) as ext_price
from date_dim
JOIN store_sales ON date_dim.d_date_sk = store_sales.ss_sold_date_sk
JOIN item ON store_sales.ss_item_sk = item.i_item_sk
JOIN customer ON store_sales.ss_customer_sk = customer.c_customer_sk
JOIN customer_address ON customer.c_current_addr_sk = customer_address.ca_address_sk
JOIN store ON store_sales.ss_store_sk = store.s_store_sk
where
i_manager_id=7
and d_moy=11
and d_year=1999
and substr(ca_zip,1,5) <> substr(s_zip,1,5)
group by i_brand
,i_brand_id
,i_manufact_id
,i_manufact
order by ext_price desc
,i_brand
,i_brand_id
,i_manufact_id
,i_manufact
limit 100"""),
("q27", """
select i_item_id,
s_state,
avg(ss_quantity) agg1,
avg(ss_list_price) agg2,
avg(ss_coupon_amt) agg3,
avg(ss_sales_price) agg4
from store_sales
JOIN customer_demographics ON store_sales.ss_cdemo_sk = customer_demographics.cd_demo_sk
JOIN date_dim ON store_sales.ss_sold_date_sk = date_dim.d_date_sk
JOIN store ON store_sales.ss_store_sk = store.s_store_sk
JOIN item ON store_sales.ss_item_sk = item.i_item_sk
where
cd_gender = 'F' and
cd_marital_status = 'W' and
cd_education_status = 'Primary' and
d_year = 1998 and
s_state = 'TN'
group by i_item_id, s_state
order by i_item_id
,s_state
limit 100"""),
("q34", """
select c_last_name
,c_first_name
,c_salutation
,c_preferred_cust_flag
,ss_ticket_number
,cnt from
(select ss_ticket_number
,ss_customer_sk
,count(*) cnt
from store_sales
JOIN date_dim ON store_sales.ss_sold_date_sk = date_dim.d_date_sk
JOIN store ON store_sales.ss_store_sk = store.s_store_sk
JOIN household_demographics ON store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk
where
(date_dim.d_dom between 1 and 3 or date_dim.d_dom between 25 and 28)
and (household_demographics.hd_buy_potential = '>10000' or
household_demographics.hd_buy_potential = 'unknown')
and household_demographics.hd_vehicle_count > 0
and (case when household_demographics.hd_vehicle_count > 0
then household_demographics.hd_dep_count/ household_demographics.hd_vehicle_count
else null
end) > 1.2
and date_dim.d_year in (1998,1998+1,1998+2)
and store.s_county in ('Williamson County','Williamson County','Williamson County','Williamson County',
'Williamson County','Williamson County','Williamson County','Williamson County')
group by ss_ticket_number,ss_customer_sk) dn
JOIN customer ON dn.ss_customer_sk = customer.c_customer_sk
WHERE
cnt between 15 and 20
order by
c_last_name,
c_first_name,
c_salutation,
c_preferred_cust_flag desc,
ss_ticket_number,
cnt"""),
("q42", """
select d_year
,item.i_category_id
,item.i_category
,sum(ss_ext_sales_price) as s
from date_dim dt
JOIN store_sales ON dt.d_date_sk = store_sales.ss_sold_date_sk
JOIN item ON store_sales.ss_item_sk = item.i_item_sk
where
item.i_manager_id = 1
and dt.d_moy=12
and dt.d_year=1998
group by d_year
,item.i_category_id
,item.i_category
order by s desc,d_year
,i_category_id
,i_category
limit 100"""),
("q43", """
select s_store_name, s_store_id,
sum(case when (d_day_name='Sunday') then ss_sales_price else null end) sun_sales,
sum(case when (d_day_name='Monday') then ss_sales_price else null end) mon_sales,
sum(case when (d_day_name='Tuesday') then ss_sales_price else null end) tue_sales,
sum(case when (d_day_name='Wednesday') then ss_sales_price else null end) wed_sales,
sum(case when (d_day_name='Thursday') then ss_sales_price else null end) thu_sales,
sum(case when (d_day_name='Friday') then ss_sales_price else null end) fri_sales,
sum(case when (d_day_name='Saturday') then ss_sales_price else null end) sat_sales
from date_dim
JOIN store_sales ON date_dim.d_date_sk = store_sales.ss_sold_date_sk
JOIN store ON store.s_store_sk = store_sales.ss_store_sk
where
s_gmt_offset = -5 and
d_year = 1998
group by s_store_name, s_store_id
order by s_store_name, s_store_id,sun_sales,mon_sales,tue_sales,wed_sales,thu_sales,fri_sales,sat_sales
limit 100"""),
("q46", """
select c_last_name
,c_first_name
,ca_city
,bought_city
,ss_ticket_number
,amt,profit
from
(select ss_ticket_number
,ss_customer_sk
,ca_city as bought_city
,sum(ss_coupon_amt) as amt
,sum(ss_net_profit) as profit
from store_sales
JOIN date_dim ON store_sales.ss_sold_date_sk = date_dim.d_date_sk
JOIN store ON store_sales.ss_store_sk = store.s_store_sk
JOIN household_demographics ON store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk
JOIN customer_address ON store_sales.ss_addr_sk = customer_address.ca_address_sk
where
(household_demographics.hd_dep_count = 5 or
household_demographics.hd_vehicle_count= 3)
and date_dim.d_dow in (6,0)
and date_dim.d_year in (1999,1999+1,1999+2)
and store.s_city in ('Midway','Fairview','Fairview','Fairview','Fairview')
group by ss_ticket_number,ss_customer_sk,ss_addr_sk,ca_city) dn
JOIN customer ON dn.ss_customer_sk = customer.c_customer_sk
JOIN customer_address ON customer.c_current_addr_sk = customer_address.ca_address_sk
where
customer_address.ca_city <> dn.bought_city
order by c_last_name
,c_first_name
,ca_city
,bought_city
,ss_ticket_number
limit 100"""),
("q52", """
select d_year
,item.i_brand_id brand_id
,item.i_brand brand
,sum(ss_ext_sales_price) as ext_price
from date_dim
JOIN store_sales ON date_dim.d_date_sk = store_sales.ss_sold_date_sk
JOIN item ON store_sales.ss_item_sk = item.i_item_sk
where
item.i_manager_id = 1
and date_dim.d_moy=12
and date_dim.d_year=1998
group by d_year
,item.i_brand
,item.i_brand_id
order by d_year
,ext_price desc
,brand_id
limit 100"""),
("q55", """
select i_brand_id as brand_id, i_brand as brand,
sum(store_sales.ss_ext_sales_price) ext_price
from date_dim
JOIN store_sales ON date_dim.d_date_sk = store_sales.ss_sold_date_sk
JOIN item ON store_sales.ss_item_sk = item.i_item_sk
where
i_manager_id=36
and d_moy=12
and d_year=2001
group by i_brand, i_brand_id
order by ext_price desc, brand_id
limit 100 """),
("q59",
"""
|select
| s_store_name1,
| s_store_id1,
| d_week_seq1,
| sun_sales1 / sun_sales2,
| mon_sales1 / mon_sales2,
| tue_sales1 / tue_sales2,
| wed_sales1 / wed_sales2,
| thu_sales1 / thu_sales2,
| fri_sales1 / fri_sales2,
| sat_sales1 / sat_sales2
|from
| (select
| /*+ MAPJOIN(store, date_dim) */
| s_store_name s_store_name1,
| wss.d_week_seq d_week_seq1,
| s_store_id s_store_id1,
| sun_sales sun_sales1,
| mon_sales mon_sales1,
| tue_sales tue_sales1,
| wed_sales wed_sales1,
| thu_sales thu_sales1,
| fri_sales fri_sales1,
| sat_sales sat_sales1
| from
| (select
| /*+ MAPJOIN(date_dim) */
| d_week_seq,
| ss_store_sk,
| sum(case when(d_day_name = 'Sunday') then ss_sales_price else null end) sun_sales,
| sum(case when(d_day_name = 'Monday') then ss_sales_price else null end) mon_sales,
| sum(case when(d_day_name = 'Tuesday') then ss_sales_price else null end) tue_sales,
| sum(case when(d_day_name = 'Wednesday') then ss_sales_price else null end) wed_sales,
| sum(case when(d_day_name = 'Thursday') then ss_sales_price else null end) thu_sales,
| sum(case when(d_day_name = 'Friday') then ss_sales_price else null end) fri_sales,
| sum(case when(d_day_name = 'Saturday') then ss_sales_price else null end) sat_sales
| from
| store_sales
| join date_dim on (store_sales.ss_sold_date_sk = date_dim.d_date_sk)
| where
| -- ss_date between '1998-10-01' and '1999-09-30'
| ss_sold_date_sk between 2451088 and 2451452
| group by
| d_week_seq,
| ss_store_sk
| ) wss
| join store on (wss.ss_store_sk = store.s_store_sk)
| join date_dim d on (wss.d_week_seq = d.d_week_seq)
| where
| d_month_seq between 1185 and 1185 + 11
| ) y
| join
| (select
| /*+ MAPJOIN(store, date_dim) */
| s_store_name s_store_name2,
| wss.d_week_seq d_week_seq2,
| s_store_id s_store_id2,
| sun_sales sun_sales2,
| mon_sales mon_sales2,
| tue_sales tue_sales2,
| wed_sales wed_sales2,
| thu_sales thu_sales2,
| fri_sales fri_sales2,
| sat_sales sat_sales2
| from
| (select
| /*+ MAPJOIN(date_dim) */
| d_week_seq,
| ss_store_sk,
| sum(case when(d_day_name = 'Sunday') then ss_sales_price else null end) sun_sales,
| sum(case when(d_day_name = 'Monday') then ss_sales_price else null end) mon_sales,
| sum(case when(d_day_name = 'Tuesday') then ss_sales_price else null end) tue_sales,
| sum(case when(d_day_name = 'Wednesday') then ss_sales_price else null end) wed_sales,
| sum(case when(d_day_name = 'Thursday') then ss_sales_price else null end) thu_sales,
| sum(case when(d_day_name = 'Friday') then ss_sales_price else null end) fri_sales,
| sum(case when(d_day_name = 'Saturday') then ss_sales_price else null end) sat_sales
| from
| store_sales
| join date_dim on (store_sales.ss_sold_date_sk = date_dim.d_date_sk)
| where
| -- ss_date between '1999-10-01' and '2000-09-30'
| ss_sold_date_sk between 2451088 and 2451452
| group by
| d_week_seq,
| ss_store_sk
| ) wss
| join store on (wss.ss_store_sk = store.s_store_sk)
| join date_dim d on (wss.d_week_seq = d.d_week_seq)
| where
| d_month_seq between 1185 + 12 and 1185 + 23
| ) x
| on (y.s_store_id1 = x.s_store_id2)
|where
| d_week_seq1 = d_week_seq2 - 52
|order by
| s_store_name1,
| s_store_id1,
| d_week_seq1
|limit 100
""".stripMargin),
("q68", """
select c_last_name ,c_first_name ,ca_city
,bought_city ,ss_ticket_number ,extended_price
,extended_tax ,list_price
from (select ss_ticket_number
,ss_customer_sk
,ca_city as bought_city
,sum(ss_ext_sales_price) as extended_price
,sum(ss_ext_list_price) as list_price
,sum(ss_ext_tax) as extended_tax
from store_sales
JOIN date_dim ON store_sales.ss_sold_date_sk = date_dim.d_date_sk
JOIN store ON store_sales.ss_store_sk = store.s_store_sk
JOIN household_demographics ON store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk
JOIN customer_address ON store_sales.ss_addr_sk = customer_address.ca_address_sk
where
date_dim.d_dom between 1 and 2
and (household_demographics.hd_dep_count = 5 or
household_demographics.hd_vehicle_count= 3)
and date_dim.d_year in (1999,1999+1,1999+2)
and store.s_city in ('Midway','Fairview')
group by ss_ticket_number
,ss_customer_sk
,ss_addr_sk,ca_city) dn
JOIN customer ON dn.ss_customer_sk = customer.c_customer_sk
JOIN customer_address ON customer.c_current_addr_sk = customer_address.ca_address_sk
where
customer_address.ca_city <> dn.bought_city
order by c_last_name
,ss_ticket_number
limit 100"""),
("q73", """
select c_last_name
,c_first_name
,c_salutation
,c_preferred_cust_flag
,ss_ticket_number
,cnt from
(select ss_ticket_number
,ss_customer_sk
,count(*) cnt
from store_sales
JOIN date_dim ON store_sales.ss_sold_date_sk = date_dim.d_date_sk
JOIN store ON store_sales.ss_store_sk = store.s_store_sk
JOIN household_demographics ON store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk
where
date_dim.d_dom between 1 and 2
and (household_demographics.hd_buy_potential = '>10000' or
household_demographics.hd_buy_potential = 'unknown')
and household_demographics.hd_vehicle_count > 0
and case when household_demographics.hd_vehicle_count > 0 then
household_demographics.hd_dep_count/ household_demographics.hd_vehicle_count else null end > 1
and date_dim.d_year in (1998,1998+1,1998+2)
and store.s_county in ('Williamson County','Franklin Parish','Bronx County','Orange County')
group by ss_ticket_number,ss_customer_sk) dj
JOIN customer ON dj.ss_customer_sk = customer.c_customer_sk
where
cnt between 5 and 10
order by cnt desc"""),
("q79", """
select
c_last_name,c_first_name,substr(s_city,1,30) as s_city,ss_ticket_number,amt,profit
from
(select ss_ticket_number
,ss_customer_sk
,store.s_city
,sum(ss_coupon_amt) amt
,sum(ss_net_profit) profit
from store_sales
JOIN date_dim ON store_sales.ss_sold_date_sk = date_dim.d_date_sk
JOIN store ON store_sales.ss_store_sk = store.s_store_sk
JOIN household_demographics ON store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk
where
(household_demographics.hd_dep_count = 8 or household_demographics.hd_vehicle_count > 0)
and date_dim.d_dow = 1
and date_dim.d_year in (1998,1998+1,1998+2)
and store.s_number_employees between 200 and 295
group by ss_ticket_number,ss_customer_sk,ss_addr_sk,store.s_city) ms
JOIN customer on ms.ss_customer_sk = customer.c_customer_sk
order by c_last_name,c_first_name,s_city, profit
limit 100"""),
("qSsMax",
"""
|select
| count(*) as total,
| count(ss_sold_date_sk) as not_null_total,
| count(distinct ss_sold_date_sk) as unique_days,
| max(ss_sold_date_sk) as max_ss_sold_date_sk,
| max(ss_sold_time_sk) as max_ss_sold_time_sk,
| max(ss_item_sk) as max_ss_item_sk,
| max(ss_customer_sk) as max_ss_customer_sk,
| max(ss_cdemo_sk) as max_ss_cdemo_sk,
| max(ss_hdemo_sk) as max_ss_hdemo_sk,
| max(ss_addr_sk) as max_ss_addr_sk,
| max(ss_store_sk) as max_ss_store_sk,
| max(ss_promo_sk) as max_ss_promo_sk
|from store_sales
""".stripMargin)
).map { case (name, sqlText) =>
Query(name, sqlText, description = "original query", executionMode = CollectResults)
}
val interactiveQueries =
Seq("q19", "q42", "q52", "q55", "q63", "q68", "q73", "q98").map(queriesMap)
val reportingQueries = Seq("q3","q7", "q27","q43", "q53", "q89").map(queriesMap)
val deepAnalyticQueries = Seq("q34", "q46", "q59", "q65", "q79", "ss_max").map(queriesMap)
val impalaKitQueries = interactiveQueries ++ reportingQueries ++ deepAnalyticQueries
}
| levyx/spark-sql-perf | src/main/scala/com/databricks/spark/sql/perf/tpcds/ImpalaKitQueries.scala | Scala | apache-2.0 | 69,628 |
package com.malpeza.solid.isp.entities
class CurrencyRatesService {
}
object CurrencyRatesService {
def apply() = new CurrencyRatesService()
} | lsolano/blog.solid.demo | scala/src/main/scala/com/malpeza/solid/isp/entities/CurrencyRatesService.scala | Scala | mit | 149 |
/* Copyright 2017-18, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.tensorflow.api.ops.io.data
import org.platanios.tensorflow.api.core.Shape
import org.platanios.tensorflow.api.ops.{Basic, Op, Output}
import org.platanios.tensorflow.api.tensors.Tensor
import org.platanios.tensorflow.api.types.{DataType, INT64}
/** Dataset that wraps the application of the `range` op.
*
* $OpDocDatasetRange
*
* @param start Starting value of the number sequence.
* @param limit Ending value (exclusive) of the number sequence.
* @param delta Difference between consecutive numbers in the sequence.
* @param name Name for this dataset.
*
* @author Emmanouil Antonios Platanios
*/
case class RangeDataset(
start: Long,
limit: Long,
delta: Long = 1L,
override val name: String = "RangeDataset"
) extends Dataset[Tensor, Output, DataType, Shape](name) {
override def createHandle(): Output = {
Op.Builder(opType = "RangeDataset", name = name)
.addInput(Op.createWithNameScope(name)(Basic.constant(start)))
.addInput(Op.createWithNameScope(name)(Basic.constant(limit)))
.addInput(Op.createWithNameScope(name)(Basic.constant(delta)))
.setAttribute("output_types", flattenedOutputDataTypes.toArray)
.setAttribute("output_shapes", flattenedOutputShapes.toArray)
.build().outputs(0)
}
override def outputDataTypes: DataType = INT64
override def outputShapes: Shape = Shape.scalar()
}
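// Illustrative semantics (values are hypothetical): RangeDataset(start = 0L, limit = 10L, delta = 2L)
// yields the INT64 scalars 0, 2, 4, 6, 8 -- `limit` is exclusive, as the scaladoc above notes.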
object RangeDataset {
/** @define OpDocDatasetRange
* The dataset `range` op creates a new dataset that contains a range of values.
*/
private[data] trait Documentation
}
| eaplatanios/tensorflow | tensorflow/scala/api/src/main/scala/org/platanios/tensorflow/api/ops/io/data/RangeDataset.scala | Scala | apache-2.0 | 2,242 |
package scaladget.highlightjs
import org.querki.jsext
import org.scalajs.dom.Element
import jsext._
import scala.scalajs.js
import scala.scalajs.js.annotation._
/**
* @see [[https://highlightjs.org/usage/]]
*/
@js.native
@JSImport("highlight.js", JSImport.Namespace)
object HighlightJS extends HighlightStatic
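// Minimal usage sketch (assumes a Scala.js context; the asInstanceOf cast and the sample snippet are
// illustrative, not taken from this file):
//   HighlightJS.registerLanguage("scala", scalamode.asInstanceOf[js.Function])
//   val res = HighlightJS.highlightAuto("val x = 1")
//   res.value // HTML string with highlighting markup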
@js.native
@JSImport("highlight.js/lib/languages/scala.js", JSImport.Namespace)
object scalamode extends js.Object
@js.native
trait HighlightStatic extends js.Object {
/**
* Core highlighting function. Accepts a language name, or an alias, and a string with the code to highlight. The ignore_illegals parameter, when present and evaluates to a true value, forces highlighting to finish even in case of detecting illegal syntax for the language instead of throwing an exception. The continuation is an optional mode stack representing unfinished parsing. When present, the function will restart parsing from this state instead of initializing a new one.
*
* @return Returns an object with the following properties:
* language: language name, same as the one passed into a function, returned for consistency with highlightAuto
* relevance: integer value
* value: HTML string with highlighting markup
* top: top of the current mode stack
*/
def highlight(name: String, value: String, ignoreIllegals: js.UndefOr[Boolean] = js.native, continuation: js.UndefOr[js.Object] = js.native): HighlightJSResult = js.native
/**
* Highlighting with language detection. Accepts a string with the code to highlight and an optional array of language names and aliases restricting detection to only those languages. The subset can also be set with configure, but the local parameter overrides the option if set.
*
* @return Returns an object with the following properties:
* language: detected language
* relevance: integer value
* value: HTML string with highlighting markup
* second_best: object with the same structure for second-best heuristically detected language, may be absent
*/
def highlightAuto(value: String, languageSubset: js.UndefOr[js.Array[String]] = js.native): HighlightJSResult = js.native
/**
* Applies highlighting to all `<pre><code>..</code></pre>` blocks on a page.
*/
def initHighlighting(): Unit = js.native
/**
* Attaches highlighting to the page load event.
*/
def initHighlightingOnLoad(): Unit = js.native
/**
* Applies highlighting to a DOM node containing code.
* This function is the one to use to apply highlighting dynamically after page load or within initialization code of third-party Javascript frameworks.
* The function uses language detection by default but you can specify the language in the class attribute of the DOM node. See the class reference for all available language names and aliases.
*/
def highlightBlock(el: Element):Unit = js.native
/**
* Post-processing of the highlighted markup. Currently consists of replacing indentation TAB characters and using <br> tags instead of new-line characters. Options are set globally with configure.
* Accepts a string with the highlighted markup.
*/
val fixMarkup: js.UndefOr[String] = js.native
/**
* Configures global options:
   *
* tabReplace: a string used to replace TAB characters in indentation.
* useBR: a flag to generate <br> tags instead of new-line characters in the output, useful when code is marked up using a non-<pre> container.
* classPrefix: a string prefix added before class names in the generated markup, used for backwards compatibility with stylesheets.
* languages: an array of language names and aliases restricting auto detection to only these languages.
   * Accepts an object representing options with the values to be updated. Other options don't change
* {{{
* hljs.configure({
* tabReplace: ' ', // 4 spaces
* classPrefix: '' // don't append class prefix
   *   // … other options aren't changed
* });
* hljs.initHighlighting();
* }}}
*/
val configure: js.UndefOr[js.Object] = js.native
/**
* Adds new language to the library under the specified name. Used mostly internally.
*
* @param name A string with the name of the language being registered
* @param language A function that returns an object which represents the language definition. The function is passed the hljs object to be able to use common regular expressions defined within it.
*/
def registerLanguage(name: String, language: js.Function): Unit = js.native
/**
* Returns the languages names list.
*/
def listLanguages(): js.Array[String] = js.native
/**
* Looks up a language by name or alias.
* Returns the language object if found, `undefined` otherwise.
*/
def getLanguage(name: String): js.UndefOr[js.Object] = js.native
}
object HighlightStatic extends HighlightStaticBuilder(noOpts)
class HighlightStaticBuilder(val dict: OptMap) extends JSOptionBuilder[HighlightStatic, HighlightStaticBuilder](new HighlightStaticBuilder(_)) {
def fixMarkup(v: String) = jsOpt("fixMarkup", v)
def configure(v: js.Object) = jsOpt("configure", v)
}
@js.native
trait HighlightJSResult extends js.Object {
/**
* Detected language
*/
def language: String = js.native
/**
* Integer value
*/
def relevance: Int = js.native
/**
* HTML string with highlighting markup
*/
def value: String = js.native
/**
* Top of the current mode stack
*/
def top: js.Object = js.native
// /**
// * Object with the same structure for second-best heuristically detected language, may be absent
// */
// @JSName("second_best")
// def secondBest: js.Object = js.native
}
| openmole/scaladget | highlightjs/src/main/scala/scaladget/highlightjs/HighlightJS.scala | Scala | agpl-3.0 | 5,871 |
package com.flowy.binance
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import com.flowy.common.utils.ServerConfig
import com.flowy.common.utils.sql.{DatabaseConfig, SqlDatabase}
import com.flowy.common.database.postgres.{SqlMarketUpdateDao, SqlTheEverythingBagelDao}
import com.typesafe.config.ConfigFactory
import scala.concurrent.ExecutionContext
object Main extends App {
// Override the configuration of the port when specified as program argument
val port = if (args.isEmpty) "2556" else args(0)
lazy val config = new DatabaseConfig with ServerConfig {
override def rootConfig = ConfigFactory.parseString(s"akka.remote.netty.tcp.port=$port").
withFallback(ConfigFactory.parseString("akka.cluster.roles = [binance-websocket]")).
withFallback(ConfigFactory.load())
}
implicit val system = ActorSystem("cluster", config.rootConfig)
implicit val executor: ExecutionContext = system.dispatcher
implicit val materializer: ActorMaterializer = ActorMaterializer()
lazy val sqlDatabase = SqlDatabase.create(config)
lazy val bagel = new SqlTheEverythingBagelDao(sqlDatabase)
lazy val binanceDao = new SqlBinanceDao(sqlDatabase)
  val binanceFeed = system.actorOf(BinanceWebsocket.props(binanceDao), name = "binance-websocket")
}
| asciiu/fomo | binance-websocket/src/main/scala/com/flowy/binance/Main.scala | Scala | apache-2.0 | 1,289 |
package nodes.learning
import scala.reflect.ClassTag
import scala.util.Random
import scala.collection.mutable.ListBuffer
import breeze.linalg._
import breeze.math._
import breeze.numerics._
import edu.berkeley.cs.amplab.mlmatrix.RowPartitionedMatrix
import edu.berkeley.cs.amplab.mlmatrix.util.{Utils => MLMatrixUtils}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import pipelines.Logging
import workflow.LabelEstimator
import utils._
/**
* Solves a kernel ridge regression problem of the form
* (K(x, x) + \lambda * I) * W = Y
* using Gauss-Seidel based Block Coordinate Descent.
*
* The function K is specified by the kernel generator and this class
* uses the dual formulation of the ridge regression objective to improve
* numerical stability.
*
* @param kernelGenerator kernel function to apply to create kernel matrix
* @param lambda L2 regularization value
* @param blockSize number of columns in each block of BCD
* @param numEpochs number of epochs of BCD to run
* @param blockPermuter seed used for permuting column blocks in BCD
* @param blocksBeforeCheckpoint frequency at which intermediate data should be checkpointed
*/
class KernelRidgeRegression[T: ClassTag](
kernelGenerator: KernelGenerator[T],
lambda: Double,
blockSize: Int,
numEpochs: Int,
blockPermuter: Option[Long] = None,
blocksBeforeCheckpoint: Int = 25)
extends LabelEstimator[T, DenseVector[Double], DenseVector[Double]] {
override def fit(
data: RDD[T],
labels: RDD[DenseVector[Double]]): KernelBlockLinearMapper[T] = {
val kernelTransformer = kernelGenerator.fit(data)
val trainKernelMat = kernelTransformer(data)
val nTrain = data.count
val wLocals = KernelRidgeRegression.trainWithL2(
trainKernelMat,
labels,
lambda,
blockSize,
numEpochs,
blockPermuter,
blocksBeforeCheckpoint)
new KernelBlockLinearMapper(wLocals, blockSize, kernelTransformer, nTrain,
blocksBeforeCheckpoint)
}
}
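// Hypothetical wiring (the kernel generator value and all parameter settings below are assumptions,
// not taken from this file):
//   val krr = new KernelRidgeRegression(someKernelGenerator, lambda = 1e-3, blockSize = 4096, numEpochs = 3)
//   val model = krr.fit(trainFeatures, trainLabels) // yields a KernelBlockLinearMapper[T]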
object KernelRidgeRegression extends Logging {
/**
* Solves a linear system of the form (K + \lambda * I) * W = Y
* using Gauss-Seidel based Block Coordinate Descent as described in
* http://arxiv.org/abs/1602.05310
*
* K is assumed to be a symmetric kernel matrix generated using a kernel
* generator.
*
* @param data the kernel matrix to use
* @param labels training labels RDD
* @param lambda L2 regularization parameter
* @param blockSize number of columns per block of Gauss-Seidel solve
* @param numEpochs number of passes of co-ordinate descent to run
* @param blockPermuter seed to use for permuting column blocks
* @param blocksBeforeCheckpoint frequency at which intermediate data should be checkpointed
*
* @return a model that can be applied on test data.
*/
def trainWithL2[T: ClassTag](
trainKernelMat: KernelMatrix,
labels: RDD[DenseVector[Double]],
lambda: Double,
blockSize: Int,
numEpochs: Int,
blockPermuter: Option[Long],
blocksBeforeCheckpoint: Int = 25): Seq[DenseMatrix[Double]] = {
val nTrain = labels.count.toInt
val nClasses = labels.first.length
    // Currently we only support one lambda, but the code
    // is structured to support multiple lambdas
val lambdas = IndexedSeq(lambda)
val numBlocks = math.ceil(nTrain.toDouble/blockSize).toInt
val preFixLengths = labels.mapPartitions { part =>
Iterator.single(part.length)
}.collect().scanLeft(0)(_+_)
val preFixLengthsBC = labels.context.broadcast(preFixLengths)
// Convert each partition from Iterator[DenseVector[Double]] to
// a single DenseMatrix[Double]. Also cache this as n*k should be small.
val labelsMat = labels.mapPartitions { part =>
MatrixUtils.rowsToMatrixIter(part)
}.setName("labelsMat").cache()
labelsMat.count
val labelsRPM = RowPartitionedMatrix.fromMatrix(labelsMat)
// Model is same size as labels
// NOTE: We create one RDD with a array of matrices as its cheaper
// to compute residuals that way
var model = labelsMat.map { x =>
lambdas.map { l =>
DenseMatrix.zeros[Double](x.rows, x.cols)
}
}.setName("model").cache()
model.count
// We also keep a local copy of the model to use computing residual
// NOTE: The last block might have fewer than blockSize features
// but we will correct this inside the solve
val wLocals = lambdas.map { l =>
(0 until numBlocks).map { x =>
DenseMatrix.zeros[Double](blockSize, nClasses)
}.to[ListBuffer]
}
val blockShuffler = blockPermuter.map(seed => new Random(seed))
(0 until numEpochs).foreach { pass =>
val inOrder = (0 until numBlocks).toIndexedSeq
val blockOrder = blockShuffler.map(rnd => rnd.shuffle(inOrder)).getOrElse(inOrder)
blockOrder.foreach { block =>
val blockBegin = System.nanoTime
// Use the kernel block generator to get kernel matrix for these column blocks
val blockIdxs = (blockSize * block) until (math.min(nTrain, (block + 1) * blockSize))
val blockIdxsSeq = blockIdxs.toArray
val blockIdxsBC = labelsMat.context.broadcast(blockIdxsSeq)
val kernelBlockMat = trainKernelMat(blockIdxsSeq)
val kernelBlockBlockMat = trainKernelMat.diagBlock(blockIdxsSeq)
val kernelGenEnd = System.nanoTime
// Build up the residual
val treeBranchingFactor = labels.context.getConf.getInt(
"spark.mlmatrix.treeBranchingFactor", 2).toInt
val depth = max(math.ceil(math.log(kernelBlockMat.partitions.size) /
math.log(treeBranchingFactor)).toInt, 1)
// Compute K_B^T * W as its easy to do this.
// After this we will subtract out K_BB^T * W_B
// b x k
val residuals = MLMatrixUtils.treeReduce(kernelBlockMat.zip(model).map { x =>
x._2.map { y =>
// this is a b * n1 times n1 * k
x._1.t * y
}
}, MatrixUtils.addMatrices, depth=depth)
val residualEnd = System.nanoTime
// This is b*k
val y_bb = labelsRPM(blockIdxs, ::).collect()
val collectEnd = System.nanoTime
// This is a tuple of (oldBlockBC, newBlockBC)
val wBlockBCs = (0 until lambdas.length).map { l =>
// This is b*k
val wBlockOld = if (pass == 0) {
DenseMatrix.zeros[Double](blockIdxs.size, nClasses)
} else {
wLocals(l)(block)
}
val lhs = kernelBlockBlockMat +
DenseMatrix.eye[Double](kernelBlockBlockMat.rows) * lambdas(l)
// Subtract out K_bb * W_bb from residual
val rhs = y_bb - (residuals(l) - kernelBlockBlockMat.t * wBlockOld)
val wBlockNew = lhs \ rhs
wLocals(l)(block) = wBlockNew
val wBlockOldBC = labels.context.broadcast(wBlockOld)
val wBlockNewBC = labels.context.broadcast(wBlockNew)
(wBlockOldBC, wBlockNewBC)
}
val localSolveEnd = System.nanoTime
var newModel = updateModel(
model, wBlockBCs.map(_._2), blockIdxsBC, preFixLengthsBC).cache()
// This is to truncate the lineage every 50 blocks
if (labels.context.getCheckpointDir.isDefined &&
block % blocksBeforeCheckpoint == (blocksBeforeCheckpoint - 1)) {
newModel = MatrixUtils.truncateLineage(newModel, false)
}
// materialize the new model
newModel.count()
model.unpersist(true)
model = newModel
val updateEnd = System.nanoTime
logInfo(s"EPOCH_${pass}_BLOCK_${block} took " +
((System.nanoTime - blockBegin)/1e9) + " seconds")
logInfo(s"EPOCH_${pass}_BLOCK_${block} " +
s"kernelGen: ${(kernelGenEnd - blockBegin)/1e9} " +
s"residual: ${(residualEnd - kernelGenEnd)/1e9} " +
s"collect: ${(collectEnd - residualEnd)/1e9} " +
s"localSolve: ${(localSolveEnd - collectEnd)/1e9} " +
s"modelUpdate: ${(updateEnd - localSolveEnd)/1e9}")
trainKernelMat.unpersist(blockIdxsSeq)
wBlockBCs.map { case (wBlockOldBC, wBlockNewBC) =>
wBlockOldBC.unpersist(true)
wBlockNewBC.unpersist(true)
}
blockIdxsBC.unpersist(true)
}
}
labelsMat.unpersist(true)
preFixLengthsBC.unpersist(true)
wLocals(0)
}
def updateModel(
model: RDD[IndexedSeq[DenseMatrix[Double]]],
wBlockNewBC: Seq[Broadcast[DenseMatrix[Double]]],
blockIdxsBC: Broadcast[Array[Int]],
preFixLengthsBC: Broadcast[Array[Int]]): RDD[IndexedSeq[DenseMatrix[Double]]] = {
val newModel = model.mapPartitionsWithIndex { case (idx, part) =>
// note that prefix length is *not* cumsum (so the first entry is 0)
val partBegin = preFixLengthsBC.value(idx)
if (part.hasNext) {
val wParts = part.next
assert(part.isEmpty)
wParts.zipWithIndex.foreach { case (wPart, idx) =>
val partLength = wPart.rows
// [partBegin, partBegin + partLength)
// [blockIdxs[0], blockIdxs[-1]]
//
// To compute which indices of the model to update, we take two
// ranges, map them both to example space ([nTrain]) so that we can
// intersect them, and then map the intersection back the delta model
// index space and partition index space.
val responsibleRange = (partBegin until (partBegin + partLength)).toSet
val inds = blockIdxsBC.value.zipWithIndex.filter { case (choice, _) =>
responsibleRange.contains(choice)
}
val partInds = inds.map(x => x._1 - partBegin).toSeq
val blockInds = inds.map(x => x._2).toSeq
val wBlockNewBCvalue = wBlockNewBC(idx).value
wPart(partInds, ::) := wBlockNewBCvalue(blockInds, ::)
}
Iterator.single(wParts)
} else {
Iterator.empty
}
}
newModel
}
}
| tomerk/keystone | src/main/scala/nodes/learning/KernelRidgeRegression.scala | Scala | apache-2.0 | 10,051 |
package slack
object IncomingWebhookClient {
import play.api._
import play.api.mvc._
import play.api.libs.ws._
import play.api.libs.json._
val WebhookUrl = "https://hooks.slack.com/services/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"
def sendMessage(message:String, channel:Option[String]) {
var data = Map(
"username" -> "ChASM BOT",
"text" -> message
)
if(channel.isDefined) {
data += "channel" -> channel.get
}
    val json = Json.toJson(data.toMap)
    WS.url(WebhookUrl).post(json)
}
def directMessage(username:String, message:String) {
sendMessage(message, Some(s"@$username"))
}
  def postInChannel(message:String, channel:String = null) {
    // Option(channel) keeps the null default from turning into Some(null) in the payload
    sendMessage(message, Option(channel))
  }
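  // Illustrative calls (channel and message text are made up):
  //   IncomingWebhookClient.postInChannel("build finished", "#general")
  //   IncomingWebhookClient.directMessage("alice", "your review is ready")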
} | conor-pappas/chasm_bot | app/value_objects/slack/IncomingWebhookClient.scala | Scala | mit | 772 |
package com.twitter.finatra.tests.json.internal.caseclass.validation.validators
import com.twitter.finatra.json.internal.caseclass.validation.validators.UUIDValidator
import com.twitter.finatra.validation.ValidationResult._
import com.twitter.finatra.validation.{UUID, ValidationResult, ValidatorTest}
import java.util.{UUID => JUUID}
case class UUIDExample(
@UUID uuid: String)
class UUIDValidatorTest extends ValidatorTest {
"uuid validator" should {
"pass validation for valid value" in {
val value = JUUID.randomUUID().toString
validate[UUIDExample](value) should equal(valid)
}
"fail validation for invalid value" in {
val value = "bad uuid"
validate[UUIDExample](value) should equal(
invalid(
errorMessage(value)))
}
}
private def validate[C : Manifest](value: String): ValidationResult = {
super.validate(manifest[C].runtimeClass, "uuid", classOf[UUID], value)
}
private def errorMessage(value: String) = {
UUIDValidator.errorMessage(messageResolver, value)
}
}
| tempbottle/finatra | jackson/src/test/scala/com/twitter/finatra/tests/json/internal/caseclass/validation/validators/UUIDValidatorTest.scala | Scala | apache-2.0 | 1,057 |
package clean.tex
import java.io.File
import clean.lib._
import ml.Pattern
import ml.classifiers._
import util.{Datasets, Stat, Tempo}
object metaEscolheAlgPCadaStratSoPCT extends AppWithUsage with LearnerTrait with StratsTrait with RangeGenerator with Rank with MetaTrait {
lazy val arguments = superArguments ++
List("learners:nb,5nn,c45,vfdt,ci,...|eci|i|ei|in|svm", "rank", "ntrees", "vencedorOuPerdedor(use1):1|-1", "runs", "folds", "ini", "fim", "porPool:p", "guardaSohRank:true|false")
val context = this.getClass.getName.split('.').last.dropRight(1)
val dedup = false
val measure = ALCKappa
val dsminSize = 100
  // n=2 ruins the stats
val n = 1
run()
override def run() = {
super.run()
      if (guardaSohRank) println(s"Keeping only the rank!")
      if (porPool && (rus != 1 || ks != 90 || !porRank)) justQuit("porPool enabled with wrong parameters!")
val ls = learners(learnersStr)
val metaclassifs = (patts: Vector[Pattern]) => if (porRank) Vector()
      else Vector( // NB does not work because it breaks during discretization
PCT()
)
println(s"${metaclassifs} <- metaclassifs")
val leastxt = learnerStr
stratsTexForGraficoComplexo foreach { strat =>
Tempo.start
val stratName = strat(NoLearner()).limp
val pares = for {l <- ls} yield strat -> l
val arq = s"/home/davi/wcs/arff/$context-$porPool-n${if (porRank) 1 else n}best${criterio}m$measure-$ini.$fim-${stratName + (if (porRank) "Rank" else "")}-${leastxt.replace(" ", ".")}-U$dsminSize.arff"
val labels = pares.map { case (s, l) => s(l).limpa }
val labelsleas = ls.map {
_.limpa
}
      // each dataset produces a bag of meta-examples (|bag| >= 25)
println(s"${datasets.size} <- dssss.size")
def bagsNaN = datasets.par map { d =>
val ds = Ds(d, readOnly = true)
ds.open()
val (ti0, th0, tf0, tpass) = ranges(ds)
val ti = ini match {
case "ti" => ti0
case "th" => th0 + 1
}
val tf = fim match {
case "th" => th0
case "tf" => tf0
}
val res = for {
r <- 0 until runs
f <- 0 until folds
} yield {
          // find the winners of this pool
val accs = pares map { case (s, l) =>
// p -> measure(ds, Passive(Seq()), ds.bestPassiveLearner, r, f)(ti,tf).read(ds).getOrElse(error("sem medida"))
            (s(NoLearner()).limpa, l.limpa) -> measure(ds, s(l), l, r, f)(ti, tf).read(ds).getOrElse(justQuit("no measure found"))
}
          // generate meta-examples
// "\\"#classes\\",\\"#atributos\\",\\"#exemplos\\"," +
// "\\"#exemplos/#atributos\\",\\"%nominais\\",\\"log(#exs)\\",\\"log(#exs/#atrs)\\"," +
// "skewnessesmin,skewavg,skewnessesmax,skewnessesminByskewnessesmax," +
// "kurtosesmin,kurtavg,kurtosesmax,kurtosesminBykurtosesmax," +
// "nominalValuesCountmin,nominalValuesCountAvg,nominalValuesCountmax,nominalValuesCountminBynominalValuesCountmax," +
// "mediasmin,mediasavg,mediasmax,mediasminBymediasmax," +
// "desviosmin,desviosavg,desviosmax,desviosminBydesviosmax," +
// "entropiasmin,entropiasavg,entropiasmax,entropiasminByentropiasmax," +
// "correlsmin,correlsavg,correlsmax,correlsminBycorrelsmax,correleucmah,correleucman,correlmanmah","AH-conect.-Y", "AH-Dunn-Y", "AH-silhueta-Y", "AH-conect.-1.5Y", "AH-Dunn-1.5Y", "AH-silhueta-1.5Y",
// "AH-conect.-2Y", "AH-Dunn-2Y", "AH-silhueta-2Y", "kM-conect.-Y", "kM-Dunn-Y", "kM-silhueta-Y", "kM-conect.-1.5Y", "kM-Dunn-1.5Y",
// "kM-silhueta-1.5Y", "kM-conect.-2Y", "kM-Dunn-2Y", "kM-silhueta-2Y"
          // FS (feature selection) did not help, even when cheating like this:
val metaatts0 = ds.metaAttsrf(r, f, suav = false).map(x => (x._1, x._2.toString, x._3)) ++ ds.metaAttsFromR(r, f).map(x => (x._1, x._2.toString, x._3))
val metaatts = ("\\"bag_" + pares.size + "\\"", ds.dataset, "string") +: metaatts0
val insts = if (porRank) {
            // rank readable by Clus and ELM
List(metaatts ++ ranqueia(accs.map(_._2)).zipWithIndex.map { case (x, i) => (s"class$i", x.toString, "numeric") } -> "")
} else {
//Acc
            val melhores = pegaMelhores(accs, n)(_._2 * criterio).map(_._1) // no need to worry about "each test bag may only hold 25", since this is not a ranking
melhores map (m => metaatts -> (m._1 + "-" + m._2))
}
//insts.map(x => x._1.tail.map(x => (x._2.toDouble * 100).round / 100d).mkString(" ")) foreach println
// println(s"$r $f")
insts
}
ds.close()
res.flatten
}
def bagsCbases = bagsNaN
if (!new File(arq).exists()) grava(arq, arff(labels.mkString(","), bagsCbases.toList.flatten, print = true, context, porRank))
val patterns = Datasets.arff(arq, dedup, rmuseless = false) match {
case Right(x) =>
          // could not figure out how to increase the number of attributes in weka (it will be left without the std-dev atts).
val osbags = x.groupBy(_.base).map(_._2)
val ps = (osbags map meanPattern(porRank)).toVector
patts2file(ps, arq + ".arff")
if (porPool) osbags.flatten.toVector else ps
case Left(m) => error(s"${m} <- m")
}
if (porRank) print(s"$stratName Spearman correl. $rus*$ks-fold CV. " + arq + " ")
else print(s"$stratName Accuracy. $rus*$ks-fold CV. " + arq + " ")
val ra = if (porRank) "ra" else "ac"
val metads = new Db("metanew", readOnly = false)
metads.open()
// select ra,cr,i,f,st,ls,rs,fs,mc,nt,dsminsize from r
val sql69 = s"select mc from r where porPool='$porPool' and ra='$ra' and cr=$criterio and i='$ini' and f='$fim' and st='$stratName' and ls='$leastxt' and rs=$rus and fs=$ks and nt=$ntrees and dsminsize='$dsminSize'"
println(s"${sql69} <- sql69")
metads.readString(sql69) match {
// case x: List[Vector[String]] if x.map(_.head).intersect(metaclassifs(Vector()).map(_.limp)).size == 0 =>
case x: List[Vector[String]] if x.isEmpty | guardaSohRank | true=>
val cvs = cv(porPool, ini, fim, labelsleas, stratName, ntrees, patterns, metaclassifs, porRank, rus, ks, guardaSohRank).toVector
val cvsf = cvs.flatten
case x: List[Vector[String]] => println(s"${x} <- rows already stored")
}
metads.close()
println()
// if (!porRank) Datasets.arff(arq, dedup, rmuseless = false) match {
// case Right(x) => if (apenasUmPorBase) {
// val ps = (x.groupBy(_.base).map(_._2) map meanPattern(porRank)).toVector
// patts2file(ps, arq + "umPorBase")
// C45(laplace = false, 5, 1).tree(arq + "umPorBase.arff", arq + "umPorBase" + ".tex")
// println(s"${arq} <- arq")
// }
// }
}
}
}
/*
active-learning-scala: Active Learning library for Scala
Copyright (c) 2014 Davi Pereira dos Santos
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
| active-learning/active-learning-scala | src/main/scala/clean/tex/metaEscolheAlgPCadaStratSoPCT.scala | Scala | gpl-2.0 | 7,904 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.tools
import java.io.PrintStream
import java.util.concurrent.CountDownLatch
import java.util.{Properties, Random}
import joptsimple._
import kafka.common.StreamEndException
import kafka.consumer._
import kafka.message._
import kafka.metrics.KafkaMetricsReporter
import kafka.utils._
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.common.errors.WakeupException
import org.apache.kafka.common.utils.Utils
import scala.collection.JavaConversions._
/**
* Consumer that dumps messages to standard out.
*/
object ConsoleConsumer extends Logging {
var messageCount = 0
private val shutdownLatch = new CountDownLatch(1)
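  // Typical invocations (the flags correspond to options defined in ConsumerConfig below; host and topic
  // values are illustrative):
  //   old consumer: --zookeeper localhost:2181 --topic test --from-beginning
  //   new consumer: --new-consumer --bootstrap-server localhost:9092 --topic test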
def main(args: Array[String]) {
val conf = new ConsumerConfig(args)
try {
run(conf)
} catch {
case e: Throwable =>
error("Unknown error when running consumer: ", e)
        System.exit(1)
}
}
def run(conf: ConsumerConfig) {
val consumer =
if (conf.useNewConsumer) {
val timeoutMs = if (conf.timeoutMs >= 0) conf.timeoutMs else Long.MaxValue
new NewShinyConsumer(Option(conf.topicArg), Option(conf.whitelistArg), getNewConsumerProps(conf), timeoutMs)
} else {
checkZk(conf)
new OldConsumer(conf.filterSpec, getOldConsumerProps(conf))
}
addShutdownHook(consumer, conf)
try {
process(conf.maxMessages, conf.formatter, consumer, conf.skipMessageOnError)
} finally {
consumer.cleanup()
reportRecordCount()
// if we generated a random group id (as none specified explicitly) then avoid polluting zookeeper with persistent group data, this is a hack
if (!conf.groupIdPassed)
ZkUtils.maybeDeletePath(conf.options.valueOf(conf.zkConnectOpt), "/consumers/" + conf.consumerProps.get("group.id"))
shutdownLatch.countDown()
}
}
def checkZk(config: ConsumerConfig) {
if (!checkZkPathExists(config.options.valueOf(config.zkConnectOpt), "/brokers/ids")) {
System.err.println("No brokers found in ZK.")
System.exit(1)
}
if (!config.options.has(config.deleteConsumerOffsetsOpt) && config.options.has(config.resetBeginningOpt) &&
checkZkPathExists(config.options.valueOf(config.zkConnectOpt), "/consumers/" + config.consumerProps.getProperty("group.id") + "/offsets")) {
System.err.println("Found previous offset information for this group " + config.consumerProps.getProperty("group.id")
+ ". Please use --delete-consumer-offsets to delete previous offsets metadata")
System.exit(1)
}
}
def addShutdownHook(consumer: BaseConsumer, conf: ConsumerConfig) {
Runtime.getRuntime.addShutdownHook(new Thread() {
override def run() {
consumer.stop()
shutdownLatch.await()
}
})
}
def process(maxMessages: Integer, formatter: MessageFormatter, consumer: BaseConsumer, skipMessageOnError: Boolean) {
while (messageCount < maxMessages || maxMessages == -1) {
messageCount += 1
val msg: BaseConsumerRecord = try {
consumer.receive()
} catch {
case nse: StreamEndException =>
trace("Caught StreamEndException because consumer is shutdown, ignore and terminate.")
// Consumer is already closed
return
case nse: WakeupException =>
trace("Caught WakeupException because consumer is shutdown, ignore and terminate.")
// Consumer will be closed
return
case e: Throwable =>
error("Error processing message, terminating consumer process: ", e)
// Consumer will be closed
return
}
try {
formatter.writeTo(msg.key, msg.value, System.out)
} catch {
case e: Throwable =>
if (skipMessageOnError) {
error("Error processing message, skipping this message: ", e)
} else {
// Consumer will be closed
throw e
}
}
checkErr(formatter)
}
}
def reportRecordCount() {
System.err.println(s"Processed a total of $messageCount messages")
}
def checkErr(formatter: MessageFormatter) {
if (System.out.checkError()) {
// This means no one is listening to our output stream any more, time to shutdown
System.err.println("Unable to write to standard out, closing consumer.")
formatter.close()
System.exit(1)
}
}
def getOldConsumerProps(config: ConsumerConfig): Properties = {
val props = new Properties
props.putAll(config.consumerProps)
props.put("auto.offset.reset", if (config.fromBeginning) "smallest" else "largest")
props.put("zookeeper.connect", config.zkConnectionStr)
if (!config.options.has(config.deleteConsumerOffsetsOpt) && config.options.has(config.resetBeginningOpt) &&
checkZkPathExists(config.options.valueOf(config.zkConnectOpt), "/consumers/" + props.getProperty("group.id") + "/offsets")) {
System.err.println("Found previous offset information for this group " + props.getProperty("group.id")
+ ". Please use --delete-consumer-offsets to delete previous offsets metadata")
System.exit(1)
}
if (config.options.has(config.deleteConsumerOffsetsOpt))
ZkUtils.maybeDeletePath(config.options.valueOf(config.zkConnectOpt), "/consumers/" + config.consumerProps.getProperty("group.id"))
if (config.timeoutMs >= 0)
props.put("consumer.timeout.ms", config.timeoutMs.toString)
props
}
def getNewConsumerProps(config: ConsumerConfig): Properties = {
val props = new Properties
props.putAll(config.consumerProps)
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, if (config.options.has(config.resetBeginningOpt)) "earliest" else "latest")
props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, config.bootstrapServer)
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, if (config.keyDeserializer != null) config.keyDeserializer else "org.apache.kafka.common.serialization.ByteArrayDeserializer")
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, if (config.valueDeserializer != null) config.valueDeserializer else "org.apache.kafka.common.serialization.ByteArrayDeserializer")
props
}
class ConsumerConfig(args: Array[String]) {
val parser = new OptionParser
val topicIdOpt = parser.accepts("topic", "The topic id to consume on.")
.withRequiredArg
.describedAs("topic")
.ofType(classOf[String])
val whitelistOpt = parser.accepts("whitelist", "Whitelist of topics to include for consumption.")
.withRequiredArg
.describedAs("whitelist")
.ofType(classOf[String])
val blacklistOpt = parser.accepts("blacklist", "Blacklist of topics to exclude from consumption.")
.withRequiredArg
.describedAs("blacklist")
.ofType(classOf[String])
val zkConnectOpt = parser.accepts("zookeeper", "REQUIRED: The connection string for the zookeeper connection in the form host:port. " +
"Multiple URLS can be given to allow fail-over.")
.withRequiredArg
.describedAs("urls")
.ofType(classOf[String])
val consumerConfigOpt = parser.accepts("consumer.config", "Consumer config properties file.")
.withRequiredArg
.describedAs("config file")
.ofType(classOf[String])
val messageFormatterOpt = parser.accepts("formatter", "The name of a class to use for formatting kafka messages for display.")
.withRequiredArg
.describedAs("class")
.ofType(classOf[String])
.defaultsTo(classOf[DefaultMessageFormatter].getName)
val messageFormatterArgOpt = parser.accepts("property")
.withRequiredArg
.describedAs("prop")
.ofType(classOf[String])
val deleteConsumerOffsetsOpt = parser.accepts("delete-consumer-offsets", "If specified, the consumer path in zookeeper is deleted when starting up")
val resetBeginningOpt = parser.accepts("from-beginning", "If the consumer does not already have an established offset to consume from, " +
"start with the earliest message present in the log rather than the latest message.")
val maxMessagesOpt = parser.accepts("max-messages", "The maximum number of messages to consume before exiting. If not set, consumption is continual.")
.withRequiredArg
.describedAs("num_messages")
.ofType(classOf[java.lang.Integer])
val timeoutMsOpt = parser.accepts("timeout-ms", "If specified, exit if no message is available for consumption for the specified interval.")
.withRequiredArg
.describedAs("timeout_ms")
.ofType(classOf[java.lang.Integer])
val skipMessageOnErrorOpt = parser.accepts("skip-message-on-error", "If there is an error when processing a message, " +
"skip it instead of halt.")
val csvMetricsReporterEnabledOpt = parser.accepts("csv-reporter-enabled", "If set, the CSV metrics reporter will be enabled")
    val metricsDirectoryOpt = parser.accepts("metrics-dir", "If csv-reporter-enabled is set, and this parameter is " +
      "set, the csv metrics will be output here")
.withRequiredArg
.describedAs("metrics directory")
.ofType(classOf[java.lang.String])
val useNewConsumerOpt = parser.accepts("new-consumer", "Use the new consumer implementation.")
val bootstrapServerOpt = parser.accepts("bootstrap-server")
.withRequiredArg
.describedAs("server to connect to")
.ofType(classOf[String])
val keyDeserializerOpt = parser.accepts("key-deserializer")
.withRequiredArg
.describedAs("deserializer for key")
.ofType(classOf[String])
val valueDeserializerOpt = parser.accepts("value-deserializer")
.withRequiredArg
.describedAs("deserializer for values")
.ofType(classOf[String])
if (args.length == 0)
CommandLineUtils.printUsageAndDie(parser, "The console consumer is a tool that reads data from Kafka and outputs it to standard output.")
var groupIdPassed = true
val options: OptionSet = tryParse(parser, args)
val useNewConsumer = options.has(useNewConsumerOpt)
// If using old consumer, exactly one of whitelist/blacklist/topic is required.
// If using new consumer, topic must be specified.
var topicArg: String = null
var whitelistArg: String = null
var filterSpec: TopicFilter = null
if (useNewConsumer) {
val topicOrFilterOpt = List(topicIdOpt, whitelistOpt).filter(options.has)
if (topicOrFilterOpt.size != 1)
CommandLineUtils.printUsageAndDie(parser, "Exactly one of whitelist/topic is required.")
topicArg = options.valueOf(topicIdOpt)
whitelistArg = options.valueOf(whitelistOpt)
} else {
val topicOrFilterOpt = List(topicIdOpt, whitelistOpt, blacklistOpt).filter(options.has)
if (topicOrFilterOpt.size != 1)
CommandLineUtils.printUsageAndDie(parser, "Exactly one of whitelist/blacklist/topic is required.")
topicArg = options.valueOf(topicOrFilterOpt.head)
filterSpec = if (options.has(blacklistOpt)) new Blacklist(topicArg) else new Whitelist(topicArg)
}
val consumerProps = if (options.has(consumerConfigOpt))
Utils.loadProps(options.valueOf(consumerConfigOpt))
else
new Properties()
val zkConnectionStr = options.valueOf(zkConnectOpt)
val fromBeginning = options.has(resetBeginningOpt)
    val skipMessageOnError = options.has(skipMessageOnErrorOpt)
val messageFormatterClass = Class.forName(options.valueOf(messageFormatterOpt))
val formatterArgs = CommandLineUtils.parseKeyValueArgs(options.valuesOf(messageFormatterArgOpt))
val maxMessages = if (options.has(maxMessagesOpt)) options.valueOf(maxMessagesOpt).intValue else -1
val timeoutMs = if (options.has(timeoutMsOpt)) options.valueOf(timeoutMsOpt).intValue else -1
val bootstrapServer = options.valueOf(bootstrapServerOpt)
val keyDeserializer = options.valueOf(keyDeserializerOpt)
val valueDeserializer = options.valueOf(valueDeserializerOpt)
val formatter: MessageFormatter = messageFormatterClass.newInstance().asInstanceOf[MessageFormatter]
formatter.init(formatterArgs)
CommandLineUtils.checkRequiredArgs(parser, options, if (useNewConsumer) bootstrapServerOpt else zkConnectOpt)
if (options.has(csvMetricsReporterEnabledOpt)) {
val csvReporterProps = new Properties()
csvReporterProps.put("kafka.metrics.polling.interval.secs", "5")
csvReporterProps.put("kafka.metrics.reporters", "kafka.metrics.KafkaCSVMetricsReporter")
if (options.has(metricsDirectoryOpt))
csvReporterProps.put("kafka.csv.metrics.dir", options.valueOf(metricsDirectoryOpt))
else
csvReporterProps.put("kafka.csv.metrics.dir", "kafka_metrics")
csvReporterProps.put("kafka.csv.metrics.reporter.enabled", "true")
val verifiableProps = new VerifiableProperties(csvReporterProps)
KafkaMetricsReporter.startReporters(verifiableProps)
}
//Provide the consumer with a randomly assigned group id
if(!consumerProps.containsKey(ConsumerConfig.GROUP_ID_CONFIG)) {
consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG,"console-consumer-" + new Random().nextInt(100000))
groupIdPassed=false
}
def tryParse(parser: OptionParser, args: Array[String]) = {
try
parser.parse(args: _*)
catch {
case e: OptionException =>
Utils.croak(e.getMessage)
null
}
}
}
def checkZkPathExists(zkUrl: String, path: String): Boolean = {
try {
val zk = ZkUtils.createZkClient(zkUrl, 30 * 1000, 30 * 1000)
zk.exists(path)
} catch {
case _: Throwable => false
}
}
}
trait MessageFormatter {
def writeTo(key: Array[Byte], value: Array[Byte], output: PrintStream)
def init(props: Properties) {}
def close() {}
}
class DefaultMessageFormatter extends MessageFormatter {
var printKey = false
var keySeparator = "\\t".getBytes
var lineSeparator = "\\n".getBytes
override def init(props: Properties) {
if (props.containsKey("print.key"))
printKey = props.getProperty("print.key").trim.toLowerCase.equals("true")
if (props.containsKey("key.separator"))
keySeparator = props.getProperty("key.separator").getBytes
if (props.containsKey("line.separator"))
lineSeparator = props.getProperty("line.separator").getBytes
}
def writeTo(key: Array[Byte], value: Array[Byte], output: PrintStream) {
if (printKey) {
output.write(if (key == null) "null".getBytes() else key)
output.write(keySeparator)
}
output.write(if (value == null) "null".getBytes() else value)
output.write(lineSeparator)
}
}
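// Example formatter properties, passed via --property on the command line (names match init() above;
// the separator value is arbitrary):
//   --formatter kafka.tools.DefaultMessageFormatter --property print.key=true --property key.separator=,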
class NoOpMessageFormatter extends MessageFormatter {
override def init(props: Properties) {}
def writeTo(key: Array[Byte], value: Array[Byte], output: PrintStream) {}
}
class ChecksumMessageFormatter extends MessageFormatter {
private var topicStr: String = _
override def init(props: Properties) {
topicStr = props.getProperty("topic")
if (topicStr != null)
topicStr = topicStr + ":"
else
topicStr = ""
}
def writeTo(key: Array[Byte], value: Array[Byte], output: PrintStream) {
val chksum = new Message(value, key).checksum
output.println(topicStr + "checksum:" + chksum)
}
}
| eljefe6a/kafka | core/src/main/scala/kafka/tools/ConsoleConsumer.scala | Scala | apache-2.0 | 16,060 |
package org.sbuild.plugins.aether
import de.tototec.sbuild._
/**
* Central configuration of the SBuild Aether Plugin.
* The Aether Plugin will register a `[[de.tototec.sbuild.SchemeHandler]]` under the name `[[Aether.schemeName schemeName]]`.
*
* The following settings are available:
* - `[[Aether.remoteRepos]]` - A list of remote repositories to use.
* - `[[Aether.schemeName]]` - The name of the registered scheme handler.
* - `[[Aether.scopeDeps]]` - Dependencies collections by scope, whereas the scope is an alias for the dependencies.
*
* For further documentation refer to the respective methods/fields.
*
* @param schemeName The name of the registered scheme handler.
* @param remoteRepos Remote repositories Aether will refer to, to resolve the requested dependencies.
* @param scopeDeps Dependencies collected by scope, whereas the scope is an alias for the dependencies.
*/
case class Aether(schemeName: String,
remoteRepos: Seq[Repository] = Seq(Repository.Central),
scopeDeps: Map[String, Seq[Dependency]] = Map(),
scopeExcludes: Map[String, Seq[Exclude]] = Map()) {
def addDeps(scope: String)(deps: Dependency*): Aether =
copy(scopeDeps = scopeDeps + (scope -> (scopeDeps.withDefault(scope => Seq())(scope) ++ deps)))
def addExcludes(scope: String)(excludes: Exclude*): Aether =
copy(scopeExcludes = scopeExcludes + (scope -> (scopeExcludes.withDefault(scope => Seq())(scope) ++ excludes)))
}
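// Hypothetical wiring (how Dependency/Exclude instances are built is assumed, not defined in this file):
//   val aether = Aether(schemeName = "mvn")
//     .addDeps("compile")(someDependency)
//     .addExcludes("compile")(someExclude)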
| SBuild-org/sbuild-aether-plugin | org.sbuild.plugins.aether/src/main/scala/org/sbuild/plugins/aether/Aether.scala | Scala | apache-2.0 | 1,498 |
object T9Spelling extends App {
val source = scala.io.Source.fromFile(args(0))
val lines = source.getLines.filter(_.length > 0)
lines.next //forget about the test cases number
var numpad = Array(
" ",
"",
"abc",
"def",
"ghi",
"jkl",
"mno",
"pqrs",
"tuv",
"wxyz"
)
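  // Example: "hello" -> "4433555 555666" (a space marks the pause needed when the same key repeats, e.g. "ll")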
var num_case = 0
//Reading the test cases
while(lines.hasNext){
var res = "";
var prev_char = 0
var test_case = lines.next
//For every character on the test case find its number on the keypad
for(i <- 0 to test_case.length - 1){
var char = test_case.charAt(i)
for(j <- 0 to numpad.length -1) {
if(numpad(j).indexOf(char) > -1) {
if (prev_char == j) res += " ";
var num_index = numpad(j).indexOf(char)
for(k <- 0 to num_index){
res += j
}
prev_char = j
}
}
}
num_case += 1
println("Case #" + num_case + ": " +res)
}
}
| DarthCharles/google-code-jam | solutions/t9-spelling/scala/tNine.scala | Scala | mit | 981 |
package controllers
import play.api.mvc.{ Action, Controller }
import play.api.mvc.WebSocket
import play.api.libs.iteratee.Enumerator
import play.api.libs.json.Reads
import play.api.libs.json.JsValue
import play.api.libs.iteratee.Iteratee
import play.api.libs.functional.syntax._
import play.api.libs.json.JsPath
import models.DAO
import models.MissedCall
import play.api.libs.json.JsError
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import java.sql.Timestamp
import play.api.libs.iteratee.Concurrent
import play.api.data.Form
import play.api.data.Forms._
import models.AppReg
import play.libs.Akka
import akka.actor.Props
import akka.pattern.ask
import scala.concurrent.Promise
import models.CallEventsActor
import models.Join
import models.Call
import akka.util.Timeout
import scala.concurrent.Future
import play.api.libs.json.Json
import models.CallTriggerActor
import models.Send
import models.CallFrom
import play.api.Logger
/**
* case classes representing the form data
*/
case class AddDeviceFormData(simNumber: String, phoneNumber: String)
case class RemoveDeviceFormData(phoneNumber: String)
case class MissedCallData(simNumber: String, phno: String)
object Application extends Controller with Secured {
/**
* creating actors
*/
val callEventsActor = Akka.system().actorOf(Props[CallEventsActor])
val callTriggerActor = Akka.system().actorOf(Props[CallTriggerActor])
/**
* index page
*/
def index = Action {
Ok(views.html.index("Welcome to Foo Service"))
}
/**
* Android App makes a post request to this action for App or device registration
*/
def registerApp = Action(parse.json) { request =>
/**
* log message
*/
Logger.info("register request from App")
(request.body \\ "simId").asOpt[String].map {simId =>{
println("simId "+simId)
/**
* check if this sim if registered
*/
if(DAO.isSimExists(simId.trim())) {
Logger.info(simId + " sim exists")
/**
* check if the sim is whitelisted
*/
if(DAO.isWhitelisted(simId)) {
/**
* send the status
*/
Ok(Json.toJson(Map("status" -> 0)))
}else {
/**
*white list the sim
*/
DAO.whitelist(simId)
/**
* send the status
*/
Ok(Json.toJson(Map("status" -> 1)))
}
}else {
Logger.info("sim: "+simId+" not registered")
/**
* send the status
*/
BadRequest(Json.toJson(Map("status" -> -1)))
}
}
}.getOrElse( BadRequest(Json.toJson(Map("status" -> -1))))
}
/**
* Android App does a post request to this action to unregister itself
* once the app is unregistered entry will be removed from whitelist
*/
def unregisterApp = Action(parse.json) { request =>
(request.body \\ "simId").asOpt[String].map {simId =>{
/**
* check if sim is added by the admin
*/
if(DAO.isSimExists(simId)) {
/**
* check is its whitelisted
*/
if(DAO.isWhitelisted(simId)) {
/**
* remove from white list
*/
DAO.blacklist(simId)
/**
* send the status
*/
Ok(Json.toJson(Map("status" -> 1)))
}else {
/**
* send the status
*/
Ok(Json.toJson(Map("status" -> 0)))
}
}else {
BadRequest(Json.toJson(Map("status" -> -1)))
}
}
}.getOrElse( BadRequest(Json.toJson(Map("status" -> -1))))
}
/**
* foo websocket talks to the Android app over WebSockets
   * Android App continuously sends the call information that it receives
* logs the call information to the database along with the timestamp
   * later this is used to check whether the call was made within a 2-minute time span
*/
def foo = WebSocket.async[JsValue] { implicit request =>
/**
* Implicit reads
*/
implicit val missedCallDataReads: Reads[MissedCallData] = (
(JsPath \\ "simNumber").read[String] and
(JsPath \\ "phno").read[String])(MissedCallData.apply _)
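    // Incoming frames are JSON of the shape {"simNumber": "...", "phno": "..."} (per the Reads above);
    // e.g. {"simNumber": "sim-1", "phno": "+911234567890"} -- the sample values are made up.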
/**
* console message
*/
Logger.info("missed call manager websocket")
/**
* WebSocket.Async excepts a future
*/
Future {
/**
* read json messages using iteratee from the App
*/
val iteratee = Iteratee.foreach[JsValue] {
json =>
{
/**
*Send the Call Events actor json message
* this actor sends messages to Admin console
*/
callEventsActor ! Call(json)
/**
* parsing json data
*/
json.validate[MissedCallData].fold(
invalid = {
errors => println("errors")
},
valid = {
data =>
{
/**
* console message
*/
Logger.info("got a missed call from "+data.simNumber+" phno "+data.phno)
val date = new java.util.Date
val t = new Timestamp(date.getTime)
/**
* check if sim is registered
*/
val sim = DAO.isSimExists(data.simNumber)
if(sim){
/**
* write the missed call data to database
*/
DAO.saveMissedCall(MissedCall(data.simNumber, data.phno, t, None))
/**
* console message
*/
Logger.info("call from user logged "+data.phno)
}else {
/**
* console message
*/
Logger.info("call from user not logged " + data.phno)
}
/**
*
*/
callTriggerActor ! CallFrom(data.phno)
}
})
}
}
/**
       * Empty enumerator as I am not sending any message back to the Android App
*/
val enumerator = Enumerator.empty[JsValue]
/**
* return iteratee and enumerator
*/
(iteratee, enumerator)
}
}
/**
* for using scala prefix notations
*/
import scala.concurrent.duration._
implicit val timeout = Timeout(1 hours)
/**
* Subscribe to missed call events
*/
def events = WebSocket.async[String] { implicit request =>
val future = (callEventsActor ? Join)
future.mapTo[(Iteratee[String, _], Enumerator[String])]
}
/**
* Add Device form
*/
val addDeviceForm = Form(
mapping(
"simId" -> nonEmptyText,
"phoneNumber" -> nonEmptyText
)(AddDeviceFormData.apply)(AddDeviceFormData.unapply) verifying("Sim Id or Phno already exists", data =>
!DAO.isAppReg(data.simNumber, data.phoneNumber)
)
)
/**
* add device action for displaying the add device from
*/
def addDevice = withAdmin { admin => implicit request =>
Ok(views.html.addDevice(addDeviceForm))
}
/**
* add device post action
*/
def addDevicePost = withAdmin { admin => implicit request =>
addDeviceForm.bindFromRequest().fold(
formWithErrors => {
BadRequest(views.html.addDevice(formWithErrors))
},
data => {
val date = new java.util.Date
val t = new Timestamp(date.getTime())
/**
* add the device along with timestamp (t)
*/
DAO.regApp(AppReg(data.simNumber, "+91"+data.phoneNumber, t))
Redirect(routes.Application.addDevice()).flashing("addSuccess" -> "addedDevice")
}
)
}
/**
* remove device form
*/
val removeDeviceForm = Form(
mapping(
"phoneNumber" -> nonEmptyText
)(RemoveDeviceFormData.apply)(RemoveDeviceFormData.unapply)
)
/**
* remove device action
*/
def removeDevice = withAdmin { admin => implicit request =>
Ok(views.html.removeDevice(removeDeviceForm))
}
/**
* remove device post action
*/
def removeDevicePost = withAdmin { admin => implicit request =>
removeDeviceForm.bindFromRequest().fold(
formWithErrors => {
BadRequest(views.html.removeDevice(removeDeviceForm))
},
data => {
/**
* unreg the app or device
*/
DAO.appUnReg(data.phoneNumber)
Redirect(routes.Application.removeDevice()).flashing("removeSuccess" -> "Device removed")
}
)
}
/**
* admin logout action
*/
def adminLogout = withAdmin { admin => implicit request =>
Redirect(routes.Auth.adminLogin()).withNewSession
}
/**
* admin home action
*/
def adminHome = withAdmin { admin => implicit request =>
Ok(views.html.adminHome())
}
/**
* user signin count down
*/
def userCountDown = withUserAsync { user => implicit request =>
/**
* insert the user entry (timestamp is provided by default and it is current timestamp)
*/
DAO.insertUserEntry(user.id.get)
/**
* pick a phone number from the whitelist to display to the user
*/
val phno = DAO.getPhnoFromWhiteList();
/**
* console message
*/
Logger.info(user.email+" phno user count down "+user.phno)
/**
* display the user count down page sending phno of callee
*/
Ok(views.html.userCountDown((phno, user.phno)))
}
/**
* this action is called from user count down
* this action checks if a call has been logged within 2 minutes of time span and then provides access
*/
def redirect = withUserAsync { user => implicit request =>
/**
* get phno of user using email
*/
val option = DAO.getPhno(user.email)
/**
* check if there is a call from this user's phno number within 2 minutes time span
*/
val call = option match {
/**
* notice 2 minutes here
*/
case Some(phno) => DAO.isMissedCallInInterval(user.id.get, phno, 2)
case None => false
}
if(!call) {
/**
* set the signin status to down (0)
*/
DAO.signinOff(user.email)
/**
* redirect to user login
*/
Redirect(routes.Auth.userLogin)
}else {
/**
* set the signin status to up (1)
*/
DAO.signinOff(user.email)
Redirect(routes.Application.userHome).withSession("callAuthed" -> user.email)
}
}
/**
* logout action
* notice it redirects to login page clearing the existing session
*/
def userLogout = withCallAuthedUser(user => implicit request => {
Redirect(routes.Auth.userLogin).withNewSession
})
/**
* user home action
*/
def userHome = withCallAuthedUser(user => implicit request => {
Ok(views.html.userHome())
}
)
/**
* Subscribe to get the events
* used to subscribe for events such as missed call from specific phno number
*/
def callTrigger(phno: String) = WebSocket.async[String]{ implicit request =>
//implicit val timeout = Timeout(60 minutes)
val future = callTriggerActor ? Send(phno)
future.mapTo[(Iteratee[String, _], Enumerator[String])]
}
} | pamu/FooService | FooService2/app/controllers/Application.scala | Scala | apache-2.0 | 11,668 |
package org.jetbrains.plugins.scala.lang.psi.impl.base
import com.intellij.lang.ASTNode
import com.intellij.openapi.util.TextRange
import com.intellij.psi.{PsiElement, PsiReference}
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.ScBegin
import org.jetbrains.plugins.scala.lang.psi.api.base.ScEnd
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScClass
import org.jetbrains.plugins.scala.lang.psi.impl.base.ScEndImpl.Name
import org.jetbrains.plugins.scala.lang.psi.impl.{ScalaPsiElementFactory, ScalaPsiElementImpl}
class ScEndImpl(node: ASTNode) extends ScalaPsiElementImpl(node) with ScEnd with PsiReference {
override def begin: Option[ScBegin] = this.parentsInFile.findByType[ScBegin]
override def keyword: PsiElement = getFirstChild
override def tag: PsiElement = getLastChild
override def getName: String = tag.getText
override def setName(name: String): PsiElement = {
tag.replace(ScalaPsiElementFactory.createIdentifier(name).getPsi)
}
override def getReference: PsiReference = this
override def getElement: PsiElement = this
override def getRangeInElement: TextRange = tag.getTextRangeInParent
// Enable Rename and Find Usages, but don't highlight the reference as usage, SCL-19675
override def resolve(): PsiElement = if (!tag.isIdentifier) null else {
val target = ScalaPsiElementFactory.createScalaFileFromText(s"class ${Name}")(this).typeDefinitions.head
target.context = this
target
}
override def handleElementRename(newElementName: String): PsiElement = this
override def bindToElement(element: PsiElement): PsiElement = this
override def isReferenceTo(element: PsiElement): Boolean = false
override def isSoft: Boolean = true
override def getCanonicalText: String = "ScEnd"
override def toString: String = "End: " + getName
}
object ScEndImpl {
private final val Name = "ScEndTarget2cf17ff3b2a54d14b64914496f02dc65" // Random unique ID
object Target {
/** @return ScEnd element of the target */
def unapply(target: PsiElement): Option[ScEnd] = target match {
case target: ScClass if target.name == Name => Some(target.getContext.asInstanceOf[ScEnd])
case _ => None
}
}
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/impl/base/ScEndImpl.scala | Scala | apache-2.0 | 2,255 |
// Copyright 2011 Kiel Hodges
package replicant.support
import replicant.ResponseFallback
private[support] sealed trait Response[Result] {
def value(fallback: ResponseFallback[Result]): Result
}
private case class ValueResponse[Result](result: () => Result) extends Response[Result] {
def value(fallback: ResponseFallback[Result]): Result = result()
}
private case class UnknownResponse[Result](description: String) extends Response[Result] {
def value(fallback: ResponseFallback[Result]): Result = fallback(description)
}
| greenbar/replicant | scala/src/main/scala/replicant/support/Response.scala | Scala | mit | 533 |
package walfie.gbf.raidfinder.client
import com.thoughtworks.binding.Binding._
import org.scalajs.dom
import scala.scalajs.js
import scala.scalajs.js.annotation.ScalaJSDefined
import walfie.gbf.raidfinder.protocol.BossName
object ViewModel {
trait Labeled {
def label: String
def id: String
}
sealed abstract class DialogTab(val label: String, val icon: String) extends Labeled {
def id: String = s"gbfrf-dialog__$label"
}
object DialogTab {
case object Follow extends DialogTab("Follow", "add")
case object Settings extends DialogTab("Settings", "settings")
val all: List[DialogTab] = List(Follow, Settings)
val fromString: String => Option[DialogTab] =
all.map(tab => tab.label -> tab).toMap.get _
}
sealed abstract class ImageQuality(val label: String, val suffix: String) extends Labeled {
def id: String = s"gbfrf-settings__image-quality--$label"
}
object ImageQuality {
case object Off extends ImageQuality("Off", "")
case object Low extends ImageQuality("Low", ":thumb")
case object High extends ImageQuality("High", ":small")
val Default = Off
val all = List(Off, Low, High)
val fromString: String => Option[ImageQuality] =
all.map(q => q.label -> q).toMap.get _
}
sealed abstract class TimeFormat(val label: String) extends Labeled {
def id: String = s"gbfrf-settings__time-format--$label"
}
object TimeFormat {
case object Relative extends TimeFormat("Relative")
case object TwelveHour extends TimeFormat("12H")
case object TwentyFourHour extends TimeFormat("24H")
val Default = Relative
val all: List[TimeFormat] = List(Relative, TwelveHour, TwentyFourHour)
val fromString: String => Option[TimeFormat] =
all.map(format => format.label -> format).toMap.get _
}
// TODO: Maybe put this somewhere else
private val StateStorageKey = "settings"
private val storage = dom.window.localStorage
def persistState(state: State): Unit = {
val jsString = js.JSON.stringify(state.toJsObject)
storage.setItem(StateStorageKey, jsString)
}
def loadState(): State = {
Option(storage.getItem(StateStorageKey)).map { jsString =>
val jsState = js.JSON.parse(jsString).asInstanceOf[JsState]
State.fromJsObject(jsState)
}.getOrElse(State())
}
case class State(
currentTab: Var[DialogTab] = Var(DialogTab.Follow),
imageQuality: Var[ImageQuality] = Var(ImageQuality.Default),
timeFormat: Var[TimeFormat] = Var(TimeFormat.Default),
showUserImages: Var[Boolean] = Var(false),
nightMode: Var[Boolean] = Var(false),
columnWidthScale: Var[Double] = Var(1.0)
) { state =>
def toJsObject: JsState = new JsState {
val currentTab: js.UndefOr[String] = state.currentTab.get.label
val imageQuality: js.UndefOr[String] = state.imageQuality.get.label
val timeFormat: js.UndefOr[String] = state.timeFormat.get.label
val showUserImages: js.UndefOr[Boolean] = state.showUserImages.get
val nightMode: js.UndefOr[Boolean] = state.nightMode.get
val columnWidthScale: js.UndefOr[Double] = state.columnWidthScale.get
}
}
object State {
def fromJsObject(jsState: JsState): State = State(
currentTab = Var(fromField(jsState.currentTab, DialogTab.fromString, DialogTab.Follow)),
imageQuality = Var(fromField(jsState.imageQuality, ImageQuality.fromString, ImageQuality.Default)),
timeFormat = Var(fromField(jsState.timeFormat, TimeFormat.fromString, TimeFormat.Default)),
showUserImages = Var(jsState.showUserImages.getOrElse(false)),
nightMode = Var(jsState.nightMode.getOrElse(false)),
columnWidthScale = Var(jsState.columnWidthScale.getOrElse(1.0))
)
}
private def fromField[T, U](jsField: js.UndefOr[T], f: T => Option[U], default: U): U = {
jsField.toOption.flatMap(f).getOrElse(default)
}
@ScalaJSDefined
trait JsState extends js.Object {
def currentTab: js.UndefOr[String]
def imageQuality: js.UndefOr[String]
def timeFormat: js.UndefOr[String]
def showUserImages: js.UndefOr[Boolean]
def nightMode: js.UndefOr[Boolean]
def columnWidthScale: js.UndefOr[Double]
}
}
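// A minimal usage sketch, assuming a browser context (Scala.js + localStorage): persist
// the current settings and read them back via the helpers above. The ImageQuality.High
// choice is an arbitrary value picked for illustration.
object ViewModelUsageSketch {
  import ViewModel._

  def roundTrip(): State = {
    val state = State(imageQuality = Var(ImageQuality.High))
    persistState(state)
    loadState()
  }
}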
| xheres/api-gbfraidfinder | client/src/main/scala/walfie/gbf/raidfinder/client/ViewModel.scala | Scala | mit | 4,228 |
package wtf.shekels.alice.als.db.objects
import reactivemongo.bson._
import sx.blah.discord.handle.obj.IChannel
/**
* @author alice
* @since 10/16/17.
*/
class Channel(val name: String,
val id: String,
val position: Int,
val topic: String,
val isPrivate: Boolean) {
def toBSON: BSONDocument = {
BSONDocument("id" -> id,
"name" -> name,
"topic" -> topic,
"private" -> isPrivate)
}
override def toString = s"$id - $name - $position - $topic"
implicit object ChannelReader extends BSONDocumentReader[Channel] {
override def read(bson: BSONDocument): Channel = {
      val channel: Option[Channel] = for {
        name <- bson.getAs[String]("name")
        id <- bson.getAs[String]("id")
        position <- bson.getAs[Int]("position")
        topic <- bson.getAs[String]("topic")
        isPrivate <- bson.getAs[Boolean]("isPrivate")
      } yield new Channel(name, id, position, topic, isPrivate)
channel match {
case Some(c) => c
case None => null
}
}
}
}
object Channel {
implicit class ChannelImplicit(chan: IChannel) {
implicit val name: String = chan.getName
implicit val id: String = chan.getStringID
implicit val isPrivate: Boolean = chan.isPrivate
implicit val position: Int = if (isPrivate) 0 else chan.getPosition
implicit val topic: String = if (isPrivate || chan.getTopic.eq(null)) "" else chan.getTopic
implicit val channel: Channel = new Channel(name, id, position, topic, isPrivate)
implicit def toBSON: BSONDocument = channel.toBSON
}
}
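// A minimal usage sketch with made-up channel values, showing the BSON document shape
// produced by toBSON for a manually constructed Channel.
object ChannelUsageSketch {
  def example: BSONDocument =
    new Channel(name = "general", id = "1234567890", position = 0, topic = "chatter", isPrivate = false).toBSON
}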
| antflga/AL-S | src/main/scala/wtf.shekels.alice.als/db/objects/Channel.scala | Scala | gpl-3.0 | 1,670 |
/*
* Copyright (c) 2017 Lucas Satabin
*
* Licensed under the Apache License Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package toolxit
package util
import scala.annotation.tailrec
import scala.util.{
Try,
Success,
Failure
}
import java.io.{
File,
Reader,
FileReader,
BufferedReader,
LineNumberReader,
InputStreamReader
}
/** Set of standard enumerators. */
object Enumerator {
@inline
private def feedI[Elt, A](k: K[Elt, A], s: Stream[Elt]): Try[Iteratee[Elt, A]] =
k(s).flatMap { case (it, _) => Try(it) }
/** Feeds an end-of-stream to the iteratee. */
def eos[Elt, A]: Enumerator[Elt, A] = {
case Cont(None, k) => feedI(k, Eos(None)).flatMap {
case i @ Done(_) => Try(i)
case Cont(None, _) => Try(throwError(exnDivergent))
case Cont(Some(e), _) => Try(throwError(e))
}
case i => Try(i)
}
/** Feeds an error to the iteratee. */
def err[Elt, A](e: Exception): Enumerator[Elt, A] = {
case Cont(None, k) => feedI(k, Eos(Some(e))).flatMap {
case i @ Done(_) => Try(i)
case Cont(None, _) => Try(throwError(exnDivergent))
case Cont(Some(e), _) => Try(throwError(e))
}
case i => Try(i)
}
/** Feeds a string to the iteratee. */
def string[A](in: String): Enumerator[Char, A] = {
case Cont(None, k) => feedI(k, Chunk(in.toVector))
case i => Try(i)
}
/** Feeds a sequence to the iteratee. */
def seq[T, A](s: Seq[T]): Enumerator[T, A] = {
case Cont(None, k) => feedI(k, Chunk(s))
case i => Try(i)
}
/** Feeds the iteratee with the string content of a reader, line by line.
* New lines are represented by the `\\n` character and spaces at the end of lines are trimmed.
*/
def reader[A](reader: Reader, chunkSize: Int = 1024): Enumerator[(Char, Int, Int), A] = {
case Cont(None, k) =>
@tailrec
def loop(line: Int, reader: BufferedReader, k: K[(Char, Int, Int), A]): Try[Iteratee[(Char, Int, Int), A]] =
Try(reader.readLine()) match {
case Success(null) => feedI(k, Eos(None))
case Success(s) =>
          feedI(k, Chunk(s.replaceAll("\\s*$", "\n").zipWithIndex.map { case (c, idx) => (c, line, idx + 1) })) match {
case Success(Cont(None, k)) => loop(line + 1, reader, k)
case i => i
}
case Failure(e: Exception) => feedI(k, Eos(Some(e)))
case Failure(t) => throw t
}
def check(line: Int, reader: BufferedReader)(it: Iteratee[(Char, Int, Int), A]) = it match {
case Cont(None, k) => loop(line, reader, k)
case i => Try(i)
}
for {
r <- loop(1, new BufferedReader(reader, chunkSize), k)
_ <- Try(reader.close).orElse(Try(()))
} yield r
case i => Try(i)
}
/** Feeds the iteratee with the string content of a file, character by character. */
def textFile[A](file: File, chunkSize: Int = 1024): Enumerator[(Char, Int, Int), A] =
reader[A](new FileReader(file), chunkSize)
/** Feeds the iteratee with the string content of a resource, character by character. */
def resource[A](name: String, chunkSize: Int = 1024): Enumerator[(Char, Int, Int), A] =
reader(new InputStreamReader(getClass.getResourceAsStream(name)), chunkSize)
def env[A](env: TeXEnvironment): Enumerator[(Char, Option[String], Int, Int), A] = {
case it @ Cont(None, k) =>
@tailrec
def loop(k: K[(Char, Option[String], Int, Int), A]): Try[Iteratee[(Char, Option[String], Int, Int), A]] =
env.popInput() match {
case None =>
// no input left, end of the story
Try(it)
case Some((reader, name, None)) =>
// no line currently read by this reader, process next one
if (env.endinputEncountered) {
// actually we encountered an endinput command, close
// current input and process next open input
env.endinputEncountered = false
Try(reader.close()) match {
case Success(()) => loop(k)
case Failure(e: Exception) => feedI(k, Eos(Some(e)))
case Failure(t) => throw t
}
} else
// yes we really do want to read the next line
Try(reader.readLine()) match {
case Success(null) =>
// however none is left in the input, close and notify eos
Try(reader.close()) match {
case Success(()) => loop(k)
case Failure(e: Exception) => feedI(k, Eos(Some(e)))
case Failure(t) => throw t
}
case Success(s) =>
// there is something left to read in the current input!
// buffer the line and feed it character by character
// push the new buffered line with first character processed
            // ToolXiT reads the line, removes trailing spaces and adds a \n at the end,
            // so a line is never empty
            val line = s.replaceAll("\\s*$", "\n")
env.pushInput(reader, name, Some(line -> 1))
feedI(k, Chunk(List((line(0), name, reader.getLineNumber, 1)))) match {
case Success(Cont(None, k)) => loop(k)
case i => i
}
case Failure(e: Exception) =>
env.pushInput(reader, name, None)
feedI(k, Eos(Some(e)))
case Failure(t) => throw t
}
case Some((reader, name, Some((line, col)))) =>
// currently reading a buffered line
if (env.endOfLineEncountered || col >= line.length) {
// but we encountered an end of line character, then drop it and goto next
// or we reached the end of line, goto next
env.endOfLineEncountered = false
env.pushInput(reader, name, None)
loop(k)
} else {
// feed with next character and continue processing line
env.pushInput(reader, name, Some(line -> (col + 1)))
feedI(k, Chunk(List((line(col), name, reader.getLineNumber, col + 1)))) match {
case Success(Cont(None, k)) => loop(k)
case i => i
}
}
}
for {
r <- loop(k)
} yield r
case i => Try(i)
}
}
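// A minimal application sketch (hedged: `someIteratee` is assumed to be built elsewhere;
// only the application pattern is shown). An Enumerator[Elt, A] is applied to an
// Iteratee[Elt, A] and yields a Try of the fed iteratee, so feeding a string and then
// signalling end-of-stream looks like:
//
//   val fed = Enumerator.string[A]("hello world")(someIteratee)
//   val finished = fed.flatMap(Enumerator.eos[Char, A])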
| satabin/toolxit-ng | core/src/main/scala/toolxit/util/Enumerator.scala | Scala | apache-2.0 | 7,079 |
/* Copyright 2015 White Label Personal Clouds Pty Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package me.welcomer.framework.pico.dsl
import scala.concurrent.Future
import scala.concurrent.duration._
import akka.actor.Actor
import akka.actor.ActorLogging
import akka.actor.ActorRef
import play.api.libs.json.JsObject
import me.welcomer.framework.pico.EventedEvent
import me.welcomer.framework.pico.EventedFunction
import me.welcomer.framework.pico.EventedMessage
import me.welcomer.framework.pico.EventedResult
import me.welcomer.framework.pico.PicoRulesetContainer
trait PicoRaiseRemoteEventDSL { this: Actor with ActorLogging =>
import akka.pattern.ask
import akka.util.Timeout
import context.dispatcher
def rulesetContainer: ActorRef
def raiseRemoteEvent(evented: EventedMessage): Unit = {
log.debug("[raiseRemoteEvent] {}", evented)
rulesetContainer ! PicoRulesetContainer.RaiseRemoteEvented(evented)
}
def raiseRemoteEventWithReplyTo(evented: EventedFunction): Future[EventedResult[_]] = {
// def raiseRemoteEventWithReplyTo(evented: EventedMessage): Future[EventedResult[_]] = {
log.debug("[raiseRemoteEventWithReplyTo] {}", evented)
// implicit val timeout: Timeout = evented match {
// case event: EventedEvent => 5.seconds // TODO: Figure a better way to handle timeouts for events.. (probably only matters once we have directives?)
// case func @ EventedFunction(module, _, _, _) => module.timeout
// }
implicit def timeout: Timeout = evented.module.timeout
(rulesetContainer ? PicoRulesetContainer.RaiseRemoteEventedWithReplyTo(evented)).mapTo[EventedResult[_]]
}
def raiseRemoteEvent(eventDomain: String, eventType: String, attributes: JsObject, entityId: String): Unit = {
raiseRemoteEvent(EventedEvent(eventDomain, eventType, attributes = attributes, entityId = Some(entityId)))
}
}
| welcomer/framework | src/main/scala/me/welcomer/framework/pico/dsl/PicoRaiseRemoteEventDSL.scala | Scala | apache-2.0 | 2,440 |
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
package scaps.eclipse.core.adapters
import java.io.ByteArrayInputStream
import org.eclipse.core.resources.IResource
import org.eclipse.jdt.core.ICompilationUnit
import org.eclipse.jdt.core.IJavaElement
import org.eclipse.jdt.core.IJavaProject
import org.eclipse.jdt.core.IPackageFragmentRoot
import org.eclipse.jdt.internal.core.PackageFragment
import org.junit.Test
import org.scalaide.core.IScalaProject
import org.scalaide.core.testsetup.TestProjectSetup
import org.junit.After
import org.junit.Before
import org.junit.Assert._
import com.typesafe.scalalogging.StrictLogging
import org.eclipse.core.resources.ResourcesPlugin
import org.eclipse.jdt.internal.core.JavaProject
class ScapsAdapterIntegrationTest extends TestProjectSetup("simple-structure-builder") with StrictLogging {
@Test
def testProjectIndexing: Unit = {
val indexDir = ResourcesPlugin.getWorkspace.getRoot.getLocation.toOSString + "/testing/index"
logger.info(s"IndexDir: $indexDir")
val scapsAdapter = new ScapsAdapter(indexDir)
// setup
addSourceFile(project)("Calculator.scala", """
class Home {
def plus(num1: Int, num2: Int): Int = num1 + num2
}""")
val javaProject = project.javaProject
val classPath = extractClassPath(javaProject)
val projectSourceFragmentRoots = javaProject.getAllPackageFragmentRoots.filter(_.getKind == IPackageFragmentRoot.K_SOURCE).toList
val compilationUnits = projectSourceFragmentRoots.flatMap(findSourceFiles)
// SUT
val indexResetResult = scapsAdapter.indexReset
val indexResult = scapsAdapter.indexProject(classPath, compilationUnits)
val indexFinalizeResult = scapsAdapter.indexFinalize
val searchResult = scapsAdapter.search("Home")
// verify
assertTrue(indexResetResult.isRight)
assertTrue(indexResult.isRight)
assertTrue(indexFinalizeResult.isRight)
assertTrue(searchResult.isRight)
}
private def extractClassPath(javaProject: IJavaProject): List[String] = {
val resolvedClassPath = javaProject.getResolvedClasspath(true)
resolvedClassPath.map(_.getPath.toString).toList
}
private def findSourceFiles(fragmentRoot: IPackageFragmentRoot): Seq[ICompilationUnit] = {
def recursiveFindSourceFiles(javaElements: Array[IJavaElement]): Array[ICompilationUnit] = {
val packageFragments = javaElements.collect { case p: PackageFragment => p }
packageFragments.toList match {
case Nil => Array()
case _ =>
val elements = packageFragments.map { p => (p.getCompilationUnits, p.getChildren) }.unzip
val sourceFiles = elements._1.flatten
val subPackageFragments = elements._2.flatten
sourceFiles ++ recursiveFindSourceFiles(subPackageFragments)
}
}
recursiveFindSourceFiles(fragmentRoot.getChildren)
}
def addSourceFile(project: IScalaProject)(name: String, contents: String) = {
val folder = project.underlying.getFolder("src")
if (!folder.exists())
folder.create(IResource.NONE, true, null)
val file = folder.getFile(name)
if (!file.exists()) {
val source = new ByteArrayInputStream(contents.getBytes())
file.create(source, IResource.FORCE, null)
}
}
}
| flomerz/scala-ide-scaps | scala-ide-scaps-tests/src/scaps/eclipse/core/adapters/ScapsAdapterIntegrationTest.scala | Scala | mpl-2.0 | 3,414 |
package ucesoft.cbm.expansion
import javax.sound.sampled.AudioFormat
import javax.sound.sampled.DataLine
import javax.sound.sampled.SourceDataLine
import javax.sound.sampled.AudioSystem
import ucesoft.cbm.ChipID
class DigiMaxCart(digiAddress:Int) extends ExpansionPort {
val TYPE : ExpansionPortType.Value = ExpansionPortType.DIGIMAX
val name = "DigiMAX"
val EXROM = true
val GAME = true
val ROML = null
val ROMH = null
DigiMAX.enabled(true,false)
private[this] val soundData = Array(0,0,0,0)
@inline private def checkAddress(address:Int) : Boolean = (address & 0xFFFC) == digiAddress
final override def read(address: Int, chipID: ChipID.ID = ChipID.CPU) = {
if (checkAddress(address)) soundData(address & 3) else 0
}
final override def write(address: Int, value: Int, chipID: ChipID.ID = ChipID.CPU) : Unit = {
if (checkAddress(address)) {
val channel = address & 3
DigiMAX.selectChannel(channel)
DigiMAX.write(value)
}
}
override def eject : Unit = {
DigiMAX.enabled(false,false)
}
}
object DigiMAX {
private[this] var _enabled,enabledOnUserPort = false
private[this] final val DEFAULT_SAMPLE_RATE = 44100
private[this] var sampleRate = DEFAULT_SAMPLE_RATE
private[this] var lines : Array[SourceDataLine] = _
private[this] lazy val buffers = Array.ofDim[Byte](4,256)
private[this] lazy val pos = Array.ofDim[Int](4)
private[this] var channel = 0
private def createLines(fHz:Int) = {
(for(i <- 0 to 3) yield {
val af = new AudioFormat(fHz,8,1,false, false)
val dli = new DataLine.Info(classOf[SourceDataLine], af, fHz * 2)
val dataLine = try {
AudioSystem.getLine(dli).asInstanceOf[SourceDataLine]
}
catch {
case t:Throwable =>
null
}
if (dataLine != null) dataLine.open(dataLine.getFormat,fHz * 2)
dataLine
}).toArray
}
def getSampleRate : Int = sampleRate
def setSampleRate(fHz:Int) : Unit = {
sampleRate = fHz
if (lines != null) {
for(l <- lines) if (l != null) l.close
}
lines = createLines(fHz)
if (_enabled) for(l <- lines) if (l != null) l.start
}
def selectChannel(channel:Int) : Unit = {
this.channel = channel
}
def enabled = _enabled
def isEnabledOnUserPort = _enabled && enabledOnUserPort
def enabled(on:Boolean,enabledOnUserPort:Boolean = false) : Unit = {
_enabled = on
if (on && lines == null) setSampleRate(DEFAULT_SAMPLE_RATE)
this.enabledOnUserPort = enabledOnUserPort
if (lines != null) {
for (dl <- lines) {
if (dl != null) on match {
case true =>
dl.start
case false =>
dl.stop
}
}
}
}
def write(value:Int) : Unit = {
val buffer = buffers(channel)
buffer(pos(channel)) = value.asInstanceOf[Byte]
pos(channel) = pos(channel) + 1
if (pos(channel) == buffer.length) {
pos(channel) = 0
val dataLine = lines(channel)
if (dataLine != null) dataLine.write(buffer,0,buffer.length)
}
}
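  // A minimal usage sketch (assumed call order, values arbitrary): enable the device,
  // pick a sample rate and a DAC channel, then stream unsigned 8-bit samples.
  //   DigiMAX.enabled(true)
  //   DigiMAX.setSampleRate(44100)
  //   DigiMAX.selectChannel(0)
  //   samples.foreach(DigiMAX.write)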
} | abbruzze/kernal64 | Kernal64/src/ucesoft/cbm/expansion/DigiMAX.scala | Scala | mit | 3,098 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.abstractnn.{Initializable, TensorModule}
import com.intel.analytics.bigdl.optim.Regularizer
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.{Shape, T, Table}
import scala.reflect.ClassTag
/**
* The `Linear` module applies a linear transformation to the input data,
* i.e. `y = Wx + b`. The `input` given in `forward(input)` must be either
* a vector (1D tensor) or matrix (2D tensor). If the input is a vector, it must
* have the size of `inputSize`. If it is a matrix, then each row is assumed to be
* an input sample of given batch (the number of rows means the batch size and
* the number of columns should be equal to the `inputSize`).
*
* @param inputSize the size the each input sample
* @param outputSize the size of the module output of each sample
* @param wRegularizer: instance of [[Regularizer]]
* (eg. L1 or L2 regularization), applied to the input weights matrices.
* @param bRegularizer: instance of [[Regularizer]]
* applied to the bias.
*/
@SerialVersionUID( 359656776803598943L)
class Linear[T: ClassTag](
val inputSize: Int,
val outputSize: Int,
val withBias: Boolean = true,
var wRegularizer: Regularizer[T] = null,
var bRegularizer: Regularizer[T] = null,
private val initWeight: Tensor[T] = null,
private val initBias: Tensor[T] = null,
private val initGradWeight: Tensor[T] = null,
private val initGradBias: Tensor[T] = null
)(implicit ev: TensorNumeric[T]) extends TensorModule[T] with Initializable {
val weight: Tensor[T] =
if (initWeight != null) initWeight else Tensor[T](outputSize, inputSize)
val bias: Tensor[T] =
if (initBias != null) initBias else if (withBias) Tensor[T](outputSize) else null
val addBuffer: Tensor[T] = Tensor[T]()
val gradWeight: Tensor[T] =
if (initGradWeight != null) initGradWeight else Tensor[T]()
val gradBias: Tensor[T] =
if (initGradBias != null) initGradBias else if (withBias) Tensor[T]() else null
{
val stdv = 1.0 / math.sqrt(weight.size(2))
val wInit: InitializationMethod = RandomUniform(-stdv, stdv)
val bInit: InitializationMethod = RandomUniform(-stdv, stdv)
setInitMethod(wInit, bInit)
}
override def reset(): Unit = {
if (initWeight == null) {
weightInitMethod.init(weight, VariableFormat.OUT_IN)
}
if (initBias == null) {
Option(bias).foreach(biasInitMethod.init(_, VariableFormat.ONE_D))
}
zeroGradParameters()
}
override def updateOutput(input: Tensor[T]): Tensor[T] = {
require(input.dim() == 1 || input.dim() == 2,
"Linear: " + ErrorInfo.constrainInputAsVectorOrBatch +
s"input dim ${input.dim()}")
if (input.dim() == 1) {
output.resize(Array(outputSize))
if (withBias) output.copy(bias) else output.zero()
output.addmv(ev.fromType[Int](1), weight, input)
}
else if (input.dim() == 2) {
val nFrame = input.size(1)
val nElement = output.nElement
val t = Array(nFrame, weight.size(1))
output.resize(t)
if (output.nElement() != nElement) {
output.zero()
}
if (addBuffer.nElement() != nFrame) {
addBuffer.resize(Array(nFrame)).fill(ev.one)
}
output.addmm(ev.zero, output, ev.one, input, weight.t)
if (withBias) output.addr(ev.one, addBuffer, bias)
}
output
}
override def updateGradInput(input: Tensor[T], gradOutput: Tensor[T]): Tensor[T] = {
require(input.dim() == 1 || input.dim() == 2,
"Linear: " + ErrorInfo.constrainInputAsVectorOrBatch +
s"input dim ${input.dim()}")
val nElement = gradInput.nElement()
gradInput.resizeAs(input)
if (nElement != gradInput.nElement()) {
gradInput.zero()
}
if (input.dim() == 1) {
gradInput.addmv(ev.fromType[Int](0), ev.fromType[Int](1), weight.t(), gradOutput)
} else if (input.dim() == 2) {
gradInput.addmm(ev.fromType[Int](0), ev.fromType[Int](1), gradOutput, weight)
}
gradInput
}
override def accGradParameters(input: Tensor[T], gradOutput: Tensor[T]): Unit = {
require(input.dim() == 1 || input.dim() == 2,
"Linear: " + ErrorInfo.constrainInputAsVectorOrBatch +
s"input dim ${input.dim()}")
gradWeight.resize(outputSize, inputSize)
if (withBias) {
gradBias.resize(outputSize)
}
if (input.dim() == 1) {
if (scaleW != 0) {
gradWeight.addr(ev.fromType[Double](scaleW), gradOutput, input)
}
if (withBias && scaleB != 0) {
gradBias.add(ev.fromType[Double](scaleB), gradOutput)
}
}
else if (input.dim() == 2) {
if (scaleW != 0) {
gradWeight.addmm(ev.fromType[Double](scaleW), gradOutput.t, input)
}
if (withBias && scaleB != 0) {
gradBias.addmv(ev.fromType[Double](scaleB), gradOutput.t, addBuffer)
}
}
if (null != wRegularizer && scaleW != 0) {
wRegularizer.accRegularization(weight, gradWeight, scaleW)
}
if (null != bRegularizer && scaleB != 0) {
bRegularizer.accRegularization(bias, gradBias, scaleB)
}
}
override def clearState() : this.type = {
super.clearState()
addBuffer.set()
this
}
override def parameters(): (Array[Tensor[T]], Array[Tensor[T]]) = {
if (null == bias) {
(Array(this.weight), Array(this.gradWeight))
} else {
(Array(this.weight, this.bias), Array(this.gradWeight, this.gradBias))
}
}
override def equals(obj: Any): Boolean = {
if (!super.equals(obj)) {
return false
}
if (!obj.isInstanceOf[Linear[T]]) {
return false
}
val other = obj.asInstanceOf[Linear[T]]
if (this.eq(other)) {
return true
}
gradWeight == other.gradWeight &&
gradBias == other.gradBias &&
weight == other.weight &&
bias == other.bias
}
override def hashCode() : Int = {
val seed = 37
var hash = super.hashCode()
hash = hash * seed + gradWeight.hashCode()
hash = hash * seed + gradBias.hashCode()
hash = hash * seed + weight.hashCode()
hash = hash * seed + bias.hashCode()
hash
}
override def toString(): String = {
s"${getPrintName}($inputSize -> $outputSize)"
}
}
object Linear extends quantized.Quantizable {
def apply[@specialized(Float, Double) T: ClassTag](
inputSize: Int,
outputSize: Int,
withBias: Boolean = true,
wRegularizer: Regularizer[T] = null,
bRegularizer: Regularizer[T] = null,
initWeight: Tensor[T] = null,
initBias: Tensor[T] = null,
initGradWeight: Tensor[T] = null,
initGradBias: Tensor[T] = null
)(implicit ev: TensorNumeric[T]) : Linear[T] = {
new Linear[T](inputSize, outputSize,
withBias, wRegularizer, bRegularizer, initWeight, initBias, initGradWeight, initGradBias)
}
override def quantize[T: ClassTag](module: Module[T])(
implicit ev: TensorNumeric[T]): Module[T] = {
val linear = module.asInstanceOf[Linear[T]]
val quantizedLinear = quantized.Linear[T](linear.weight.size(2), linear.weight.size(1),
initWeight = linear.weight, initBias = linear.bias)
quantizedLinear.setName(linear.getName())
}
}
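/**
 * A minimal usage sketch of the module above; the sizes and random inputs are arbitrary
 * assumptions for illustration. A forward pass computes y = Wx + b for a batch, and a
 * backward pass propagates gradients back to the input.
 */
private object LinearUsageSketch {
  def example(): Unit = {
    val linear = Linear[Float](inputSize = 4, outputSize = 2)
    val input = Tensor[Float](3, 4).rand() // batch of 3 samples, 4 features each
    val output = linear.forward(input) // 3 x 2
    val gradOutput = Tensor[Float](3, 2).rand()
    val gradInput = linear.backward(input, gradOutput) // 3 x 4
  }
}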
| yiheng/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/nn/Linear.scala | Scala | apache-2.0 | 7,957 |
package org.scalaide.core.sbtbuilder
import org.eclipse.core.resources.IProject
import org.eclipse.core.resources.IncrementalProjectBuilder
import org.eclipse.core.runtime.IPath
import org.eclipse.core.runtime.NullProgressMonitor
import org.eclipse.jdt.core.IClasspathEntry
import org.eclipse.jdt.core.IJavaModelMarker
import org.eclipse.jdt.core.IJavaProject
import org.eclipse.jdt.core.JavaCore
import org.junit.AfterClass
import org.junit.Assert
import org.junit.BeforeClass
import org.junit.Test
import org.scalaide.core.IScalaProject
import org.scalaide.core.SdtConstants
import org.scalaide.core.testsetup.IProjectHelpers
import org.scalaide.core.testsetup.IProjectOperations
import org.scalaide.core.testsetup.SDTTestUtils.createProjectInWorkspace
import org.scalaide.core.testsetup.SDTTestUtils.findProjectProblemMarkers
import org.scalaide.core.testsetup.SDTTestUtils.markersMessages
import org.scalaide.core.testsetup.SDTTestUtils.workspace
import org.scalaide.ui.internal.preferences.ScalaPluginSettings
import org.scalaide.util.eclipse.EclipseUtils
import org.scalaide.util.internal.SettingConverterUtil
import NameHashingVulnerabilityTest.project
object NameHashingVulnerabilityTest extends IProjectOperations {
import org.scalaide.core.testsetup.SDTTestUtils._
private val projectName = "nameHashingVulnerability"
private var project: IScalaProject = _
private val bundleName = "org.scala-ide.sdt.core.tests"
private def withSrcOutputStructure(project: IProject, jProject: IJavaProject): Seq[IClasspathEntry] = {
val mainSourceFolder = project.getFolder("/src/main")
val mainOutputFolder = project.getFolder("/target/main")
val testSourceFolder = project.getFolder("/src/test")
val testOutputFolder = project.getFolder("/target/test")
Seq(mainSourceFolder -> mainOutputFolder,
testSourceFolder -> testOutputFolder).map {
case (src, out) => JavaCore.newSourceEntry(
jProject.getPackageFragmentRoot(src).getPath,
Array[IPath](),
jProject.getPackageFragmentRoot(out).getPath)
}
}
@BeforeClass def setup(): Unit = {
initializeProjects(bundleName, Seq(projectName)) {
project = createProjectInWorkspace(projectName, withSrcOutputStructure _)
}
}
@AfterClass def cleanup(): Unit = {
EclipseUtils.workspaceRunnableIn(EclipseUtils.workspaceRoot.getWorkspace) { _ =>
project.underlying.delete( /* force = */ true, /* monitor = */ null)
}
}
}
class NameHashingVulnerabilityTest extends IProjectOperations with IProjectHelpers {
import NameHashingVulnerabilityTest._
private val On = true
private val Off = false
private val errorTypes = Array(SdtConstants.ProblemMarkerId, IJavaModelMarker.JAVA_MODEL_PROBLEM_MARKER)
@Test def shouldCorrectlyBuildProjectForAllPossibleSettingsOfNameHashingFlag(): Unit = {
givenCleanWorkspaceForProjects(project)
whenNameHashingIs(Off)(thenThereIsNoErrors)
whenNameHashingIs(On)(thenThereIsNoErrors)
}
private def thenThereIsNoErrors(): Unit = {
val errors = markersMessages(findProjectProblemMarkers(project, errorTypes: _*).toList)
Assert.assertTrue("no error expected: " + errors.mkString(", "), errors.isEmpty)
}
private def whenNameHashingIs(isOn: Boolean)(otherwise: => Unit): Unit = {
val nameHashingProperty = SettingConverterUtil.convertNameToProperty(ScalaPluginSettings.nameHashing.name)
project.storage.setValue(nameHashingProperty, isOn)
workspace.build(IncrementalProjectBuilder.CLEAN_BUILD, new NullProgressMonitor)
workspace.build(IncrementalProjectBuilder.FULL_BUILD, new NullProgressMonitor)
otherwise
}
}
| Kwestor/scala-ide | org.scala-ide.sdt.core.tests/src/org/scalaide/core/sbtbuilder/NameHashingVulnerabilityTest.scala | Scala | bsd-3-clause | 3,651 |
package coursier.cli.internal
import java.io.File
import java.nio.file.{Path, Paths}
object PathUtil {
def isInPath(p: Path): Boolean = {
val p0 = p.toAbsolutePath.normalize
val pathValue = Option(System.getenv("PATH")).getOrElse("")
val pathEntries = pathValue.split(File.pathSeparator).filter(_.nonEmpty)
def pathDirs = pathEntries.iterator.map(Paths.get(_).toAbsolutePath.normalize)
pathDirs.exists { pathDir =>
p0.getNameCount == pathDir.getNameCount + 1 &&
p0.startsWith(pathDir)
}
}
}
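// A minimal usage sketch; the binary path below is an arbitrary assumption chosen purely
// to illustrate the check against the PATH entries.
object PathUtilUsageSketch {
  def main(args: Array[String]): Unit = {
    val candidate = Paths.get("/usr/local/bin/cs")
    println(s"on PATH: ${PathUtil.isInPath(candidate)}")
  }
}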
| alexarchambault/coursier | modules/cli/src/main/scala/coursier/cli/internal/PathUtil.scala | Scala | apache-2.0 | 549 |
package eu.phisikus.plotka.examples.ricart.agrawala
import java.util.concurrent.{CountDownLatch, Executors}
import com.typesafe.scalalogging.Logger
import eu.phisikus.plotka.conf.providers.FileConfigurationProvider
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{FunSuite, Matchers}
class RicartAgrawalaNodeTest extends FunSuite with Eventually with Matchers {
private val logger = Logger("EntryPoint")
private val configuration1Provider = new FileConfigurationProvider(Some("node1/application"))
private val configuration2Provider = new FileConfigurationProvider(Some("node2/application"))
private val node1Conf = configuration1Provider.loadConfiguration
private val node2Conf = configuration2Provider.loadConfiguration
test("Two nodes should execute their critical sections") {
val executor = Executors.newFixedThreadPool(2)
val barrier = new CountDownLatch(2)
val testNode1 = new RicartAgrawalaNode(node1Conf, () => {
logger.info("First node: I've reached my critical section! ")
barrier.countDown()
})
val testNode2 = new RicartAgrawalaNode(node2Conf, () => {
logger.info("Second node: I did my job!")
barrier.countDown()
})
executor.execute(() => testNode1.start())
executor.execute(() => testNode2.start())
eventually(timeout(Span(5, Seconds)), interval(Span(300, Millis))) {
barrier.await()
}
testNode1.stop()
testNode2.stop()
}
}
| phisikus/plotka | examples/ricart-agrawala/src/test/scala/eu/phisikus/plotka/examples/ricart/agrawala/RicartAgrawalaNodeTest.scala | Scala | bsd-3-clause | 1,509 |
import sbt._
import Keys._
import com.typesafe.sbt.osgi.SbtOsgi._
object Build extends Build {
import BuildSettings._
import Dependencies._
override lazy val settings = super.settings :+ {
shellPrompt := { s => Project.extract(s).currentProject.id + " > " }
}
lazy val root = Project("root",file("."))
.aggregate(docs, srv)
.settings(basicSettings: _*)
.settings(noPublishing: _*)
lazy val srv = Project("srv", file("srv"))
.settings(siteSettings: _*)
.settings(resolvers ++= resolutionRepos)
.settings(javaOptions += "-Xmx4G")
.settings(libraryDependencies ++=
compile(akkaActor, sprayJson, sprayCan, sprayCaching, sprayRouting, gt, gtServices) ++
runtime(akkaSlf4j, logback) ++
test(specs2)
)
lazy val docs = Project("docs", file("docs"))
.settings(docsSettings: _*)
.settings(libraryDependencies ++= test(akkaActor, sprayJson, gtGeotools,gtJetty))
}
| geotrellis/geotrellis-site | service/project/Build.scala | Scala | apache-2.0 | 936 |
package org.fayalite.ui.app.comm
import org.scalajs.dom.{Event, MessageEvent, WebSocket}
import rx._
import rx.core.Obs
import scala.scalajs.js.Dynamic.{global => g}
import scala.scalajs.js._
import scala.util.Try
object PersistentWebSocket {
// val cookies = document.cookie
// case class Register(cookies: String)
val host = org.scalajs.dom.document.location.host
var pws = new PersistentWebSocket()
case class PRTemp(
)
def send(s: String) = pws.send(s)
/*
def sendPR(pr: ParseRequest) = {
import upickle._
send(write(pr))
}
*/
def getWSURI = {
val uri = "ws://" +
org.scalajs.dom.document.location.host
if (uri.endsWith("/")) uri.dropRight(1) else uri
}
}
// TODO : Switch to upickle once errors are resolved.
// def sendV(v: String) = sendKV("tab", v)
/* def sendKV(k: String, v: String, f: Dynamic => Unit = (d: Dynamic) => (),
seqKV: Seq[(String, String)] = Seq()): String = {
val id = Random.nextInt().toString
val kvStr = {Seq(("requestId", id)) ++ seqKV}.map{
case (ks,vs) => s""""$ks": "$vs""""}.mkString(",")
def send() = pws.ws.send(
s"""{"$k": "$v", $kvStr, "cookies":"${document.cookie}"}"""
)
if (pws.open) {
send()
}
else {
pws.ws.onopen = (e: Event) => {
pws.defaultOnOpen(e)
send()
}
}
// return future of function here on timeout also.
// set an obs on the future to terminate the obs on parsed message
val o: Obs = Obs(parsedMessage) {
Try {
if (parsedMessage().requestId.toString == id) {
f(parsedMessage())
}
}
}
id*/
class PersistentWebSocket(
wsUri: String = PersistentWebSocket.getWSURI
) {
val onOpen: Var[Event] = Var(null.asInstanceOf[Event])
val onClose: Var[Event] = Var(null.asInstanceOf[Event])
val onError: Var[Event] = Var(null.asInstanceOf[Event])
val message: Var[MessageEvent] = Var(null.asInstanceOf[MessageEvent])
val parsedMessage = Var(null.asInstanceOf[Dynamic])
val messageStr: Var[String] = Var(null.asInstanceOf[String])
var open = Var(false)
// haoyi li workbench
def mkSocket = socket() = new WebSocket(wsUri)
val socket = Var(new WebSocket(wsUri))
var toSend = Array[String]()
def send(s: String) = {
if (open()) {
Try {
socket().send(s)
}
} else toSend = toSend :+ s
}
/* val heartBeat = Input.heartBeat.foreach{
hb =>
Schema.TryPrintOpt{
// println("heartbeat sent" + open())
if (open()) send("heartbeat") //Window.metaData)
}
}*/
/*
val msgPrinter = Obs(messageStr, skipInitial = true) {
println("ws msg " + messageStr().slice(0, 100))
}*/
val socketWatch = Obs(socket) {
val ws = socket()
ws.onopen = (e: Event) => {onOpen() = e; open() = true; println("open") ;
// send("debug")
toSend.foreach{send}
toSend = Array()
}
ws.onclose = (e: Event) => {onClose() = e; open() = false; println("closed")}
ws.onerror = (e: Event) => {onError() = e; open() = false ; println("wserr" + e.toString)}
ws.onmessage = (me: MessageEvent) => {
Try {
message() = me
messageStr() = me.data.toString
parsedMessage() = JSON.parse(me.data.toString)
}
}
}
}
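// A minimal usage sketch: messages sent before the socket is open are buffered in toSend
// and flushed once onopen fires; incoming frames can be observed through messageStr.
object PersistentWebSocketUsageSketch {
  def wire(): Obs = {
    PersistentWebSocket.send("hello") // buffered until the connection opens
    Obs(PersistentWebSocket.pws.messageStr, skipInitial = true) {
      println("received: " + PersistentWebSocket.pws.messageStr())
    }
  }
}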
| ryleg/fayalite | src/main/scala/org/fayalite/sjs/comm/PersistentWebSocket.scala | Scala | mit | 3,361 |
package net.room271.wk4
object Exercises {
/**
* Run the tests via sbt:
*
* $ sbt
* $ test-only net.room271.wk4.ExercisesSpec
*/
/**
* The rules:
*
* Return a list for 0 to n where each item is the next number in the List, unless:
*
* it is divisible by 3 -> in which case return 'fizz'
* it is divisible by 5 -> return 'buzz'
* it is divisible by both -> return 'fizz buzz'
*
* e.g. fizzBuzz(3) would give List(1, 2, 'fizz')
*/
  def fizzBuzz(n: Int): List[String] =
    for (i <- (1 to n).toList) yield {
      if (i % 3 == 0 && i % 5 == 0) "fizz buzz"
      else if (i % 3 == 0) "fizz"
      else if (i % 5 == 0) "buzz"
      else i.toString
    }
/**
* Return only even numbers in a list
*
* Hint: use filter
*/
def filterEven(list: List[Int]): List[Int] = list.filter(_ % 2 == 0)
/**
* Return only odd numbers in a list
*
* Restriction: filter is not allowed this time!
*/
def filterOdd(list: List[Int]): List[Int] = for (l <- list if l % 2 != 0) yield l
/**
* Define a map function for List[Int]
*
* Note, you can use for .. yield (in fact, the two are equivalent)
*/
def map(list: List[Int], f: Int => Int): List[Int] =
for (l <- list) yield f(l)
/**
* Write a function which returns the intersection of two sets.
*
* The intersection is the subset of items which are in both sets
*
* Restriction: the intersect Set method is not allowed!
*/
def intersection(s1: Set[Int], s2: Set[Int]): Set[Int] = {
for (elem <- s1 if s2 contains elem) yield elem
}
}
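/**
 * A minimal usage sketch of the exercises above; the expected outputs in the comments
 * follow the rules documented on each function.
 */
object ExercisesUsageSketch {
  def main(args: Array[String]): Unit = {
    println(Exercises.fizzBuzz(5))                              // List(1, 2, fizz, 4, buzz)
    println(Exercises.filterEven(List(1, 2, 3, 4)))             // List(2, 4)
    println(Exercises.filterOdd(List(1, 2, 3, 4)))              // List(1, 3)
    println(Exercises.map(List(1, 2, 3), _ * 2))                // List(2, 4, 6)
    println(Exercises.intersection(Set(1, 2, 3), Set(2, 3, 4))) // Set(2, 3)
  }
}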
| nicl/scala-school | src/main/scala/net/room271/wk4/Exercises.scala | Scala | gpl-3.0 | 1,642 |
package listV
//import reflect.Selectable.reflectiveSelectable
import scalaLibV.*
object sci {
val isEmpty = iftTrue
} | dotty-staging/dotty | tests/pos/i7711/crash2_1.scala | Scala | apache-2.0 | 122 |
//======================================================================================================================
// Facsimile: A Discrete-Event Simulation Library
// Copyright © 2004-2020, Michael J Allen.
//
// This file is part of Facsimile.
//
// Facsimile is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later
// version.
//
// Facsimile is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
// warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
// details.
//
// You should have received a copy of the GNU Lesser General Public License along with Facsimile. If not, see:
//
// http://www.gnu.org/licenses/lgpl.
//
// The developers welcome all comments, suggestions and offers of assistance. For further information, please visit the
// project home page at:
//
// http://facsim.org/
//
// Thank you for your interest in the Facsimile project!
//
// IMPORTANT NOTE: All patches (modifications to existing files and/or the addition of new files) submitted for
// inclusion as part of the official Facsimile code base, must comply with the published Facsimile Coding Standards. If
// your code fails to comply with the standard, then your patches will be rejected. For further information, please
// visit the coding standards at:
//
// http://facsim.org/Documentation/CodingStandards/
//======================================================================================================================
//======================================================================================================================
// Scala source file belonging to the org.facsim.collection.immutable.test package.
//======================================================================================================================
package org.facsim.collection.immutable.test
import org.facsim.collection.immutable.BinomialHeap
import org.scalacheck.Gen
import org.scalatestplus.scalacheck.ScalaCheckPropertyChecks
import scala.annotation.tailrec
import scala.math.Ordering.Double
import org.scalatest.funspec.AnyFunSpec
// Disable test-problematic Scalastyle checkers.
//scalastyle:off scaladoc
//scalastyle:off public.methods.have.type
//scalastyle:off multiple.string.literals
//scalastyle:off magic.numbers
/** Test harness for the [[org.facsim.collection.immutable.BinomialHeap]] class. */
final class BinomialHeapTest
extends AnyFunSpec
with ScalaCheckPropertyChecks {
/** Implicit ordering for Double values. */
private implicit val doubleOrdering: Ordering[Double] = Double.TotalOrdering
/** Check that an empty heap responds as such.
*
* @tparam A Type of element being stored in the heap.
*
* @param h An empty heap which is to be verified.
*/
private def verifyEmptyHeap[A](h: BinomialHeap[A]): Unit = {
// Verify that the heap gives the correct replies for an empty heap.
assert(h.isEmpty === true)
assert(h.nonEmpty === false)
val em: Option[A] = None
assert(h.minimum === em)
val eh: Option[BinomialHeap[A]] = None
assert(h.removeMinimum === eh)
val (em2, eh2) = h.minimumRemove
assert(em2 === None)
assert(eh2.isEmpty)
() // Return unit to avoid "discarded non-unit value" compiler warning
}
/** Check that a heap with a single member responds as such.
*
* @param h A heap with a single member which is to be verified.
*
* @param a Value of the sole member of the heap.
*/
private def verifyOneMemberHeap[A](h: BinomialHeap[A], a: A): Unit = {
// Verify that the heap is not empty.
assert(h.isEmpty === false)
assert(h.nonEmpty === true)
// Finding the minimum should result in the specified value, wrapped in Some.
assert(h.minimum === Some(a))
// Removing the minimum should result in an empty heap.
val oh = h.removeMinimum
assert(oh !== None)
verifyEmptyHeap(oh.get)
// Finding and removing the minimum should result in a tuple of the specified value and an empty heap.
val omh = h.minimumRemove
val (min, eh) = omh
assert(min === Some(a))
verifyEmptyHeap(eh)
}
/** Check that a heap with any number of members correctly sorts those members.
*
* The heap must also contain the same number of elements, as well as have them in the same order.
*
* @tparam A Type of value being stored in the heap.
*
* @param h A heap to be verified.
*
* @param la Unsorted list of elements stored in the heap.
*
* @param ordering Ordering used for sorting values of type `A`.
*/
private def verifyMultiMemberHeap[A](h: BinomialHeap[A], la: List[A])(implicit ordering: Ordering[A]): Unit = {
// Helper function to check each value in turn.
@tailrec
def nextMinimum(rh: BinomialHeap[A], rla: List[A]): Unit = {
// If the heap is empty, then the list must be too. Verify that the heap is empty.
if(rh.isEmpty) {
assert(rla.isEmpty === true)
verifyEmptyHeap(rh)
}
// Otherwise, it must have a minimum value. Check that this is the expected minimum value.
else {
// Firstly, check that we have a matching value in the sorted list.
assert(rla.nonEmpty === true)
// Check that the minimum value is as expected.
val min = rh.minimum
assert(min === Some(rla.head))
// Retrieve the heap for the next iteration.
val nextH = rh.removeMinimum.get
// Get and remove the minimum, verifying the result matches the same information from previous sources.
val (min2, nextH2) = rh.minimumRemove
assert(min2 === min)
assert(nextH2 === nextH)
// Perform the next iteration.
nextMinimum(nextH, rla.tail)
}
}
// Start the ball rolling, by sorting the expected contents.
nextMinimum(h, la.sorted)
}
// Look at the companion element.
describe(BinomialHeap.getClass.getCanonicalName) {
// Create empty heap.
describe(".empty[A]") {
// Verify that an empty tree is empty.
it("must create an empty heap") {
verifyEmptyHeap(BinomialHeap.empty[Int])
}
}
// Test the apply method..
describe(".apply[A](A*)") {
// Verify that it can handle empty argument lists.
it("can create an empty heap") {
verifyEmptyHeap(BinomialHeap[Int]())
}
// Verify that it can handle a single argument.
it("can create a heap with one member") {
forAll {i: Int =>
verifyOneMemberHeap(BinomialHeap(i), i)
}
}
// Verify that it can handle arbitrary numbers of elements.
it("can create a heap with any number of members") {
forAll {li: List[Int] =>
verifyMultiMemberHeap(BinomialHeap(li: _*), li)
}
}
}
}
// Now for the class methods.
describe(classOf[BinomialHeap[_]].getCanonicalName) {
// Test the canEqual method.
describe(".canEqual(Any)") {
// Verify that it reports false for different types of object, including heaps of different types.
it("must reject objects of a different type") {
forAll {li: List[Int] =>
val h = BinomialHeap(li: _*)
assert(h.canEqual(li) === false, "Fails on List[Int] comparison")
forAll {i: Int =>
assert(h.canEqual(i) === false, "Fails on Int comparison")
}
forAll {d: Double =>
assert(h.canEqual(d) === false, "Fails on Double comparison")
}
forAll {s: String =>
assert(h.canEqual(s) === false, "Fails on String comparison")
}
forAll {ld: List[Double] =>
val hd = BinomialHeap(ld: _*)
assert(h.canEqual(hd) === false, "Fails on BinomialHeap[Double] comparison")
}
}
}
// Verify that it reports true for heaps of the same type.
it("must accept heaps of the same type") {
forAll {(l1: List[Int], l2: List[Int]) =>
val h1 = BinomialHeap(l1: _*)
val h2 = BinomialHeap(l2: _*)
assert(h1.canEqual(h2))
}
}
// Verify that it accepts itself.
it("must accept itself") {
forAll {li: List[Int] =>
val h = BinomialHeap(li: _*)
assert(h.canEqual(h) === true)
}
}
}
// Test the equals method.
describe(".equals(Any)") {
// Verify that it reports false for different types of object, including heaps of different types.
it("must reject objects of a different type") {
forAll {li: List[Int] =>
val h = BinomialHeap(li: _*)
assert(h.equals(li) === false, "Fails on List[Int] comparison")
forAll {i: Int =>
assert(h.equals(i) === false, "Fails on Int comparison")
}
forAll {d: Double =>
assert(h.equals(d) === false, "Fails on Double comparison")
}
forAll {s: String =>
assert(h.equals(s) === false, "Fails on String comparison")
}
forAll {ld: List[Double] =>
val hd = BinomialHeap(ld: _*)
assert(h.equals(hd) === false, "Fails on BinomialHeap[Double] comparison")
}
}
}
// Verify that it reports the correct result for heaps of the same type.
it("must compare heaps of the same type correctly") {
forAll {(l1: List[Int], l2: List[Int]) =>
val l1s = l1.sorted
val l2s = l2.sorted
val h1 = BinomialHeap(l1: _*)
val h2 = BinomialHeap(l2: _*)
assert(h1.equals(h2) === l1s.equals(l2s))
}
}
// Verify that it compares equal to itself.
it("must equal itself") {
forAll {li: List[Int] =>
val h = BinomialHeap(li: _*)
assert(h.equals(h) === true)
}
}
}
// Test the hashcode method.
describe(".hashCode") {
// Verify that it reports the same value for heaps that should compare equal.
it("must return the same value for heaps that compare equal") {
forAll {li: List[Int] =>
val h1 = BinomialHeap(li: _*)
val h2 = BinomialHeap(li.reverse: _*)
assert(h1 === h2)
assert(h1.hashCode === h2.hashCode)
}
}
// Verify that it reports reasonably unique values for each heap. This may fail due to pure chance, but it's
// highly unlikely if the hash function is any good.
it("must return reasonably unique values") {
def updateState(hashCodes: Set[Int], count: Int, heap: BinomialHeap[Int]): (Set[Int], Int) = {
(hashCodes + heap.hashCode, count + 1)
}
var state = (Set.empty[Int], 0) //scalastyle:ignore var.field
forAll {li: List[Int] =>
val h = BinomialHeap(li: _*)
state = updateState(state._1, state._2, h)
}
assert(state._1.size / state._2.toDouble >= 0.9)
}
}
// Test the member addition operator.
describe(".+(A)") {
// It must add new member to a heap, resulting in a new heap.
it("must accept a new member, resulting in a new heap") {
forAll {(e: Int, li: List[Int]) =>
val h = BinomialHeap(li: _*)
val newH = h + e
assert(h !== newH)
verifyMultiMemberHeap(newH, (e :: li).sorted)
}
}
}
// Test the heap merge operator.
describe(".++(BinomialHeap[A])") {
// One of the heaps is empty.
it("must handle empty heaps correctly") {
// Use positive numbers, as there is no generator for just numbers, right now.
forAll(Gen.nonEmptyListOf(Gen.posNum[Int])) {li =>
val h = BinomialHeap(li: _*)
val eh = BinomialHeap.empty[Int]
val newH1 = h ++ eh
assert(newH1 === h)
val newH2 = eh ++ h
assert(newH2 === h)
val newH3 = eh ++ eh
assert(newH3 === eh)
}
}
// It must create a new heap out of the initial two heaps.
it("must create a new heap from the initial two heaps") {
// Use positive numbers, as there is no generator for just numbers, right now.
forAll(Gen.nonEmptyListOf(Gen.posNum[Int]), Gen.nonEmptyListOf(Gen.posNum[Int])) {(l1, l2) =>
val h1 = BinomialHeap(l1: _*)
val h2 = BinomialHeap(l2: _*)
val newH = h1 ++ h2
val newL = l1 ::: l2
verifyMultiMemberHeap(newH, newL.sorted)
}
}
}
}
}
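/** A minimal usage sketch of the heap API exercised by the tests above; the values are arbitrary. */
object BinomialHeapUsageSketch {
  def example(): Option[Int] = {
    val heap = BinomialHeap(5, 1, 3) + 0
    heap.minimum // Some(0)
  }
}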
// Re-enable test-problematic Scalastyle checkers.
//scalastyle:on magic.numbers
//scalastyle:on multiple.string.literals
//scalastyle:on public.methods.have.type
//scalastyle:on scaladoc | MichaelJAllen/facsimile | facsimile-collection/src/test/scala/org/facsim/collection/immutable/test/BinomialHeapTest.scala | Scala | lgpl-3.0 | 12,901 |
package com.shellhive.angular.components
import biz.enef.angulate._
import biz.enef.angulate.core.Attributes
import biz.enef.angulate.core.Timeout
import biz.enef.angulate.core.JQLite
import scalajs.js
import org.scalajs.jquery._
/**
* @author Omar Castro <[email protected]>, 29-05-2016.
*/
class TipDirective($timeout: Timeout) extends Directive {
// the type of the scope object passed to postLink() and controller()
override type ScopeType = js.Dynamic
// the type of the controller instance passed to postLink() and controller()
override val restrict = "C"
override val scope = true
// -- or --
// override def template(element,attrs) = ...
// -- or --
// override val templateUrl = "/url"
// -- or --
// override def templateUrl(element,attrs) = ...
// -- or --
// override val scope = true
override def postLink(scope: ScopeType,
element: JQLite,
attrs: Attributes,
controller: ControllerType) = {
val jqueryElement = element.asInstanceOf[JQuery]
val hoverIn = (x: JQueryEventObject) => {
if(!js.isUndefined(scope.status) && !scope.status.noTooltip.asInstanceOf[Boolean]){
scope.showTooltip = true; scope.$digest()
}
null
}
val hoverOut = (x: JQueryEventObject) => {
if(js.typeOf(scope.showTooltip) == "boolean" && scope.showTooltip.asInstanceOf[Boolean]){
scope.showTooltip = false; scope.$digest()
}
null
}
jqueryElement.hover(hoverIn,hoverOut)
}
// override def compile(tElement: js.Dynamic, tAttrs: Attributes) : js.Any = ...
}
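// A minimal registration sketch (hedged: `createModule`/`directiveOf` are the usual
// scalajs-angulate helpers and the module name is an assumption; the directive is then
// applied via class="tip" on an element, matching restrict = "C" above):
//   val module = angular.createModule("shellhive")
//   module.directiveOf[TipDirective]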
| OmarCastro/ShellHive-scala | client/src/main/scala/com/shellhive/angular/components/TipDirective.scala | Scala | mit | 1,659 |
package edu.berkeley.nlp.summ.preprocess
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.HashMap
import edu.berkeley.nlp.futile.LightRunner
import edu.berkeley.nlp.futile.classify.ClassifyUtils
import edu.berkeley.nlp.futile.fig.basic.IOUtils
import edu.berkeley.nlp.futile.fig.basic.Indexer
import edu.berkeley.nlp.futile.syntax.Trees.PennTreeRenderer
import edu.berkeley.nlp.futile.util.Logger
import edu.berkeley.nlp.entity.ConllDoc
import edu.berkeley.nlp.summ.data.DiscourseDepEx
import edu.berkeley.nlp.summ.LikelihoodAndGradientComputer
@SerialVersionUID(1L)
class EDUSegmenterSemiMarkovFeaturizer(val featIdx: Indexer[String],
val wrappedFeaturizer: EDUSegmenterFeaturizer) extends Serializable {
private def maybeAdd(feats: ArrayBuffer[Int], addToIndexer: Boolean, feat: String) {
if (addToIndexer) {
feats += featIdx.getIndex(feat)
} else {
val idx = featIdx.indexOf(feat)
if (idx != -1) {
feats += idx
}
}
}
def extractFeaturesCached(ex: DiscourseDepEx, addToIndexer: Boolean): Array[Array[Array[Array[Int]]]] = {
// Only featurize sentence-internal boundaries (sentence boundaries are trivially EDU segments)
if (ex.cachedEduSemiMarkovFeatures == null) {
ex.cachedEduSemiMarkovFeatures = extractFeatures(ex.conllDoc, addToIndexer)
}
ex.cachedEduSemiMarkovFeatures
}
def extractFeatures(doc: ConllDoc, addToIndexer: Boolean): Array[Array[Array[Array[Int]]]] = {
// Features on boundaries from the binary version
val wrappedFeats = wrappedFeaturizer.extractFeatures(doc, addToIndexer)
Array.tabulate(doc.numSents)(sentIdx => {
Array.tabulate(doc.words(sentIdx).size)(startIdx => {
Array.tabulate(doc.words(sentIdx).size + 1)(endIdx => {
if (endIdx > startIdx) {
extractFeatures(doc, sentIdx, startIdx, endIdx, wrappedFeats, addToIndexer)
} else {
Array[Int]()
}
})
})
})
}
private def extractFeatures(doc: ConllDoc, sentIdx: Int, startIdx: Int, endIdx: Int, wrappedFeats: Array[Array[Array[Int]]], addToIndexer: Boolean): Array[Int] = {
val feats = new ArrayBuffer[Int]
def add(feat: String) = maybeAdd(feats, addToIndexer, feat)
val bucketedLen = wrappedFeaturizer.bucket(endIdx - startIdx)
if (startIdx > 0) {
// Don't add these features because they'll be the end of some other span by definition
// feats ++= wrappedFeaturizer.extractFeatures(doc, sentIdx, endIdx - 1, addToIndexer)
} else {
add("StartSent,Len=" + bucketedLen)
}
if (endIdx < doc.words(sentIdx).size - 1) {
feats ++= wrappedFeats(sentIdx)(endIdx - 1)
} else {
add("EndSent,Len=" + bucketedLen)
if (startIdx == 0) {
add("WholeSent,Len=" + bucketedLen)
}
}
if (endIdx - startIdx == 1) {
add("SingleWord=" + doc.words(sentIdx)(startIdx))
} else if (endIdx - startIdx == 2) {
add("TwoWords,First=" + doc.words(sentIdx)(startIdx))
add("TwoWords,Second=" + doc.words(sentIdx)(startIdx+1))
}
// Look at first and last, also the context words
val beforePos = if (startIdx == 0) "<S>" else doc.pos(sentIdx)(startIdx-1)
val firstPos = doc.pos(sentIdx)(startIdx)
val lastPos = doc.pos(sentIdx)(endIdx - 1)
val afterPos = if (endIdx == doc.pos(sentIdx).size) "</S>" else doc.pos(sentIdx)(endIdx)
add("FirstLastPOS=" + firstPos + "-" + afterPos)
add("BeforeAfterPOS=" + beforePos + "-" + afterPos)
// add("BFLAPOS=" + beforePos + "-" + firstPos + "-" + lastPos + "-" + afterPos)
var dominatingConstituents = doc.trees(sentIdx).getAllConstituentTypes(startIdx, endIdx)
if (dominatingConstituents.isEmpty) {
dominatingConstituents = Seq("None")
} else {
// None of these dependency features seem to help
// // We have a valid span, fire features on dependencies
// val headIdx = doc.trees(sentIdx).getSpanHead(startIdx, endIdx)
//// add("HeadWord=" + doc.words(sentIdx)(headIdx))
// add("HeadPos=" + doc.pos(sentIdx)(headIdx))
// val parentIdx = doc.trees(sentIdx).childParentDepMap(headIdx)
// if (parentIdx == -1) {
// add("Parent=ROOT")
// } else {
//// add("ParentWord=" + doc.words(sentIdx)(parentIdx))
// add("ParentPos=" + doc.pos(sentIdx)(parentIdx))
// add("ParentDist=" + Math.signum(parentIdx - headIdx) + ":" + wrappedFeaturizer.bucket(parentIdx - headIdx))
// }
}
// Fire features on constituent labels (or None if it isn't a constituent)
for (constituent <- dominatingConstituents) {
add("DominatingConstituent=" + constituent)
add("DominatingConstituentLength=" + constituent + "-" + bucketedLen)
add("DominatingConstituentBefore=" + constituent + "-" + beforePos)
add("DominatingConstituentAfter=" + constituent + "-" + afterPos)
// This makes it way slower and doesn't help
// val maybeParent = doc.trees(sentIdx).getParent(startIdx, endIdx)
// if (!maybeParent.isDefined) {
// add("DominatingParent=None")
// } else {
// val (parent, childIdx) = maybeParent.get
// val childrenStr = (0 until parent.getChildren().size).map(i => (if (childIdx == i) ">" else "") + parent.getChildren().get(i).getLabel()).foldLeft("")(_ + " " + _)
//// Logger.logss(parent.getLabel() + " ->" + childrenStr)
//// add("DominatingRule=" + parent.getLabel() + " ->" + childrenStr)
// add("DominatingParent=" + parent.getLabel() + " -> " + constituent)
// }
}
feats.toArray
}
}
@SerialVersionUID(1L)
class EDUSegmenterSemiMarkovComputer(val featurizer: EDUSegmenterSemiMarkovFeaturizer,
val wholeSpanLossScale: Double = 4.0) extends LikelihoodAndGradientComputer[DiscourseDepEx] with Serializable {
def getInitialWeights(initialWeightsScale: Double): Array[Double] = Array.tabulate(featurizer.featIdx.size)(i => 0.0)
def accumulateGradientAndComputeObjective(ex: DiscourseDepEx, weights: Array[Double], gradient: Array[Double]): Double = {
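    // Loss-augmented decode (loss weight 1.0) compared against the gold segmentation:
    // features of gold segments receive +1 and features of predicted segments receive -1
    // in the gradient, and the returned objective is predScore - goldScore.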
val (predSegs, predScore) = decode(ex, weights, 1.0);
// val recomputedPredScore = scoreParse(ex, weights, predParents, 1.0)
val goldSegs = ex.goldEduSpans
val goldScore = scoreSegmentation(ex, weights, goldSegs, 1.0)
// Logger.logss("Pred score: " + predScore + ", recomputed pred score: " + recomputedPredScore + ", gold score: " + goldScore)
for (sentIdx <- 0 until ex.conllDoc.numSents) {
for (startIdx <- 0 until ex.conllDoc.words(sentIdx).size) {
for (endIdx <- startIdx + 1 to ex.conllDoc.words(sentIdx).size) {
val seg = startIdx -> endIdx
val increment = (if (goldSegs(sentIdx).contains(seg)) 1 else 0) + (if (predSegs(sentIdx).contains(seg)) -1 else 0)
if (increment != 0) {
val feats = ex.cachedEduSemiMarkovFeatures(sentIdx)(startIdx)(endIdx)
for (feat <- feats) {
gradient(feat) += increment
}
}
}
}
}
predScore - goldScore
}
def computeObjective(ex: DiscourseDepEx, weights: Array[Double]): Double = accumulateGradientAndComputeObjective(ex, weights, Array.tabulate(weights.size)(i => 0.0))
def decode(ex: DiscourseDepEx, weights: Array[Double]): Array[Array[Boolean]] = {
EDUSegmenterSemiMarkov.convertSegsToBooleanArray(decode(ex, weights, 0)._1)
}
def decode(ex: DiscourseDepEx, weights: Array[Double], lossWeight: Double): (Array[Seq[(Int,Int)]], Double) = {
val feats = featurizer.extractFeaturesCached(ex, false)
var cumScore = 0.0
val allPreds = Array.tabulate(ex.conllDoc.numSents)(sentIdx => {
val result = decodeSentence(feats(sentIdx), ex.conllDoc.words(sentIdx).size, weights, lossWeight, Some(ex.goldEduSpans(sentIdx)))
cumScore += result._2
result._1
})
(allPreds, cumScore)
}
def decodeSentence(feats: Array[Array[Array[Int]]], sentLen: Int, weights: Array[Double], lossWeight: Double, goldSpans: Option[Seq[(Int,Int)]]): (Seq[(Int,Int)], Double) = {
val chart = Array.tabulate(sentLen + 1)(i => if (i == 0) 0.0 else Double.NegativeInfinity)
val backptrs = Array.tabulate(sentLen + 1)(i => -1)
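    // Semi-Markov Viterbi over segments: chart(j) is the best score of any segmentation of
    // tokens [0, j) and backptrs(j) is the start index of the last segment in that best
    // segmentation. When lossWeight > 0, every non-gold segment also picks up an additive
    // loss term (scaled by wholeSpanLossScale for the whole-sentence segment), giving
    // loss-augmented decoding.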
for (endIdx <- 1 to sentLen) {
for (startIdx <- 0 until endIdx) {
val isGold = if (goldSpans.isDefined) goldSpans.get.contains(startIdx -> endIdx) else false
val lossScore = if (!isGold) {
if (startIdx == 0 && endIdx == sentLen) {
// lossWeight
lossWeight * wholeSpanLossScale
} else {
lossWeight
}
} else {
0.0
}
val score = ClassifyUtils.scoreIndexedFeats(feats(startIdx)(endIdx), weights) + lossScore
if (chart(startIdx) + score > chart(endIdx)) {
backptrs(endIdx) = startIdx
chart(endIdx) = chart(startIdx) + score
}
}
}
    // Recover the best-scoring segmentation by following the backpointers
val pairs = new ArrayBuffer[(Int,Int)]
var ptr = sentLen
while (ptr > 0) {
pairs.prepend(backptrs(ptr) -> ptr)
ptr = backptrs(ptr)
}
(pairs.toSeq, chart(sentLen))
}
private def scoreSegmentation(ex: DiscourseDepEx, weights: Array[Double], segmentation: Seq[Seq[(Int,Int)]], lossWeight: Double) = {
var score = 0.0
val feats = featurizer.extractFeaturesCached(ex, false)
for (sentIdx <- 0 until ex.conllDoc.numSents) {
for (segment <- segmentation(sentIdx)) {
val isGold = ex.goldEduSpans(sentIdx).contains(segment)
score += ClassifyUtils.scoreIndexedFeats(feats(sentIdx)(segment._1)(segment._2), weights) + (if (!isGold) lossWeight else 0.0)
}
}
score
}
}
@SerialVersionUID(1L)
class EDUSegmenterSemiMarkov(val computer: EDUSegmenterSemiMarkovComputer,
val weights: Array[Double]) extends EDUSegmenter {
def decode(ex: DiscourseDepEx) = computer.decode(ex, weights)
def decode(doc: ConllDoc) = {
val feats = computer.featurizer.extractFeatures(doc, false)
val result = Array.tabulate(feats.size)(i => {
computer.decodeSentence(feats(i), doc.words(i).size, weights, 0.0, None)._1
})
EDUSegmenterSemiMarkov.convertSegsToBooleanArray(result)
}
}
object EDUSegmenterSemiMarkov {
def convertSegsToBooleanArray(segments: Seq[Seq[(Int,Int)]]): Array[Array[Boolean]] = {
Array.tabulate(segments.size)(i => {
val seq = segments(i)
val starts = seq.map(_._1)
Array.tabulate(seq.last._2 - 1)(i => starts.contains(i+1))
})
}
}
| gregdurrett/berkeley-doc-summarizer | src/main/scala/edu/berkeley/nlp/summ/preprocess/EDUSegmenterSemiMarkov.scala | Scala | gpl-3.0 | 10,680 |
package spire.benchmark.jmh
import org.openjdk.jmh.annotations.{Scope, Setup, State}
import scala.util.Random._
@State(Scope.Thread)
class DoubleState extends StateSupport {
var values: Array[Double] = _
@Setup
def setup(): Unit = values = init(size)(nextDouble)
}
| kevinmeredith/spire | benchmark-jmh/src/main/scala/spire/benchmark/jmh/DoubleState.scala | Scala | mit | 274 |
package finloader
import finloader.entities.FileInfos
import org.joda.time.LocalDateTime
import org.specs2.mutable.Specification
import ITUtils.db
import scala.slick.driver.JdbcDriver.simple._
import scala.slick.lifted.TableQuery
/**
* Created by gefox on 26.12.14.
*/
class FileInfoServiceItSpec extends Specification {
sequential
"FileInfoService" should {
cleanFileInfo
"detect missing record" in {
fiService.needsUpdate(SAMPLE_FILE, SAMPLE_DATE1) must beTrue
}
"remember file date" in {
fiService.setUpdatedDateTime(SAMPLE_FILE, SAMPLE_DATE1)
fiService.needsUpdate(SAMPLE_FILE, SAMPLE_DATE1) must beFalse
}
"detect outdated record" in {
fiService.needsUpdate(SAMPLE_FILE, SAMPLE_DATE2) must beTrue
}
"overwrite file date" in {
fiService.setUpdatedDateTime(SAMPLE_FILE, SAMPLE_DATE2)
fiService.needsUpdate(SAMPLE_FILE, SAMPLE_DATE1) must beFalse
fiService.needsUpdate(SAMPLE_FILE, SAMPLE_DATE2) must beFalse
}
"avoid redundant records" in {
db.withSession { implicit session =>
fiQuery.list.size must beEqualTo(1)
}
}
}
private val SAMPLE_FILE = "sampleFile"
private val SAMPLE_DATE1 = LocalDateTime.parse("2014-12-24")
private val SAMPLE_DATE2 = LocalDateTime.parse("2014-12-25")
private val fiService = new FileInfoService(ITUtils.db)
private val fiQuery = TableQuery[FileInfos]
private def cleanFileInfo {
db.withSession { implicit session =>
fiQuery.delete
fiQuery.list.toSet must beEmpty
}
}
}
| paul-lysak/finloader | src/it/scala/finloader/FileInfoServiceItSpec.scala | Scala | apache-2.0 | 1,571 |
package org.shubinmountain.king
import scala.collection.mutable.{ Buffer, ListBuffer }
import akka.actor.ActorRef
/**
* Form: rectangle
 * cx, cy - coordinates of the left-bottom corner
*/
class Button(val cx: Float, val cy: Float, val width: Float, val height: Float, action: Model => Unit)
extends Control {
val listeners: Buffer[ActorRef] = ListBuffer()
def checkAndAction(x: Float, y: Float): Boolean = {
val contains = (x >= cx && x <= cx + width && y >= cy && y <= cy + height)
if (contains) listeners.par.foreach { l => l ! ButtonClick(action) }
contains
}
}
// vim: set ts=4 sw=4 et:
| signal2564/king-of-nothing | common/src/main/scala/org/shubinmountain/king/Button.scala | Scala | gpl-2.0 | 635 |
object rec {
  def main(args: Array[String]): Unit = {
    def factorial(num: Int): BigInt = {
      if (num <= 1) {
        1
      } else {
        num * factorial(num - 1)
      }
    }
    print("Factorial of 4 is: " + factorial(4))
  }
}
| Jargon4072/DS-ALGO_implementations | scala/rec.scala | Scala | gpl-3.0 | 227 |
/* Copyright 2009-2016 EPFL, Lausanne */
import leon.annotation.extern
import leon.lang._
object ExpressionOrder {
case class Pixel(rgb: Int)
case class Matrix(data: Array[Int], w: Int, h: Int)
def void = ()
def fun = 0xffffff
def foo = 4
def bar(i: Int) = i * 2
def baz(i: Int, j: Int) = bar(i) - bar(j)
def syntaxCheck(i: Int) {
val p = Pixel(fun)
val m = Matrix(Array(0, 1, 2, 3), 2, 2)
val z = baz(foo, bar(foo))
val a = Array(0, 1, foo / 2, 3, bar(2), z / 1)
val t = (true, foo, bar(a(0)))
val a2 = Array.fill(4)(2)
val a3 = Array.fill(if (i <= 0) 1 else i)(bar(i))
val b = Array(1, 2, 0)
b(1) = if (bar(b(1)) % 2 == 0) 42 else 58
def f1 = {
require(a.length > 0 && b.length > 0)
(if (i < 0) a else b)(0)
}
def f2 = (if (i < 0) a else b).length
//def f3 = (if (i < 0) a else b)(0) = 0 // <- not supported
val c = (0, true, 2)
val d = (if (i > 0) i else -i, false, 0)
def f4 = (if (i < 0) d else c)._2 // expression result unused
}
def _main() = {
syntaxCheck(0)
printOnFailure(
bool2int(test0(false), 1) +
bool2int(test1(42), 2) +
bool2int(test2(58), 4) +
bool2int(test3(false), 8) +
bool2int(test4(false), 16) +
bool2int(test6, 32) +
bool2int(test7, 64) +
bool2int(test8, 128)+
bool2int(test9, 256)+
bool2int(test10, 512)+
bool2int(test11, 1024)
)
} ensuring { _ == 0 }
def test0(b: Boolean) = {
val f = b && !b // == false
var c = 0
val x = f && { c = 1; true }
c == 0
}.holds
def test1(i: Int) = {
require(i > 0)
val j = i / i * 3 // == 3
var c = 0
val x = { c = c + 3; j } + { c = c + 1; j } * { c = c * 2; j }
c == 8 && j == 3 && x == 12
}.holds
def test2(i: Int) = {
var c = 0;
val x = if (i < 0) { c = 1; -i } else { c = 2; i }
if (i < 0) c == 1
else c == 2
}.holds
def test3(b: Boolean) = {
val f = b && !b // == false
var c = 0
val x = f || { c = 1; true } || { c = 2; false }
c == 1
}.holds
def test4(b: Boolean) = {
var i = 10
var c = 0
val f = b && !b // == false
val t = b || !b // == true
// The following condition is executed 11 times,
// and only during the last execution is the last
// operand evaluated
while ({ c = c + 1; t } && i > 0 || { c = c * 2; f }) {
i = i - 1
}
i == 0 && c == 22
}.holds
def test5(b: Boolean) = {
val f = b && !b // == false
var c = if (f) 0 else -1
c = c + (if (f) 0 else 1)
c == 0
}.holds
def test6 = {
val a = Array(0, 1, 2, 3, 4)
def rec(b: Boolean, i: Int): Boolean = {
require(i >= 0 && i < 2147483647) // 2^31 - 1
if (i + 1 < a.length) rec(if (a(i) < a(i + 1)) b else false, i + 1)
else b
}
rec(true, 0)
}.holds
def test7 = {
var c = 1
val a = Array(0, 1, 2, 3, 4)
a(if(a(0) == 0) { c = c + 1; 0 } else { c = c + 2; 1 }) = { c = c * 2; -1 }
c == 4
}.holds
def test8 = {
var x = 0
def bar(y: Int) = {
def fun(z: Int) = 1 * x * (y + z)
fun(3)
}
bar(2) == 0
}.holds
def test9() = {
var c = 0
val r = { c = c + 3; c } + { c = c + 1; c } * { c = c * 2; c }
r == 35 && c == 8
}.holds
def test10() = {
var c = 0
def myfma(x: Int, y: Int, z: Int) = {
require(z == 8 && c == 8)
x + y * z
}
val r = myfma({ c = c + 3; c }, { c = c + 1; c }, { c = c * 2; c })
r == 35 && c == 8
}.holds
def test11() = {
val a = Array(0, 1, 2, 3)
val b = Array(9, 9, 9)
var c = 666
val i = 9
val x = (if (i != 9) b else { c = 0; a })(if (i == 9) { c = c + 1; 0 } else 1)
x == 0 && c == 1
}.holds
def bool2int(b: Boolean, f: Int) = if (b) 0 else f;
// Because on Unix, exit code should be in [0, 255], we print the exit code on failure
// and return 1. On success, we do nothing special.
def printOnFailure(exitCode: Int): Int = {
if (exitCode == 0) 0
else {
implicit val state = leon.io.newState
leon.io.StdOut.print("Error code: ")
leon.io.StdOut.print(exitCode)
leon.io.StdOut.println()
1
}
}
@extern
def main(args: Array[String]): Unit = _main()
}
| epfl-lara/leon | src/test/resources/regression/genc/valid/ExpressionOrder.scala | Scala | gpl-3.0 | 4,343 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy
import scala.collection.mutable.HashSet
import scala.concurrent.ExecutionContext
import scala.reflect.ClassTag
import scala.util.{Failure, Success}
import org.apache.log4j.{Level, Logger}
import org.apache.spark.rpc.{RpcEndpointRef, RpcAddress, RpcEnv, ThreadSafeRpcEndpoint}
import org.apache.spark.{Logging, SecurityManager, SparkConf}
import org.apache.spark.deploy.DeployMessages._
import org.apache.spark.deploy.master.{DriverState, Master}
import org.apache.spark.util.{ThreadUtils, SparkExitCode, Utils}
/**
* Proxy that relays messages to the driver.
*
* We currently don't support retry if submission fails. In HA mode, client will submit request to
* all masters and see which one could handle it.
*/
private class ClientEndpoint(
override val rpcEnv: RpcEnv,
driverArgs: ClientArguments,
masterEndpoints: Seq[RpcEndpointRef],
conf: SparkConf)
extends ThreadSafeRpcEndpoint with Logging {
// A scheduled executor used to send messages at the specified time.
private val forwardMessageThread =
ThreadUtils.newDaemonSingleThreadScheduledExecutor("client-forward-message")
// Used to provide the implicit parameter of `Future` methods.
private val forwardMessageExecutionContext =
ExecutionContext.fromExecutor(forwardMessageThread,
t => t match {
case ie: InterruptedException => // Exit normally
case e: Throwable =>
logError(e.getMessage, e)
System.exit(SparkExitCode.UNCAUGHT_EXCEPTION)
})
private val lostMasters = new HashSet[RpcAddress]
private var activeMasterEndpoint: RpcEndpointRef = null
override def onStart(): Unit = {
driverArgs.cmd match {
case "launch" =>
// TODO: We could add an env variable here and intercept it in `sc.addJar` that would
// truncate filesystem paths similar to what YARN does. For now, we just require
// people call `addJar` assuming the jar is in the same directory.
val mainClass = "org.apache.spark.deploy.worker.DriverWrapper"
val classPathConf = "spark.driver.extraClassPath"
val classPathEntries = sys.props.get(classPathConf).toSeq.flatMap { cp =>
cp.split(java.io.File.pathSeparator)
}
val libraryPathConf = "spark.driver.extraLibraryPath"
val libraryPathEntries = sys.props.get(libraryPathConf).toSeq.flatMap { cp =>
cp.split(java.io.File.pathSeparator)
}
val extraJavaOptsConf = "spark.driver.extraJavaOptions"
val extraJavaOpts = sys.props.get(extraJavaOptsConf)
.map(Utils.splitCommandString).getOrElse(Seq.empty)
val sparkJavaOpts = Utils.sparkJavaOpts(conf)
val javaOpts = sparkJavaOpts ++ extraJavaOpts
val command = new Command(mainClass,
Seq("{{WORKER_URL}}", "{{USER_JAR}}", driverArgs.mainClass) ++ driverArgs.driverOptions,
sys.env, classPathEntries, libraryPathEntries, javaOpts)
val driverDescription = new DriverDescription(
driverArgs.jarUrl,
driverArgs.memory,
driverArgs.cores,
driverArgs.supervise,
command)
        asyncSendToMasterAndForwardReply[SubmitDriverResponse](
RequestSubmitDriver(driverDescription))
case "kill" =>
val driverId = driverArgs.driverId
        asyncSendToMasterAndForwardReply[KillDriverResponse](RequestKillDriver(driverId))
}
}
/**
* Send the message to master and forward the reply to self asynchronously.
*/
  private def asyncSendToMasterAndForwardReply[T: ClassTag](message: Any): Unit = {
for (masterEndpoint <- masterEndpoints) {
masterEndpoint.ask[T](message).onComplete {
case Success(v) => self.send(v)
case Failure(e) =>
logWarning(s"Error sending messages to master $masterEndpoint", e)
}(forwardMessageExecutionContext)
}
}
/* Find out driver status then exit the JVM */
def pollAndReportStatus(driverId: String) {
// Since ClientEndpoint is the only RpcEndpoint in the process, blocking the event loop thread
// is fine.
logInfo("... waiting before polling master for driver state")
Thread.sleep(5000)
logInfo("... polling master for driver state")
val statusResponse =
activeMasterEndpoint.askWithRetry[DriverStatusResponse](RequestDriverStatus(driverId))
statusResponse.found match {
case false =>
logError(s"ERROR: Cluster master did not recognize $driverId")
System.exit(-1)
case true =>
logInfo(s"State of $driverId is ${statusResponse.state.get}")
// Worker node, if present
(statusResponse.workerId, statusResponse.workerHostPort, statusResponse.state) match {
case (Some(id), Some(hostPort), Some(DriverState.RUNNING)) =>
logInfo(s"Driver running on $hostPort ($id)")
case _ =>
}
// Exception, if present
statusResponse.exception.map { e =>
logError(s"Exception from cluster was: $e")
e.printStackTrace()
System.exit(-1)
}
System.exit(0)
}
}
override def receive: PartialFunction[Any, Unit] = {
case SubmitDriverResponse(master, success, driverId, message) =>
logInfo(message)
if (success) {
activeMasterEndpoint = master
pollAndReportStatus(driverId.get)
} else if (!Utils.responseFromBackup(message)) {
System.exit(-1)
}
case KillDriverResponse(master, driverId, success, message) =>
logInfo(message)
if (success) {
activeMasterEndpoint = master
pollAndReportStatus(driverId)
} else if (!Utils.responseFromBackup(message)) {
System.exit(-1)
}
}
override def onDisconnected(remoteAddress: RpcAddress): Unit = {
if (!lostMasters.contains(remoteAddress)) {
logError(s"Error connecting to master $remoteAddress.")
lostMasters += remoteAddress
// Note that this heuristic does not account for the fact that a Master can recover within
// the lifetime of this client. Thus, once a Master is lost it is lost to us forever. This
// is not currently a concern, however, because this client does not retry submissions.
if (lostMasters.size >= masterEndpoints.size) {
logError("No master is available, exiting.")
System.exit(-1)
}
}
}
override def onNetworkError(cause: Throwable, remoteAddress: RpcAddress): Unit = {
if (!lostMasters.contains(remoteAddress)) {
logError(s"Error connecting to master ($remoteAddress).")
logError(s"Cause was: $cause")
lostMasters += remoteAddress
if (lostMasters.size >= masterEndpoints.size) {
logError("No master is available, exiting.")
System.exit(-1)
}
}
}
override def onError(cause: Throwable): Unit = {
logError(s"Error processing messages, exiting.")
cause.printStackTrace()
System.exit(-1)
}
override def onStop(): Unit = {
forwardMessageThread.shutdownNow()
}
}
/**
* Executable utility for starting and terminating drivers inside of a standalone cluster.
*/
object Client {
def main(args: Array[String]) {
// scalastyle:off println
if (!sys.props.contains("SPARK_SUBMIT")) {
println("WARNING: This client is deprecated and will be removed in a future version of Spark")
println("Use ./bin/spark-submit with \\"--master spark://host:port\\"")
}
// scalastyle:on println
val conf = new SparkConf()
val driverArgs = new ClientArguments(args)
if (!driverArgs.logLevel.isGreaterOrEqual(Level.WARN)) {
conf.set("spark.akka.logLifecycleEvents", "true")
}
conf.set("spark.rpc.askTimeout", "10")
conf.set("akka.loglevel", driverArgs.logLevel.toString.replace("WARN", "WARNING"))
Logger.getRootLogger.setLevel(driverArgs.logLevel)
val rpcEnv =
RpcEnv.create("driverClient", Utils.localHostName(), 0, conf, new SecurityManager(conf))
val masterEndpoints = driverArgs.masters.map(RpcAddress.fromSparkURL).
map(rpcEnv.setupEndpointRef(Master.SYSTEM_NAME, _, Master.ENDPOINT_NAME))
rpcEnv.setupEndpoint("client", new ClientEndpoint(rpcEnv, driverArgs, masterEndpoints, conf))
rpcEnv.awaitTermination()
}
}
| ArvinDevel/onlineAggregationOnSparkV2 | core/src/main/scala/org/apache/spark/deploy/Client.scala | Scala | apache-2.0 | 9,092 |
package org.sisioh.aws4s.sns.model
import com.amazonaws.services.sns.model.AddPermissionRequest
import org.sisioh.aws4s.PimpedType
import scala.collection.JavaConverters._
object AddPermissionRequestFactory {
def create(): AddPermissionRequest = new AddPermissionRequest()
def create(topicArn: String, label: String, awsAccountIds: Seq[String], actions: Seq[String]): AddPermissionRequest =
new AddPermissionRequest(topicArn, label, awsAccountIds.asJava, actions.asJava)
}
class RichAddPermissionRequest(val underlying: AddPermissionRequest)
extends AnyVal
with PimpedType[AddPermissionRequest] {
def topicArnOpt: Option[String] =
Option(underlying.getTopicArn)
def topicArnOpt_=(value: Option[String]): Unit =
underlying.setTopicArn(value.orNull)
def withTopicArnOpt(value: Option[String]): AddPermissionRequest =
underlying.withTopicArn(value.orNull)
def labelOpt: Option[String] =
Option(underlying.getLabel)
def labelOpt_=(value: Option[String]) =
underlying.setLabel(value.orNull)
def withLabelOpt(label: Option[String]): AddPermissionRequest =
underlying.withLabel(label.orNull)
def awsAccountIds: Seq[String] =
underlying.getAWSAccountIds.asScala.toVector
def awsAccountIds_=(value: Seq[String]): Unit =
underlying.setAWSAccountIds(value.asJava)
def withAWSAccountIds(value: Seq[String]): AddPermissionRequest =
underlying.withAWSAccountIds(value.asJava)
def actionNames: Seq[String] =
underlying.getActionNames.asScala.toVector
def actionNames_=(value: Seq[String]): Unit =
underlying.setActionNames(value.asJava)
def withActionNames(value: Seq[String]): AddPermissionRequest =
underlying.withActionNames(value.asJava)
}
| sisioh/aws4s | aws4s-sns/src/main/scala/org/sisioh/aws4s/sns/model/RichAddPermissionRequest.scala | Scala | mit | 1,736 |
/*
* Sentilab SARE: a Sentiment Analysis Research Environment
* Copyright (C) 2013 Sabanci University Sentilab
* http://sentilab.sabanciuniv.edu
*
* This file is part of SARE.
*
* SARE is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* SARE is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with SARE. If not, see <http://www.gnu.org/licenses/>.
*/
package edu.sabanciuniv.sentilab.utils.text.nlp.base
import java.util.Map
import edu.sabanciuniv.sentilab.core.controllers.ControllerLike
/**
* A class that implements this interface will be able to provide NLP capabilities.
* @author Mus'ab Husaini
*/
trait LinguisticProcessorLike
extends ControllerLike {
/**
* Gets the basic POS tags of this language.
* @return a {@link Map} where the keys are internal POS tag names and the values are their respective pluralized display names.
*/
def getBasicPosTags: Map[String, String]
/**
   * Decomposes a given text using NLP into its sentences and tokens.
* @param text the text to decompose.
* @return a {@link LinguisticText} object containing the decomposed text.
*/
def decompose(text: String): LinguisticText
/**
* Tags a given text with POS tags.
* @param text the text to tag.
* @return a {@link LinguisticText} object containing the tagged text.
*/
def tag(text: String): LinguisticText
/**
* Parses a given text for linguistic dependencies.
* @param text the text to parse.
* @return a {@link LinguisticText} object containing the parsed text.
*/
def parse(text: String): LinguisticText
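  /*
   * Illustrative usage sketch (an added example, not part of the original interface;
   * the concrete processor on the right-hand side is hypothetical):
   *
   *   val nlp: LinguisticProcessorLike = ... // e.g. some wrapper around an NLP toolkit
   *   val tagged = nlp.tag("The cat sat on the mat.")   // POS-tagged LinguisticText
   *   val parsed = nlp.parse("The cat sat on the mat.") // adds dependency information
   */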
} | musabhusaini/sare | sare-lib/modules/utils/src/main/scala/edu/sabanciuniv/sentilab/utils/text/nlp/base/LinguisticProcessorLike.scala | Scala | gpl-3.0 | 2,005 |
package org.jetbrains.plugins.scala.debugger.friendlyCollections
import java.util
import com.intellij.debugger.engine.evaluation.{EvaluateException, EvaluationContextImpl}
import com.intellij.debugger.settings.NodeRendererSettings
import com.intellij.debugger.ui.impl.ThreadsDebuggerTree
import com.intellij.debugger.ui.impl.watch._
import com.intellij.debugger.ui.tree.render._
import com.intellij.debugger.ui.tree.{DebuggerTreeNode, NodeDescriptorFactory, NodeManager, ValueDescriptor}
import com.intellij.openapi.util.Disposer
import org.jetbrains.plugins.scala.debugger.ui.ScalaCollectionRenderer
import org.jetbrains.plugins.scala.debugger.{ScalaDebuggerTestCase, ScalaVersion_2_11}
/**
* User: Dmitry Naydanov
* Date: 9/5/12
*/
class ScalaCollectionRendererTest extends ScalaDebuggerTestCase with ScalaVersion_2_11 {
private val COMMON_FILE_NAME = "dummy.scala"
private val UNIQUE_ID = "uniqueID"
private def renderLabelAndChildren(variableName: String): (String, List[String]) = {
import scala.collection.JavaConversions._
val frameTree = new ThreadsDebuggerTree(getProject)
Disposer.register(getTestRootDisposable, frameTree)
var testVariableChildren: util.List[DebuggerTreeNode] = null
val testVariable = managed[LocalVariableDescriptorImpl] {
val context = evaluationContext()
val testVariable = localVar(frameTree, context, variableName)
val renderer = testVariable.getRenderer(getDebugProcess)
testVariable.setRenderer(renderer)
testVariable.updateRepresentation(context, DescriptorLabelListener.DUMMY_LISTENER)
val value = testVariable.calcValue(context)
renderer.buildChildren(value, new ChildrenBuilder {
def setChildren(children: util.List[DebuggerTreeNode]) {testVariableChildren = children}
def getDescriptorManager: NodeDescriptorFactory = frameTree.getNodeFactory
def getNodeManager: NodeManager = frameTree.getNodeFactory
def setRemaining(remaining: Int) {}
def initChildrenArrayRenderer(renderer: ArrayRenderer) {}
def getParentDescriptor: ValueDescriptor = testVariable
}, context)
testVariable
}
managed{testVariableChildren map (_.getDescriptor) foreach {
case impl: NodeDescriptorImpl =>
impl.updateRepresentation(evaluationContext(), DescriptorLabelListener.DUMMY_LISTENER)
case a => println(a)
}}
//<magic>
evalResult(variableName)
//</magic>
managed {
(testVariable.getLabel, (testVariableChildren map {_.getDescriptor.getLabel}).toList)
}
}
private def localVar(frameTree: DebuggerTree, evaluationContext: EvaluationContextImpl, name: String) = {
try {
val frameProxy = evaluationContext.getFrameProxy
val local = frameTree.getNodeFactory.getLocalVariableDescriptor(null, frameProxy visibleVariableByName name)
local setContext evaluationContext
local
} catch {
case e: EvaluateException => null
}
}
protected def testScalaCollectionRenderer(collectionName: String, collectionLength: Int, collectionClass: String) = {
import org.junit.Assert._
runDebugger() {
waitForBreakpoint()
val (label, children) = renderLabelAndChildren(collectionName)
val classRenderer: ClassRenderer = NodeRendererSettings.getInstance().getClassRenderer
val typeName = classRenderer.renderTypeName(collectionClass)
val expectedLabel = s"$collectionName = {$typeName@$UNIQUE_ID}${
ScalaCollectionRenderer.transformName(collectionClass)} size = $collectionLength"
assertEquals(expectedLabel, label)
val intType = classRenderer.renderTypeName("java.lang.Integer")
val intLabel = s"{$intType@$UNIQUE_ID}"
var testIndex = 0
children foreach { childLabel =>
val expectedChildLabel = s"$testIndex = $intLabel${testIndex + 1}"
assertEquals(childLabel, expectedChildLabel)
testIndex += 1
}
}
}
addFileWithBreakpoints("ShortList.scala",
s"""
|object ShortList {
| def main(args: Array[String]) {
| val lst = List(1, 2, 3, 4, 5, 6)
| val a = 1$bp
| }
|}
""".replace("\\r", "").stripMargin.trim
)
def testShortList() {
testScalaCollectionRenderer("lst", 6, "scala.collection.immutable.$colon$colon")
}
addFileWithBreakpoints("Stack.scala",
s"""
|object Stack {
| def main(args: Array[String]) {
| import scala.collection.mutable
| val stack = mutable.Stack(1,2,3,4,5,6,7,8)
| val b = 45$bp
| }
|}
""".stripMargin.replace("\\r","").trim
)
def testStack() {
testScalaCollectionRenderer("stack", 8, "scala.collection.mutable.Stack")
}
addFileWithBreakpoints("MutableList.scala",
s"""
|object MutableList {
| def main(args: Array[String]) {
| val mutableList = scala.collection.mutable.MutableList(1,2,3,4,5)
| val a = 1$bp
| }
|}
""".stripMargin.replace("\\r", "").trim
)
def testMutableList() {
testScalaCollectionRenderer("mutableList", 5, "scala.collection.mutable.MutableList")
}
addFileWithBreakpoints("Queue.scala",
s"""
|object Queue {
| def main(args: Array[String]) {
| val queue = scala.collection.immutable.Queue(1,2,3,4)
| val a = 1$bp
| }
|}
""".stripMargin.replace("\\r", "").trim
)
def testQueue() {
testScalaCollectionRenderer("queue", 4, "scala.collection.immutable.Queue")
}
addFileWithBreakpoints("LongList.scala",
s"""
|object LongList {
| def main(args: Array[String]) {
| val longList = (1 to 50).toList
| val a = 1$bp
| }
|}
""".stripMargin.replace("\\r", "").trim
)
def testLongList() {
testScalaCollectionRenderer("longList", 50, "scala.collection.immutable.$colon$colon")
}
}
| whorbowicz/intellij-scala | test/org/jetbrains/plugins/scala/debugger/friendlyCollections/ScalaCollectionRendererTest.scala | Scala | apache-2.0 | 5,934 |
package org.igye.jfxutils.properties
import javafx.collections.ListChangeListener
import javafx.collections.ListChangeListener.Change
object ListChgListener {
def apply[T](body: Change[_ <: T] => Unit): ListChangeListener[T] = {
new ListChangeListener[T] {
override def onChanged(c: Change[_ <: T]): Unit = {
body(c)
}
}
}
}
| Igorocky/jfxutils | src/main/scala/org/igye/jfxutils/properties/ListChgListener.scala | Scala | mit | 391 |
/*
* Copyright 2014 Twitter inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.storehaus
import com.twitter.util.Future
/** MapStore is a ReadableStore backed by a scala immutable Map.
*
* @author Oscar Boykin
* @author Sam Ritchie
*/
class MapStore[K, +V](val backingStore: Map[K, V] = Map[K, V]()) extends ReadableStore[K, V]
with IterableStore[K, V] {
override def get(k: K) = Future.value(backingStore.get(k))
override def getAll = IterableStore.iteratorToSpool(backingStore.iterator)
}
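// Usage sketch (an added illustration, not part of the original file): get() looks the key
// up in the backing immutable Map and wraps the result in an already-satisfied Future.
//
//   val store = new MapStore(Map("a" -> 1, "b" -> 2))
//   store.get("a") // Future.value(Some(1))
//   store.get("z") // Future.value(None)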
| joychugh/storehaus | storehaus-core/src/main/scala/com/twitter/storehaus/MapStore.scala | Scala | apache-2.0 | 1,073 |
package uk.gov.gds.ier.form
import org.joda.time.{LocalDate, YearMonth, Years}
import uk.gov.gds.ier.validation.{ErrorTransformForm, FormKeys, Key}
import uk.gov.gds.ier.model.{DateLeft, ApplicationType, LastRegisteredType}
import scala.util.Try
import uk.gov.gds.ier.validation.constants.DateOfBirthConstants
import uk.gov.gds.ier.transaction.overseas.InprogressOverseas
trait OverseasFormImplicits {
self: FormKeys =>
implicit class OverseasImprovedForm(form:ErrorTransformForm[InprogressOverseas]) {
def dateOfBirth = {
for(
day <- form(keys.dob.day).value;
month <- form(keys.dob.month).value;
year <- form(keys.dob.year).value
) yield {
new LocalDate()
.withYear(year.toInt)
.withMonthOfYear(month.toInt)
.withDayOfMonth(day.toInt)
}
}
def dateLeftSpecial = {
for (
month <- form(keys.dateLeftSpecial.month).value;
year <- form(keys.dateLeftSpecial.year).value
) yield {
new YearMonth().withYear(year.toInt).withMonthOfYear(month.toInt)
}
}
def dateLeftUk = {
for (
month <- form(keys.dateLeftUk.month).value;
year <- form(keys.dateLeftUk.year).value
) yield {
new YearMonth().withYear(year.toInt).withMonthOfYear(month.toInt)
}
}
def within15YearLimit = {
val fifteenYearsAgo = new YearMonth().minusYears(15)
dateLeftUk map { date =>
date isAfter fifteenYearsAgo
}
}
def dateBecameCitizen = for (
day <- form(keys.passport.citizenDetails.dateBecameCitizen.day).value;
month <- form(keys.passport.citizenDetails.dateBecameCitizen.month).value;
year <- form(keys.passport.citizenDetails.dateBecameCitizen.year).value
) yield {
new LocalDate()
.withYear(year.toInt)
.withMonthOfYear(month.toInt)
.withDayOfMonth(day.toInt)
}
def bornBefore1983 = {
dateOfBirth map { dob =>
dob isBefore DateOfBirthConstants.jan1st1983
}
}
def under18WhenLeft = {
for(dob <- dateOfBirth; whenLeft <- dateLeftUk) yield {
Years.yearsBetween(new YearMonth(dob), whenLeft).getYears() < 18
}
}
def lastRegisteredType = {
Try {
form(keys.lastRegisteredToVote.registeredType).value.map { regType =>
LastRegisteredType.parse(regType)
}
}.getOrElse(None)
}
def identifyApplication:ApplicationType = {
identifyOverseasApplication(
dateOfBirth map { dateTime => new YearMonth(dateTime) },
dateLeftUk,
lastRegisteredType
)
}
}
implicit class OverseasImprovedApplication(application: InprogressOverseas) {
def identifyApplication:ApplicationType = {
val dateOfBirth = application.dob.map { dob =>
new YearMonth().withYear(dob.year).withMonthOfYear(dob.month)
}
val whenLeft = application.dateLeftUk.map { dateLeft =>
new YearMonth().withYear(dateLeft.year).withMonthOfYear(dateLeft.month)
}
val lastRegistered = application.lastRegisteredToVote.map(_.lastRegisteredType)
identifyOverseasApplication(dateOfBirth, whenLeft, lastRegistered)
}
}
private def identifyOverseasApplication(
dob:Option[YearMonth],
dateLeft:Option[YearMonth],
lastRegistered: Option[LastRegisteredType]):ApplicationType = {
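    // Precedence: last registered overseas -> renewer; ordinary -> new voter;
    // forces/crown/council -> special voter; otherwise, under 18 when leaving the UK
    // -> young voter; anything else -> don't know.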
val under18WhenLeft = for(dateOfBirth <- dob; whenLeft <- dateLeft) yield {
Years.yearsBetween(dateOfBirth, whenLeft).getYears() < 18
}
if (lastRegistered.exists(_ == LastRegisteredType.Overseas)) {
ApplicationType.RenewerVoter
} else if (lastRegistered.exists(_ == LastRegisteredType.Ordinary)) {
ApplicationType.NewVoter
} else if (lastRegistered.exists(_ == LastRegisteredType.Forces)
|| lastRegistered.exists(_ == LastRegisteredType.Crown)
|| lastRegistered.exists(_ == LastRegisteredType.Council)){
ApplicationType.SpecialVoter
} else if (under18WhenLeft.exists(_ == true)) {
ApplicationType.YoungVoter
} else {
ApplicationType.DontKnow
}
}
}
| michaeldfallen/ier-frontend | app/uk/gov/gds/ier/form/OverseasFormImplicits.scala | Scala | mit | 4,139 |
package sorting
import org.salgo.sorting.{CycleSort, GeneralSortingAlgorithm}
class CycleSortSpec extends GenericSortSpec {
override def getSortingAlgorithm: GeneralSortingAlgorithm = CycleSort
}
| ascensio/salgo | tests/sorting/CycleSortSpec.scala | Scala | apache-2.0 | 200 |
package scutil.collection
import minitest._
import scutil.lang._
import scutil.collection.implicits._
object SeqImplicitsTest extends SimpleTestSuite {
test("equivalentSpans should be empty for empty input") {
//(Seq.empty[String] equivalentSpans equivalentSpansCriterium) must haveTheSameElementsAs(Seq.empty)
assertEquals(
(Seq.empty[String] equivalentSpans equivalentSpansCriterium),
Seq.empty
)
}
test("equivalentSpans should be simple for a 1-element input") {
assertEquals(
(Seq("hallo") equivalentSpans equivalentSpansCriterium),
Seq(Seq("hallo"))
)
}
test("equivalentSpans should group together 2 equivalent elements") {
assertEquals(
(Seq("hallo", "hello") equivalentSpans equivalentSpansCriterium),
Seq(Seq("hallo", "hello"))
)
}
test("equivalentSpans should group separate 2 non-equivalent elements") {
assertEquals(
(Seq("hallo", "ballo") equivalentSpans equivalentSpansCriterium),
Seq(Seq("hallo"), Seq("ballo"))
)
}
test("equivalentSpans should leave 1 non-equivalent element at the end") {
assertEquals(
(Seq("hallo", "hello", "ballo") equivalentSpans equivalentSpansCriterium),
Seq(Seq("hallo", "hello"), Seq("ballo"))
)
}
private def equivalentSpansCriterium(a:String, b:String):Boolean =
(a charAt 0) == (b charAt 0)
//------------------------------------------------------------------------------
/*
test("splitAround should be empty for empty input") {
(Seq.empty[Int] splitAround 1) must haveTheSameElementsAs(Seq.empty)
}
test("splitAround should be simple for a 1-element input") {
(Seq(0) splitAround 1) must haveTheSameElementsAs(Seq(Seq(0)))
}
test("splitAround should split into two for a single separator") {
(Seq(1) splitAround 1) must haveTheSameElementsAs(Seq(Seq(),Seq()))
}
test("splitAround should split an empty Seq before a leading separator") {
(Seq(1,2) splitAround 1) must haveTheSameElementsAs(Seq(Seq(),Seq(2)))
}
test("splitAround should split an empty Seq after a trailing separator") {
(Seq(0,1) splitAround 1) must haveTheSameElementsAs(Seq(Seq(0),Seq()))
}
test("splitAround should split a simple Seq correctly") {
(Seq(0,1,2) splitAround 1) must haveTheSameElementsAs(Seq(Seq(0),Seq(2)))
}
test("splitAround should create an empty Seq between two adjacent separators") {
(Seq(0,1,1,2) splitAround 1) must haveTheSameElementsAs(Seq(Seq(0),Seq(),Seq(2)))
}
*/
//------------------------------------------------------------------------------
test("adjacents should work with 0 elements") {
assertEquals(
Seq.empty[Int].adjacents,
Seq.empty
)
}
test("adjacents should work with 1 element") {
assertEquals(
Seq(1).adjacents,
Seq((None,1,None))
)
}
test("adjacents should work with 2 elements") {
assertEquals(
Seq(1,2).adjacents,
Seq((None,1,Some(2)), (Some(1),2,None))
)
}
test("adjacents should work with 3 elements") {
assertEquals(
Seq(1,2,3).adjacents,
Seq((None,1,Some(2)), (Some(1),2,Some(3)), (Some(2),3,None))
)
}
test("adjacents should return the right type") {
val a = Vector(1,2,3).adjacents
typed[ Vector[(Option[Int],Int,Option[Int])] ](a)
assertEquals(
a,
Seq((None,1,Some(2)), (Some(1),2,Some(3)), (Some(2),3,None))
)
}
//------------------------------------------------------------------------------
private val splitWherePredicate:Int=>Boolean = _ == 1
test("splitWhere should be empty for empty input") {
assertEquals(
Seq.empty[Int] splitWhere splitWherePredicate,
Seq.empty
)
}
test("splitWhere should be simple for a 1-element input") {
assertEquals(
Seq(0) splitWhere splitWherePredicate,
Seq(Right(Seq(0)))
)
}
test("splitWhere should split into two for a single separator") {
assertEquals(
Seq(1) splitWhere splitWherePredicate,
Seq(Right(Seq()),Left(1), Right(Seq()))
)
}
test("splitWhere should split an empty Seq before a leading separator") {
assertEquals(
Seq(1,2) splitWhere splitWherePredicate,
Seq(Right(Seq()),Left(1),Right(Seq(2)))
)
}
test("splitWhere should split an empty Seq after a trailing separator") {
assertEquals(
Seq(0,1) splitWhere splitWherePredicate,
Seq(Right(Seq(0)),Left(1),Right(Seq()))
)
}
test("splitWhere should split simple Seq correctly") {
assertEquals(
Seq(0,1,2) splitWhere splitWherePredicate,
Seq(Right(Seq(0)),Left(1),Right(Seq(2)))
)
}
test("splitWhere should create an empty Seq between two adjacent separators") {
assertEquals(
Seq(0,1,1,2) splitWhere splitWherePredicate,
Seq(Right(Seq(0)),Left(1),Right(Seq()),Left(1),Right(Seq(2)))
)
}
//------------------------------------------------------------------------------
test("moveAt should fail without enough elements") {
assertEquals(
Seq().moveAt(0,0),
None
)
}
test("moveAt should move from start to end") {
assertEquals(
Seq(1,2,3).moveAt(0,3),
Some(Seq(2,3,1))
)
}
test("moveAt should move from end to start") {
assertEquals(
Seq(1,2,3).moveAt(2,0),
Some(Seq(3,1,2))
)
}
test("moveAt should not move to gap left") {
assertEquals(
Seq(1,2,3,4).moveAt(1,1),
None
)
}
test("moveAt should not move to gap right") {
assertEquals(
Seq(1,2,3,4).moveAt(1,2),
None
)
}
test("moveAt should move to gap further left") {
assertEquals(
Seq(1,2,3,4).moveAt(1,0),
Some(Seq(2,1,3,4))
)
}
test("moveAt should move to gap further right") {
assertEquals(
Seq(1,2,3,4).moveAt(1,3),
Some(Seq(1,3,2,4))
)
}
//------------------------------------------------------------------------------
test("zipTail should just work") {
assertEquals(
Vector(1,2,3).zipTail,
Vector((1,2),(2,3))
)
}
}
| ritschwumm/scutil | modules/core/src/test/scala/scutil/collection/SeqImplicitsTest.scala | Scala | bsd-2-clause | 5,716 |
package authes
import models.Account
import play.api.mvc.{Request, Result}
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration._
trait Authenticator {
import controllers.Responses._
type User = Account
val SessionCookie = "PLAY_SESSION"
val TimeoutInSeconds = 7.days.toSeconds.toInt
val container = new SessionContainer
def findUser(token: String): Option[User] = {
for {
sessionId <- container.get(token)
account <- Account.findById(sessionId)
} yield account
}
def withAuth[A](role: Authority)(f: User => Result)(implicit req: Request[A]): Result = {
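    // Resolve the session cookie to a stored session id, load the account, then check its
    // role against the required authority; the first step that fails short-circuits with a
    // Forbidden response, otherwise the wrapped action runs and the session cookie is re-issued.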
val res = for {
session <- req.session.get(SessionCookie).toRight(authorizationFailed)
user <- findUser(session).toRight(authorizationFailed)
res <- Either.cond(role.auth(user.role), f(user).withSession(SessionCookie -> session), authenticationFailed)
} yield res
res.merge
}
def withAuthAsync[A](role: Authority)(f: User => Future[Result])(implicit req: Request[A], ec: ExecutionContext): Future[Result] = {
val res = for {
session <- req.session.get(SessionCookie).toRight(suc(authorizationFailed))
user <- findUser(session).toRight(suc(authorizationFailed))
res <- Either.cond(role.auth(user.role), f(user).map(_.withSession(SessionCookie -> session)), suc(authenticationFailed))
} yield res
res.merge
}
def gotoLoginSucceeded(user: User) = {
val token = container.startNewSession(user.id, TimeoutInSeconds)
Success.withSession(SessionCookie -> token)
}
def gotoLogoutSucceeded[A]()(implicit req: Request[A]) = {
req.session.get(SessionCookie).fold(authorizationFailed) { session =>
container.remove(session)
Success
}
}
def authorizationFailed = Forbidden("Authorization failed")
def authenticationFailed = Forbidden("Authentication failed")
private def suc[A](x: A) = Future.successful(x)
}
trait Authority {
def auth(userAuthority: Authority): Boolean
}
| ponkotuy/train-stamp-rally | app/authes/Authenticator.scala | Scala | apache-2.0 | 2,002 |
package chapter28
/**
 * 28.7 Saving and loading
 *
 * Converting XML data to a byte stream and reading it back in.
 * Libraries take care of this step, so it is the easiest part.
 * If you converted strings to bytes by hand, you would also have to keep track of the character encoding yourself.
 *
 * To write XML out as a file of bytes, you can use the XML.save command.
*/
object c28_i07 extends App {
val therm = new CCTherm {
val description: String = "hot dog #5"
val yearMade: Int = 1952
val dateObtained: String = "March 14, 2006"
val bookPrice: Int = 2199
val purchasePrice: Int = 500
val condition: Int = 9
} //> therm : chapter28.CCTherm = hot dog #5
scala.xml.XML.save("src/main/java/chapter28/therm.xml", therm.toXML)
val loadnode = xml.XML.loadFile("src/main/java/chapter28/therm.xml")
println(loadnode.toString)
} | seraekim/srkim-lang-scala | src/main/java/chapter28/c28_i07.scala | Scala | bsd-3-clause | 1,005 |
package com.equalinformation.fpps.week4
/**
* Created by bpupadhyaya on 9/30/15.
* Problem: Provide an implementation of the abstract class Nat that represents
* non-negative integers.
*/
//Peano numbers
abstract class Nat {
def isZero: Boolean
def predecessor: Nat
def successor = new Succ(this)
def +(that: Nat): Nat
def -(that: Nat): Nat
}
object Zero extends Nat {
def isZero = true
def predecessor = throw new Error("0.predecessor")
def +(that: Nat) = that
def -(that: Nat) = if (that.isZero) this else throw new Error("negative number")
}
class Succ(n: Nat) extends Nat {
def isZero = false
def predecessor = n
def +(that: Nat) = new Succ(n + that)
def -(that: Nat) = if(that.isZero) this else n - that.predecessor
}
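// Illustrative check (an addition for demonstration, not part of the original exercise):
// builds small Peano numbers from the definitions above and exercises + and -.
object NatDemo {
  def main(args: Array[String]): Unit = {
    val one = Zero.successor          // Succ(Zero)
    val two = one.successor           // Succ(Succ(Zero))
    val three = two + one
    println(three.isZero)             // false
    println((three - two).isZero)     // false: 3 - 2 = 1
    println((two - two).isZero)       // true
  }
}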
| bpupadhyaya/fpps | src/main/scala/com/equalinformation/fpps/week4/Nat.scala | Scala | mit | 757 |
/*******************************************************************************
* Copyright (c) 2012-2013
* - Bruno C.d.S. Oliveira ([email protected])
* - Tijs van der Storm ([email protected])
* - Alex Loh ([email protected])
* - William R. Cook ([email protected])
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
********************************************************************************/
package oalg.algebra.paper
import scala.collection.mutable.HashMap
import scala.reflect._
object Self {
import Exp.IEval
import Exp.IPrint
trait GExpAlg[In,Out] {
def Lit(x : Int) : Out
def Add(e1 : In, e2 : In) : Out
}
type ExpAlg[E] = GExpAlg[E,E]
type Open[S <: E, E] = (=> S) => E
type OExpAlg[S <: E, E] = GExpAlg[S, Open[S,E]]
trait ExpEval extends ExpAlg[IEval] {
def Lit(x : Int) : IEval = new IEval {
def eval() : Int = x
}
def Add(e1 : IEval, e2 : IEval) : IEval = new IEval {
def eval() : Int = e1.eval() + e2.eval()
}
}
object ExpEval extends ExpEval
// Use the core algebra types and combinators
import oalg.algebra.core.Algebras._
class LiftEP[S <: IEval with IPrint] extends Lifter[IEval,IPrint,S] {
def lift(x : IEval, y : IPrint) = self => new IEval with IPrint {
def print() = y.print()
def eval() = x.eval()
}
}
object TraceEval extends ExpAlg[IEval] {
def action(o : IEval) = new IEval() {
def eval() = {
println("Entering eval()!")
o.eval()
}
}
def Lit(x : Int) = throw new Exception()
def Add(e1 : IEval, e2 : IEval) = throw new Exception()
}
trait ExpPrint2[S <: IEval with IPrint] extends OExpAlg[S, IPrint] {
def Lit(x : Int) = self => new IPrint() {
def print() = x.toString()
}
def Add(e1 : S, e2 : S) = self => new IPrint() {
def print() = e1.print() + " + " + e2.print() + " = " + self.eval()
}
}
// Closing
trait CloseAlg[E] extends ExpAlg[E] {
val alg : OExpAlg[E,E]
def Lit(x : Int) : E = fix(alg.Lit(x))
def Add(e1 : E, e2 : E) : E = fix(alg.Add(e1,e2))
}
def closeAlg[E](a : OExpAlg[E,E]) : ExpAlg[E] = new CloseAlg[E] {
val alg = a
}
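  // fix ties the open-recursion knot: the lazily defined result is passed back to the open
  // component as its own self-reference, so by-name access to `s` inside `f` sees the
  // finished algebra.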
def fix[A](f : Open[A,A]) : A = {lazy val s : A = f(s); s}
// requires a closed (object algebra) component
def exp[A](f : ExpAlg[A]) = {
import f._
Add(Lit(13),Lit(5))
}
// FAMILY Self-references
// Algebras with a family self-reference
trait SelfAlg[Self <: Exp, Exp] {
val fself : ExpAlg[Self]
}
trait SelfExpAlg[Self <: Exp, Exp] extends GExpAlg[Self,Open[Self,Exp]] with SelfAlg[Self,Exp]
trait ExpPrint3[Self <: IEval with IPrint] extends SelfExpAlg[Self,IPrint]{
def Lit(x : Int) = self => new IPrint() {
def print() = x.toString()
}
def Add(e1 : Self, e2 : Self) = self => new IPrint() {
def print() = {
val plus54 = fself.Add(fself.Lit(5), fself.Lit(4)); // virtual constructors
e1.print() + " + " + e2.print() + " = " + self.eval() + " and " + "5 + 4 = " + plus54.eval(); // self-reference
}
}
}
def ExpPrint3[S <: IEval with IPrint] : OpenExpAlg[S,IPrint] = s => new ExpPrint3[S] {
lazy val fself = s
}
trait ExpEval2[Self <: IEval] extends SelfExpAlg[Self,IEval] {
def Lit(x : Int) = self => new IEval {
def eval() : Int = x
}
def Add(e1 : Self, e2 : Self) = self => new IEval {
def eval() : Int = e1.eval() + e2.eval()
}
}
def expEval[S <: IEval] : OpenExpAlg[S,IEval] = s => new ExpEval2[S] {
lazy val fself = s
}
type OpenExpAlg[S <: A, A] = (=> ExpAlg[S]) => GExpAlg[S, Open[S,A]]
def compose[A,B, C](f : B => C, g : A => B) : A => C = x => f(g(x))
def close[S](f : OpenExpAlg[S,S]) : ExpAlg[S] =
fix[ExpAlg[S]](compose(closeAlg,f))
// GENERIC
type OFAlg[F[_,_],S <: E, E] = F[S, Open[S,E]]
type FAlg[F[_,_],E] = F[E,E]
trait SelfFAlg[F[_,_],Self <: Exp, Exp] {
val fself : F[Self, Self]
}
class LiftDecorate[S <: A, A](action : A => A) extends Lifter[A,Any,S] {
def lift(x : A, y : Any) = self => action(x)
}
class MkLifter[A,B, S <: A with B](f : (A,B) => A with B) extends Lifter[A,B,S] {
def lift(x : A, y : B) : Open[S,A with B] = self => f(x,y)
}
object ExpComb extends AlgebraDefault[GExpAlg]
def test3() = {
import ExpComb._
val o = exp(fclose(merge[IEval,IPrint,IEval with IPrint](new LiftEP,
decorate(expEval,TraceEval.action),ExpPrint3)))
println("Eval: " + o.eval() + "\\nPrint: " + o.print())
}
type ExpAlgOpen[S <: T, T] = (=> GExpAlg[S,S]) => GExpAlg[S,Open[S,T]]
} | tvdstorm/oalgcomp | src/oalg/algebra/paper/Self.scala | Scala | epl-1.0 | 4,880 |
import leon.instrumentation._
import leon.invariant._
object AmortizedQueue {
sealed abstract class List
case class Cons(head : BigInt, tail : List) extends List
case class Nil() extends List
case class Queue(front : List, rear : List)
def size(list : List) : BigInt = (list match {
case Nil() => 0
case Cons(_, xs) => 1 + size(xs)
})
def sizeList(list : List) : BigInt = (list match {
case Nil() => 0
case Cons(_, xs) => 1 + sizeList(xs)
}) ensuring(res => res >= 0 && tmpl((a,b) => depth <= a*size(list) + b))
def qsize(q : Queue) : BigInt = size(q.front) + size(q.rear)
def asList(q : Queue) : List = concat(q.front, reverse(q.rear))
def concat(l1 : List, l2 : List) : List = (l1 match {
case Nil() => l2
case Cons(x,xs) => Cons(x, concat(xs, l2))
}) ensuring (res => size(res) == size(l1) + size(l2) && tmpl((a,b,c) => depth <= a*size(l1) + b))
def isAmortized(q : Queue) : Boolean = sizeList(q.front) >= sizeList(q.rear)
def isEmpty(queue : Queue) : Boolean = queue match {
case Queue(Nil(), Nil()) => true
case _ => false
}
def reverseRec(l1: List, l2: List): List = (l1 match {
case Nil() => l2
case Cons(x, xs) => reverseRec(xs, Cons(x, l2))
}) ensuring (res => size(l1) + size(l2) == size(res) && tmpl((a,b) => depth <= a*size(l1) + b))
def reverse(l: List): List = {
reverseRec(l, Nil())
} ensuring (res => size(l) == size(res) && tmpl((a,b) => depth <= a*size(l) + b))
def amortizedQueue(front : List, rear : List) : Queue = {
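    // Re-establishes the queue invariant (front at least as long as rear): when the rear
    // outgrows the front, the rear is reversed and appended to the front, leaving the rear empty.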
if (sizeList(rear) <= sizeList(front))
Queue(front, rear)
else
Queue(concat(front, reverse(rear)), Nil())
}
def enqueue(q : Queue, elem : BigInt) : Queue = ({
amortizedQueue(q.front, Cons(elem, q.rear))
}) ensuring(res => true && tmpl((a,b) => depth <= a*qsize(q) + b))
def dequeue(q : Queue) : Queue = {
require(isAmortized(q) && !isEmpty(q))
q match {
case Queue(Cons(f, fs), rear) => amortizedQueue(fs, rear)
case _ => Queue(Nil(),Nil())
}
} ensuring(res => true && tmpl((a,b) => depth <= a*qsize(q) + b))
def removeLast(l : List) : List = {
require(l != Nil())
l match {
case Cons(x,Nil()) => Nil()
case Cons(x,xs) => Cons(x, removeLast(xs))
case _ => Nil()
}
} ensuring(res => size(res) <= size(l) && tmpl((a,b) => depth <= a*size(l) + b))
def pop(q : Queue) : Queue = {
require(isAmortized(q) && !isEmpty(q))
q match {
case Queue(front, Cons(r,rs)) => Queue(front, rs)
case Queue(front, rear) => Queue(removeLast(front), rear)
case _ => Queue(Nil(),Nil())
}
} ensuring(res => true && tmpl((a,b) => depth <= a*size(q.front) + b))
}
| epfl-lara/leon | testcases/orb-testcases/depth/AmortizedQueue.scala | Scala | gpl-3.0 | 2,707 |
package ohnosequences.sequences
import sequences._, alphabets._
import ohnosequences.cosas.typeSets._
case object finiteSequences {
trait AnyFiniteSequence {
type Alphabet <: AnyFiniteAlphabet
}
trait FiniteSequenceOver[A <: AnyFiniteAlphabet] extends AnyFiniteSequence {
type Alphabet = A
}
def Empty[A0 <: AnyFiniteAlphabet]: EmptyFiniteSequence[A0] = new EmptyFiniteSequence[A0]
class EmptyFiniteSequence[Alph <: AnyFiniteAlphabet]
extends FiniteSequenceOver[Alph] {
def ::[H0](h: H0)(implicit
validSymbol: H0 โ Alph#Symbols
)
: ConsFiniteSequence[Alph,H0,EmptyFiniteSequence[Alph]] =
new ConsFiniteSequence(h,this)
}
case class ConsFiniteSequence[
Alph <: AnyFiniteAlphabet,
H, T <: FiniteSequenceOver[Alph]
  ](val head: H, val tail: T)(implicit val validSymbol: H ∈ Alph#Symbols)
extends FiniteSequenceOver[Alph] {
def ::[H0](h: H0)(implicit
      validSymbol: H0 ∈ Alph#Symbols
)
: ConsFiniteSequence[Alph,H0,ConsFiniteSequence[Alph,H,T]] =
new ConsFiniteSequence(h,this)
}
trait AnyFiniteSequenceType extends AnySequenceType {
type Alphabet <: AnyFiniteAlphabet
}
abstract class FiniteSequenceType[A <: AnyFiniteAlphabet](val alphabet: A) extends AnySequenceType {
type Alphabet = A
}
// TODO build sequences from an HList of symbols, all coming from the FiniteAlphabet (check predicate etc).
// TODO this should be in ops for any sequence with finite alphabet
/*
Given a rep `A :: T :: C :: G :: G :: T` we can check statically whether that thing is from that alphabet. Then as we can create values of seqs from chars, we can use static mappings between alphabets to implement generic translations.
Of course we need to be able to represent functions on alphabets. We can define `poly`s that
1. have as input **symbols** from the domain alphabet
2. and as output static sequences from the codomain alphabet
Then given easy generic ops for building raw stuff from static sequences, we can implement a totally safe translation thing. We can also allow (statically defined) functions which would be defined on static *sequences* as input, returning something over a different alphabet. For example
a :: t :: c -> atc
Like in the canonical DNA -> aminoacid thing. Do we need coproducts here? if we would have them, we could have random access with a safe return type: `A or T or C or D` (over the typeset of symbols)
*/
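  /*
    Illustrative sketch of the idea above (added; the DNA alphabet object and its symbol
    singletons A, T, C, G are hypothetical and assume the corresponding membership
    evidence is in implicit scope):

      val seq = A :: T :: C :: G :: Empty[DNA.type]

    Each `::` demands the implicit `∈` witness against Alph#Symbols, so a symbol outside
    the alphabet is rejected at compile time rather than at run time.
  */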
}
| ohnosequences/sequences | src/main/scala/finiteSequences.scala | Scala | agpl-3.0 | 2,487 |
/* Copyright โ 2012 Michael Ekstrand
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject
* to the following conditions:
*
* - The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package net.elehack.argparse4s
import net.sourceforge.argparse4j.inf.Namespace
/**
* The execution context for a command execution. This is used to
* allow options to resolve their values.
*/
class ExecutionContext(val namespace: Namespace)
object ExecutionContext {
def apply(ns: Namespace) = new ExecutionContext(ns)
}
| mdekstrand/argparse4s | src/main/scala/ExecutionContext.scala | Scala | mit | 1,456 |
package org.genericConfig.admin.shared.step
import play.api.libs.functional.syntax.unlift
import play.api.libs.json.{Format, JsPath}
import play.api.libs.functional.syntax._
/**
* Copyright (C) 2016 Gennadi Heimann [email protected]
*
* Created by Gennadi Heimann 14.04.2020
*/
case class StepParamsDTO(
stepId: Option[String] = None,
outId: Option[String] = None, //configId or componentId
kind: Option[String] = None,
properties : Option[StepPropertiesDTO] = None
)
object StepParamsDTO{
implicit val format: Format[StepParamsDTO] = (
(JsPath \\ "stepId").format(Format.optionWithNull[String]) and
(JsPath \\ "fromId").format(Format.optionWithNull[String]) and
(JsPath \\ "kind").format(Format.optionWithNull[String]) and
(JsPath \\ "properties").format(Format.optionWithNull[StepPropertiesDTO])
)(StepParamsDTO.apply, unlift(StepParamsDTO.unapply))
}
| gennadij/admin | shared/src/main/scala/org/genericConfig/admin/shared/step/StepParamsDTO.scala | Scala | apache-2.0 | 1,019 |
package com.twitter.finagle.stress
import java.io.PrintStream
import com.twitter.ostrich.stats.{Stats => OstrichStats}
import com.twitter.ostrich.stats.StatsProvider
object Stats {
private[this] def print(message: String) {
(new PrintStream(System.out, true, "UTF-8")).println(message)
}
def prettyPrintStats() {
prettyPrint(OstrichStats)
prettyPrintGauges()
}
def prettyPrint(stats: StatsProvider) {
stats.getCounters foreach { case (name, count) =>
print("# %-60s %d".format(name, count))
}
stats.getMetrics foreach { case (name, stat) =>
val statMap = stat.toMap
val keys = statMap.keys.toList.sorted
keys foreach { key =>
print("โ %-60s %s".format("%s/%s".format(name, key), statMap(key)))
}
}
}
def prettyPrintGauges() {
OstrichStats.getGauges foreach { case (k, v) =>
print("โ %-60s %s".format(k, v))
}
}
}
| enachb/finagle_2.9_durgh | finagle-stress/src/main/scala/com/twitter/finagle/stress/Stats.scala | Scala | apache-2.0 | 921 |
package jsm4s.ds
import scala.collection.Iterable
/**
* Created by olshanskiy on 7/13/17.
*/
trait IntentFactory {
val attributes: Int
def empty: FcaSet
def full: FcaSet
def values(x: Iterable[Int]): FcaSet
}
| DmitryOlshansky/jsm4s | src/main/scala/jsm4s/ds/IntentFactory.scala | Scala | gpl-2.0 | 226 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.util.{ArrayData, MapData}
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
/**
* An abstract class for rows used internally in Spark SQL, which only contains the columns as
* internal types.
*/
abstract class InternalRow extends SpecializedGetters with Serializable {
def numFields: Int
// This is only used for tests and will throw a NullPointerException if the value at the position is null.
def getString(ordinal: Int): String = getUTF8String(ordinal).toString
def setNullAt(i: Int): Unit
/**
 * Updates the value at column `i`. Note that after updating, the given value will be kept in this
 * row, and the caller should guarantee that this value won't be changed afterwards.
 */
def update(i: Int, value: Any): Unit
// default implementation (slow)
def setBoolean(i: Int, value: Boolean): Unit = update(i, value)
def setByte(i: Int, value: Byte): Unit = update(i, value)
def setShort(i: Int, value: Short): Unit = update(i, value)
def setInt(i: Int, value: Int): Unit = update(i, value)
def setLong(i: Int, value: Long): Unit = update(i, value)
def setFloat(i: Int, value: Float): Unit = update(i, value)
def setDouble(i: Int, value: Double): Unit = update(i, value)
/**
 * Updates the decimal column at `i`.
 *
 * Note: in order to support updating decimals with precision > 18 in UnsafeRow,
 * do NOT call setNullAt() for a decimal column on an UnsafeRow; call setDecimal(i, null, precision) instead.
 */
def setDecimal(i: Int, value: Decimal, precision: Int) { update(i, value) }
/**
* Make a copy of the current [[InternalRow]] object.
*/
def copy(): InternalRow
/** Returns true if there are any NULL values in this row. */
def anyNull: Boolean = {
val len = numFields
var i = 0
while (i < len) {
if (isNullAt(i)) { return true }
i += 1
}
false
}
/* ---------------------- utility methods for Scala ---------------------- */
/**
* Return a Scala Seq representing the row. Elements are placed in the same order in the Seq.
*/
def toSeq(fieldTypes: Seq[DataType]): Seq[Any] = {
val len = numFields
assert(len == fieldTypes.length)
val values = new Array[Any](len)
var i = 0
while (i < len) {
values(i) = get(i, fieldTypes(i))
i += 1
}
values
}
def toSeq(schema: StructType): Seq[Any] = toSeq(schema.map(_.dataType))
}
object InternalRow {
/**
* This method can be used to construct a [[InternalRow]] with the given values.
*/
def apply(values: Any*): InternalRow = new GenericInternalRow(values.toArray)
/**
* This method can be used to construct a [[InternalRow]] from a [[Seq]] of values.
*/
def fromSeq(values: Seq[Any]): InternalRow = new GenericInternalRow(values.toArray)
/** Returns an empty [[InternalRow]]. */
val empty = apply()
/**
* Copies the given value if it's string/struct/array/map type.
*/
def copyValue(value: Any): Any = value match {
case v: UTF8String => v.copy()
case v: InternalRow => v.copy()
case v: ArrayData => v.copy()
case v: MapData => v.copy()
case _ => value
}
/**
* Returns an accessor for an `InternalRow` with given data type. The returned accessor
* actually takes a `SpecializedGetters` input because it can be generalized to other classes
* that implements `SpecializedGetters` (e.g., `ArrayData`) too.
*/
def getAccessor(dt: DataType, nullable: Boolean = true): (SpecializedGetters, Int) => Any = {
val getValueNullSafe: (SpecializedGetters, Int) => Any = dt match {
case BooleanType => (input, ordinal) => input.getBoolean(ordinal)
case ByteType => (input, ordinal) => input.getByte(ordinal)
case ShortType => (input, ordinal) => input.getShort(ordinal)
case IntegerType | DateType => (input, ordinal) => input.getInt(ordinal)
case LongType | TimestampType => (input, ordinal) => input.getLong(ordinal)
case FloatType => (input, ordinal) => input.getFloat(ordinal)
case DoubleType => (input, ordinal) => input.getDouble(ordinal)
case StringType => (input, ordinal) => input.getUTF8String(ordinal)
case BinaryType => (input, ordinal) => input.getBinary(ordinal)
case CalendarIntervalType => (input, ordinal) => input.getInterval(ordinal)
case t: DecimalType => (input, ordinal) => input.getDecimal(ordinal, t.precision, t.scale)
case t: StructType => (input, ordinal) => input.getStruct(ordinal, t.size)
case _: ArrayType => (input, ordinal) => input.getArray(ordinal)
case _: MapType => (input, ordinal) => input.getMap(ordinal)
case u: UserDefinedType[_] => getAccessor(u.sqlType, nullable)
case _ => (input, ordinal) => input.get(ordinal, dt)
}
if (nullable) {
(getter, index) => {
if (getter.isNullAt(index)) {
null
} else {
getValueNullSafe(getter, index)
}
}
} else {
getValueNullSafe
}
}
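// Hedged usage sketch: the returned accessor accepts any SpecializedGetters, e.g.
//   val getInt = InternalRow.getAccessor(IntegerType)
//   getInt(row, 0)        // works for an InternalRow
//   getInt(arrayData, 0)  // and equally for an ArrayData
// (`row` and `arrayData` are illustrative values.)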
/**
* Returns a writer for an `InternalRow` with given data type.
*/
def getWriter(ordinal: Int, dt: DataType): (InternalRow, Any) => Unit = dt match {
case BooleanType => (input, v) => input.setBoolean(ordinal, v.asInstanceOf[Boolean])
case ByteType => (input, v) => input.setByte(ordinal, v.asInstanceOf[Byte])
case ShortType => (input, v) => input.setShort(ordinal, v.asInstanceOf[Short])
case IntegerType | DateType => (input, v) => input.setInt(ordinal, v.asInstanceOf[Int])
case LongType | TimestampType => (input, v) => input.setLong(ordinal, v.asInstanceOf[Long])
case FloatType => (input, v) => input.setFloat(ordinal, v.asInstanceOf[Float])
case DoubleType => (input, v) => input.setDouble(ordinal, v.asInstanceOf[Double])
case DecimalType.Fixed(precision, _) =>
(input, v) => input.setDecimal(ordinal, v.asInstanceOf[Decimal], precision)
case udt: UserDefinedType[_] => getWriter(ordinal, udt.sqlType)
case NullType => (input, _) => input.setNullAt(ordinal)
case StringType => (input, v) => input.update(ordinal, v.asInstanceOf[UTF8String].copy())
case _: StructType => (input, v) => input.update(ordinal, v.asInstanceOf[InternalRow].copy())
case _: ArrayType => (input, v) => input.update(ordinal, v.asInstanceOf[ArrayData].copy())
case _: MapType => (input, v) => input.update(ordinal, v.asInstanceOf[MapData].copy())
case _ => (input, v) => input.update(ordinal, v)
}
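// Hedged usage sketch: `InternalRow.getWriter(0, StringType)` yields a closure that
// copies the given UTF8String and stores it at ordinal 0 of a mutable row, e.g.
//   val write = InternalRow.getWriter(0, StringType)
//   write(mutableRow, UTF8String.fromString("abc"))   // `mutableRow` is illustrative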
}
| aosagie/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/InternalRow.scala | Scala | apache-2.0 | 7,372 |
import clean.lib.Ds
import ml.classifiers.MLP
import traits.Arg
import util.Datasets
object Main extends Arg with RHeatMap {
lazy val (unlabeledStyleFore, unlabeledStyleBack) = "mark=text, mark options={solid, scale=1, ultra thick}" -> "gray, mark=*, mark options={solid, scale=2, ultra thick}"
lazy val (labeledStyleFore, labeledStyleBack) = "mark=text, mark options={solid, scale=1.1, ultra thick}" -> "white, mark=*, mark options={solid, scale=2.3, ultra thick}"
lazy val formats = Seq(
("color=teal, text mark=\\\\bf{a}, " + labeledStyleFore, labeledStyleBack),
("color=blue, text mark=\\\\bf{b}, " + labeledStyleFore, labeledStyleBack),
("color=yellow, text mark=\\\\bf{?}, " + unlabeledStyleFore, unlabeledStyleBack),
("color=red, text mark=\\\\bf{c}, " + labeledStyleFore, labeledStyleBack)
)
run()
def run(): Unit = {
val (patts, testSet) = Ds("fig.gif", readOnly = true).patterns -> Ds("fig2.gif", readOnly = true).patterns
if (patts.head.nclasses != testSet.head.nclasses) sys.error("Training and testing sets have different number of classes.")
val unlabeledClass = patts.groupBy(_.label).maxBy(_._2.size)._1
val (labeled, unlabeled) = patts.partition(_.label != unlabeledClass)
val pattsIg = Datasets.reweighted(unlabeled.map(_.relabeled(0)) ++ labeled.map(_.relabeled(1)))
val first = (d: Array[Double]) => d(0)
val max = (d: Array[Double]) => d.sorted.reverse(0)
val margin = (d: Array[Double]) => 1 - (d.sorted.reverse(0) - d.sorted.reverse(1))
val symbs = patts.groupBy(_.label).values.zip(formats).toList
Seq(
// ("certainty", labeled, max)
("ignorance", pattsIg, first, "(255,100,200)")
, ("decision-boundary", labeled, margin, "(100,200,255)")
// , ("knowledge-boundary", pattsIg, margin)
) foreach { case (name, set, f, colormap) => MLP().build(set).heatmap(name, testSet, f, symbs, colormap) }
}
}
| davips/knowledge-boundary | src/main/scala/Main.scala | Scala | gpl-3.0 | 1,930 |
/*
* Copyright (c) 2013-2014 Snowplow Analytics Ltd.
* All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache
* License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied.
*
* See the Apache License Version 2.0 for the specific language
* governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow
package enrich
package kinesis
package sources
// Java
import java.io.{FileInputStream,IOException}
import java.net.InetAddress
import java.nio.ByteBuffer
import java.util.{List,UUID}
// Amazon
import com.amazonaws.auth._
import com.amazonaws.AmazonClientException
import com.amazonaws.services.kinesis.AmazonKinesisClient
import com.amazonaws.services.kinesis.clientlibrary.interfaces._
import com.amazonaws.services.kinesis.clientlibrary.exceptions._
import com.amazonaws.services.kinesis.clientlibrary.lib.worker._
import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownReason
import com.amazonaws.services.kinesis.metrics.impl.NullMetricsFactory
import com.amazonaws.services.kinesis.model.Record
// Scala
import scala.util.control.Breaks._
import scala.collection.JavaConversions._
// Thrift
import org.apache.thrift.TDeserializer
// Snowplow events and enrichment
import sinks._
import collectors.thrift.{
SnowplowRawEvent,
TrackerPayload => ThriftTrackerPayload,
PayloadProtocol,
PayloadFormat
}
// Logging
import org.slf4j.LoggerFactory
/**
* Source to read events from a Kinesis stream
*
* TODO: replace printlns with the Java logger
*/
class KinesisSource(config: KinesisEnrichConfig)
extends AbstractSource(config) {
private lazy val log = LoggerFactory.getLogger(getClass())
import log.{error, debug, info, trace}
/**
* Never-ending processing loop over source stream.
*/
def run {
val workerId = InetAddress.getLocalHost().getCanonicalHostName() +
":" + UUID.randomUUID()
info("Using workerId: " + workerId)
val kinesisClientLibConfiguration = new KinesisClientLibConfiguration(
config.appName,
config.rawInStream,
kinesisProvider,
workerId
).withInitialPositionInStream(
InitialPositionInStream.valueOf(config.initialPosition)
)
info(s"Running: ${config.appName}.")
info(s"Processing raw input stream: ${config.rawInStream}")
val rawEventProcessorFactory = new RawEventProcessorFactory(
config,
sink.get // TODO: yech
)
val worker = new Worker(
rawEventProcessorFactory,
kinesisClientLibConfiguration,
new NullMetricsFactory()
)
worker.run()
}
// Factory needed by the Amazon Kinesis Consumer library to
// create a processor.
class RawEventProcessorFactory(config: KinesisEnrichConfig, sink: ISink)
extends IRecordProcessorFactory {
@Override
def createProcessor: IRecordProcessor = {
new RawEventProcessor(config, sink)
}
}
// Process events from a Kinesis stream.
class RawEventProcessor(config: KinesisEnrichConfig, sink: ISink)
extends IRecordProcessor {
private val thriftDeserializer = new TDeserializer()
private var kinesisShardId: String = _
private var nextCheckpointTimeInMillis: Long = _
// Backoff and retry settings.
private val BACKOFF_TIME_IN_MILLIS = 3000L
private val NUM_RETRIES = 10
private val CHECKPOINT_INTERVAL_MILLIS = 1000L
@Override
def initialize(shardId: String) = {
info("Initializing record processor for shard: " + shardId)
this.kinesisShardId = shardId
}
@Override
def processRecords(records: List[Record],
checkpointer: IRecordProcessorCheckpointer) = {
info(s"Processing ${records.size} records from $kinesisShardId")
processRecordsWithRetries(records)
if (System.currentTimeMillis() > nextCheckpointTimeInMillis) {
checkpoint(checkpointer)
nextCheckpointTimeInMillis =
System.currentTimeMillis + CHECKPOINT_INTERVAL_MILLIS
}
}
private def processRecordsWithRetries(records: List[Record]) = {
for (record <- records) {
try {
info(s"Sequence number: ${record.getSequenceNumber}")
info(s"Partition key: ${record.getPartitionKey}")
enrichEvent(record.getData.array)
} catch {
case t: Throwable =>
error(s"Caught throwable while processing record $record")
println(t)
}
}
}
@Override
def shutdown(checkpointer: IRecordProcessorCheckpointer,
reason: ShutdownReason) = {
info(s"Shutting down record processor for shard: $kinesisShardId")
if (reason == ShutdownReason.TERMINATE) {
checkpoint(checkpointer)
}
}
private def checkpoint(checkpointer: IRecordProcessorCheckpointer) = {
info(s"Checkpointing shard $kinesisShardId")
breakable {
for (i <- 0 to NUM_RETRIES-1) {
try {
checkpointer.checkpoint()
break
} catch {
case se: ShutdownException =>
error("Caught shutdown exception, skipping checkpoint.", se)
case e: ThrottlingException =>
if (i >= (NUM_RETRIES - 1)) {
error(s"Checkpoint failed after ${i+1} attempts.", e)
} else {
error(s"Transient issue when checkpointing - attempt ${i+1} of "
+ NUM_RETRIES, e)
}
case e: InvalidStateException =>
error("Cannot save checkpoint to the DynamoDB table used by " +
"the Amazon Kinesis Client Library.", e)
}
Thread.sleep(BACKOFF_TIME_IN_MILLIS)
}
}
}
}
}
| pkallos/snowplow | 3-enrich/scala-kinesis-enrich/src/main/scala/com.snowplowanalytics.snowplow.enrich.kinesis/sources/KinesisSource.scala | Scala | apache-2.0 | 6,101 |
package org.scalatra
import scala.xml.{Text, Node}
import org.apache.commons.io.IOUtils
import fileupload.FileUploadSupport
import scalate.ScalateSupport
class TemplateExample extends ScalatraServlet with UrlSupport /*with FileUploadSupport*/ with FlashMapSupport with ScalateSupport {
object Template {
def style() =
"""
pre { border: 1px solid black; padding: 10px; }
body { font-family: Helvetica, sans-serif; }
h1 { color: #8b2323 }
"""
def page(title:String, content:Seq[Node]) = {
<html>
<head>
<title>{ title }</title>
<style>{ Template.style }</style>
</head>
<body>
<h1>{ title }</h1>
{ content }
<hr/>
<a href={url("/date/2009/12/26")}>date example</a>
<a href={url("/form")}>form example</a>
<a href={url("/upload")}>upload</a>
<a href={url("/")}>hello world</a>
<a href={url("/flash-map/form")}>flash scope</a>
<a href={url("/login")}>login</a>
<a href={url("/logout")}>logout</a>
<a href={url("/filter-example")}>filter example</a>
<a href={url("/cookies-example")}>cookies example</a>
<a href={url("/chat")}>chat demo</a>
<a href={url("/atmo_chat.html")}>Atmosphere chat demo</a>
<a href={url("/chat_30.html")}>Servlet 3.0 async chat demo</a>
</body>
</html>
}
}
before() {
contentType = "text/html"
}
get("/date/:year/:month/:day") {
Template.page("Scalatra: Date Example",
<ul>
<li>Year: {params("year")}</li>
<li>Month: {params("month")}</li>
<li>Day: {params("day")}</li>
</ul>
<pre>Route: /date/:year/:month/:day</pre>
)
}
get("/form") {
Template.page("Scalatra: Form Post Example",
<form action={url("/post")} method='POST'>
Post something: <input name="submission" type='text'/>
<input type='submit'/>
</form>
<pre>Route: /form</pre>
)
}
post("/post") {
Template.page("Scalatra: Form Post Result",
<p>You posted: {params("submission")}</p>
<pre>Route: /post</pre>
)
}
get("/login") {
(session.get("first"), session.get("last")) match {
case (Some(first:String), Some(last:String)) =>
Template.page("Scalatra: Session Example",
<pre>You have logged in as: {first + "-" + last}</pre>
<pre>Route: /login</pre>
)
case x:AnyRef =>
Template.page("Scalatra: Session Example" + x.toString,
<form action={url("/login")} method='POST'>
First Name: <input name="first" type='text'/>
Last Name: <input name="last" type='text'/>
<input type='submit'/>
</form>
<pre>Route: /login</pre>
)
}
}
get("/echoclient") {
Template.page("Scalatra: Echo Server Client Example",
<pre>
<script type="text/javascript" src="/js/json.js" ></script>
<script type="text/javascript" src="/socket.io/socket.io.js"></script>
{"var socket = new io.Socket(null, { port: 8080, rememberTransport: false });"}
{"""socket.on("message", function(messageType, data) { console.log(data) });"""}
{"socket.connect();"}
{"""socket.send("hello");"""}
</pre>
)
}
get("/chat") {
layoutTemplate("chat.ssp")
}
post("/login") {
(params("first"), params("last")) match {
case (first:String, last:String) => {
session("first") = first
session("last") = last
Template.page("Scalatra: Session Example",
<pre>You have just logged in as: {first + " " + last}</pre>
<pre>Route: /login</pre>
)
}
}
}
get("/logout") {
session.invalidate
Template.page("Scalatra: Session Example",
<pre>You have logged out</pre>
<pre>Route: /logout</pre>
)
}
get("/") {
Template.page("Scalatra: Hello World",
<h2>Hello world!</h2>
<p>Referer: { (request referrer) map { Text(_) } getOrElse { <i>none</i> }}</p>
<pre>Route: /</pre>
)
}
get("/scalate") {
val content = "this is some fake content for the web page"
layoutTemplate("index.scaml", "content"-> content)
}
get("/upload") {
Template.page("Scalatra: Session Example",
<form method="post" enctype="multipart/form-data">
Upload a file. Its contents will be displayed in the browser.<br />
<label>File: <input type="file" name="file" /></label><br />
<input type="submit" />
</form>
)
}
/*
post("/upload") {
contentType = "text/plain"
fileParams.get("file") foreach { file => IOUtils.copy(file.getInputStream, response.getOutputStream) }
}
*/
get("/flash-map/form") {
Template.page("Scalatra: Flash Map Example",
<span>Supports the post-then-redirect pattern</span><br />
<form method="post">
<label>Message: <input type="text" name="message" /></label><br />
<input type="submit" />
</form>
)
}
post("/flash-map/form") {
flash("message") = params.getOrElse("message", "")
redirect("/flash-map/result")
}
get("/flash-map/result") {
Template.page("Scalatra: Flash Example",
<span>Message = {flash.getOrElse("message", "")}</span>
)
}
protected def contextPath = request.getContextPath
post("/echo") {
java.net.URLEncoder.encode(params("echo"), "UTF-8")
}
}
| kuochaoyi/scalatra | example/src/main/scala/org/scalatra/TemplateExample.scala | Scala | bsd-2-clause | 5,395 |