code (string, 5–1M chars) | repo_name (string, 5–109 chars) | path (string, 6–208 chars) | language (1 class) | license (15 classes) | size (int64, 5–1M)
---|---|---|---|---|---|
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.datasource.mongodb.client
import com.mongodb.casbah.MongoClient
import com.mongodb.{MongoCredential, ServerAddress}
import com.stratio.datasource.MongodbTestConstants
import com.stratio.datasource.mongodb.config.MongodbSSLOptions
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfter, FlatSpec, Matchers}
@RunWith(classOf[JUnitRunner])
class MongodbClientFactoryTest extends FlatSpec
with Matchers
with MongodbTestConstants
with BeforeAndAfter
with BeforeAndAfterAll {
type Client = MongoClient
val hostClient = MongodbClientFactory.getClient("127.0.0.1").clientConnection
val hostPortCredentialsClient = MongodbClientFactory.getClient("127.0.0.1", 27017, "user", "database", "password").clientConnection
val fullClient = MongodbClientFactory.getClient(
List(new ServerAddress("127.0.0.1:27017")),
List(MongoCredential.createCredential("user","database","password".toCharArray)),
Some(MongodbSSLOptions(Some("/etc/ssl/mongodb.keystore"), Some("password"), "/etc/ssl/mongodb.keystore", Some("password"))),
Map(
"readPreference" -> "nearest",
"connectTimeout"-> "50000",
"socketTimeout"-> "50000",
"maxWaitTime"-> "50000",
"connectionsPerHost" -> "20",
"threadsAllowedToBlockForConnectionMultiplier" -> "5"
)
).clientConnection
val gracefully = true
val notGracefully = false
behavior of "MongodbClientFactory"
it should "Valid output type " + scalaBinaryVersion in {
hostClient shouldBe a [Client]
hostPortCredentialsClient shouldBe a [Client]
fullClient shouldBe a [Client]
MongodbClientFactory.closeAll(notGracefully)
}
it should "Valid clients size when getting the same client " in {
val sameHostClient = MongodbClientFactory.getClient("127.0.0.1").clientConnection
MongodbClientFactory.getClientPoolSize should be (1)
val otherHostClient = MongodbClientFactory.getClient("127.0.0.1").clientConnection
MongodbClientFactory.getClientPoolSize should be (2)
MongodbClientFactory.closeAll(notGracefully)
}
it should "Valid clients size when getting the same client and set free " in {
val sameHostClient = MongodbClientFactory.getClient("127.0.0.1").clientConnection
MongodbClientFactory.getClientPoolSize should be (1)
MongodbClientFactory.setFreeConnectionByClient(sameHostClient)
val otherHostClient = MongodbClientFactory.getClient("127.0.0.1").clientConnection
MongodbClientFactory.getClientPoolSize should be (1)
MongodbClientFactory.closeAll(notGracefully)
}
it should "Valid clients size when closing one client gracefully " in {
val sameHostClient = MongodbClientFactory.getClient("127.0.0.1").clientConnection
MongodbClientFactory.getClientPoolSize should be (1)
MongodbClientFactory.closeByClient(sameHostClient)
MongodbClientFactory.getClientPoolSize should be (1)
MongodbClientFactory.closeAll(notGracefully)
}
it should "Valid clients size when closing one client not gracefully " in {
val sameHostClient = MongodbClientFactory.getClient("127.0.0.1").clientConnection
MongodbClientFactory.getClientPoolSize should be (1)
MongodbClientFactory.closeByClient(sameHostClient, notGracefully)
MongodbClientFactory.getClientPoolSize should be (0)
MongodbClientFactory.closeAll(notGracefully)
}
it should "Valid clients size when closing all clients gracefully " in {
val sameHostClient = MongodbClientFactory.getClient("127.0.0.1").clientConnection
val otherHostClient = MongodbClientFactory.getClient("127.0.0.1").clientConnection
MongodbClientFactory.getClientPoolSize should be (2)
MongodbClientFactory.closeAll(gracefully, 1)
MongodbClientFactory.getClientPoolSize should be (2)
MongodbClientFactory.setFreeConnectionByClient(sameHostClient)
MongodbClientFactory.closeAll(gracefully, 1)
MongodbClientFactory.getClientPoolSize should be (1)
MongodbClientFactory.closeAll(notGracefully)
}
it should "Valid clients size when closing all clients not gracefully " in {
val sameHostClient = MongodbClientFactory.getClient("127.0.0.1").clientConnection
val otherHostClient = MongodbClientFactory.getClient("127.0.0.1").clientConnection
val gracefully = false
MongodbClientFactory.getClientPoolSize should be (2)
MongodbClientFactory.closeAll(notGracefully)
MongodbClientFactory.getClientPoolSize should be (0)
MongodbClientFactory.closeAll(notGracefully)
}
}
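/*
 * Illustrative sketch (not part of the test suite above): the acquire/work/release
 * pattern that these tests exercise. `withClient` is a hypothetical helper and only
 * uses calls already shown above (getClient, clientConnection, setFreeConnectionByClient).
 */
object MongodbClientUsageSketch {
  def withClient[T](host: String)(work: MongoClient => T): T = {
    // Borrow a pooled connection for the given host.
    val connection = MongodbClientFactory.getClient(host).clientConnection
    try work(connection)
    // Return it to the pool so a later getClient call can reuse it.
    finally MongodbClientFactory.setFreeConnectionByClient(connection)
  }
}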
| Stratio/spark-mongodb | spark-mongodb/src/test/scala/com/stratio/datasource/mongodb/client/MongodbClientFactoryTest.scala | Scala | apache-2.0 | 5,202 |
package geotrellis.logic.applicative
import geotrellis._
import geotrellis.process._
/**
* This corresponds to Haskell's "apply" (<*>) on Functor.
*/
case class Apply[A, Z:Manifest](a:Op[A])(f:Op[A => Z])
extends Op2[A, A => Z, Z](a, f)((a, f) => Result(f(a)))
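/*
 * Illustrative sketch (not part of geotrellis): the same applicative "apply" shape
 * expressed with plain Options, to show what Apply(a)(f) computes. The names below
 * are hypothetical and use only the standard library.
 */
object ApplyOptionSketch {
  // applyOption(Some(2))(Some((_: Int) + 1)) == Some(3), mirroring Apply, which
  // evaluates both operations and yields Result(f(a)).
  def applyOption[A, Z](a: Option[A])(f: Option[A => Z]): Option[Z] =
    for (value <- a; func <- f) yield func(value)
}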
| Tjoene/thesis | Case_Programs/geotrellis-0.7.0/src/main/scala/geotrellis/logic/applicative/Apply.scala | Scala | gpl-2.0 | 265 |
package org.scalaide.core.internal.builder
import org.eclipse.core.resources.IFile
import org.eclipse.core.runtime.IProgressMonitor
import org.eclipse.core.runtime.SubMonitor
import sbt.inc.IncOptions
import sbt.inc.Analysis
import java.io.File
import org.eclipse.core.resources.IMarker
/**
* Abstraction which exposes the sbt compiler to Eclipse.
*/
trait EclipseBuildManager {
def build(addedOrUpdated: Set[IFile], removed: Set[IFile], monitor: SubMonitor): Unit
/** Has build errors? Only valid if the project has been built before. */
@volatile protected var hasInternalErrors: Boolean = false
/** <code>true</code> indicates that the compiler requires the sources to be reloaded. */
def invalidateAfterLoad: Boolean
/** Can be used to clean the compiler's internal state. */
def clean(implicit monitor: IProgressMonitor): Unit
/** Reports the outcome of the most recent compilation. */
def hasErrors: Boolean = hasInternalErrors
/** Says whether the underlying compiler is able to discover dependencies and add them to the build path. */
def canTrackDependencies: Boolean
/** Returns the latest dependency analysis produced by the underlying compiler. */
def latestAnalysis(incOptions: => IncOptions): Analysis
/**
* Finds build manager which built given file
* @return `Option[EclipseBuildManager]` when found or `None` otherwise
*/
def buildManagerOf(outputFile: File): Option[EclipseBuildManager]
/** Returns error markers on underlying resources. */
def buildErrors: Set[IMarker]
}
/** Keeps the collected analysis in a persistent store. The store is exposed to callers. */
trait CachedAnalysisBuildManager extends EclipseBuildManager {
def analysisStore: IFile
}
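/*
 * Hypothetical sketch (not part of scala-ide): the smallest conceivable
 * EclipseBuildManager, listed only to make the required members visible at a glance.
 * Every body is a placeholder; a real manager delegates to the sbt/zinc incremental compiler.
 */
class NoOpBuildManager extends EclipseBuildManager {
  override def build(addedOrUpdated: Set[IFile], removed: Set[IFile], monitor: SubMonitor): Unit = ()
  override def invalidateAfterLoad: Boolean = false
  override def clean(implicit monitor: IProgressMonitor): Unit = ()
  override def canTrackDependencies: Boolean = false
  override def latestAnalysis(incOptions: => IncOptions): Analysis = ??? // no analysis is kept
  override def buildManagerOf(outputFile: File): Option[EclipseBuildManager] = None
  override def buildErrors: Set[IMarker] = Set.empty
}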
| andrey-ilinykh/scala-ide | org.scala-ide.sdt.core/src/org/scalaide/core/internal/builder/EclipseBuildManager.scala | Scala | bsd-3-clause | 1,661 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import java.nio.charset.StandardCharsets
import java.sql.{Date, Timestamp}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{AnalysisException, Row, SparkSession}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.UnsupportedOperationChecker
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, ReturnAnswer}
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.execution.command.{DescribeTableCommand, ExecutedCommandExec, ShowTablesCommand}
import org.apache.spark.sql.execution.exchange.{EnsureRequirements, ReuseExchange}
import org.apache.spark.sql.types.{BinaryType, DateType, DecimalType, TimestampType, _}
import org.apache.spark.util.Utils
/**
* The primary workflow for executing relational queries using Spark. Designed to allow easy
* access to the intermediate phases of query execution for developers.
*
* While this is not a public class, we should avoid changing the function names for the sake of
* changing them, because a lot of developers use the feature for debugging.
*/
class QueryExecution(val sparkSession: SparkSession, val logical: LogicalPlan) {
// TODO: Move the planner and optimizer into here from SessionState.
protected def planner = sparkSession.sessionState.planner
def assertAnalyzed(): Unit = {
// Analyzer is invoked outside the try block to avoid calling it again from within the
// catch block below.
analyzed
try {
sparkSession.sessionState.analyzer.checkAnalysis(analyzed)
} catch {
case e: AnalysisException =>
val ae = new AnalysisException(e.message, e.line, e.startPosition, Option(analyzed))
ae.setStackTrace(e.getStackTrace)
throw ae
}
}
def assertSupported(): Unit = {
if (sparkSession.sessionState.conf.isUnsupportedOperationCheckEnabled) {
UnsupportedOperationChecker.checkForBatch(analyzed)
}
}
lazy val analyzed: LogicalPlan = {
SparkSession.setActiveSession(sparkSession)
sparkSession.sessionState.analyzer.execute(logical)
}
lazy val withCachedData: LogicalPlan = {
assertAnalyzed()
assertSupported()
sparkSession.sharedState.cacheManager.useCachedData(analyzed)
}
lazy val optimizedPlan: LogicalPlan = sparkSession.sessionState.optimizer.execute(withCachedData)
lazy val sparkPlan: SparkPlan = {
SparkSession.setActiveSession(sparkSession)
// TODO: We use next(), i.e. take the first plan returned by the planner, here for now,
// but eventually we will implement a mechanism to choose the best plan.
planner.plan(ReturnAnswer(optimizedPlan)).next()
}
// executedPlan should not be used to initialize any SparkPlan. It should be
// only used for execution.
lazy val executedPlan: SparkPlan = prepareForExecution(sparkPlan)
/** Internal version of the RDD. Avoids copies and has no schema */
lazy val toRdd: RDD[InternalRow] = executedPlan.execute()
/**
* Prepares a planned [[SparkPlan]] for execution by inserting shuffle operations and internal
* row format conversions as needed.
*/
protected def prepareForExecution(plan: SparkPlan): SparkPlan = {
preparations.foldLeft(plan) { case (sp, rule) => rule.apply(sp) }
}
/** A sequence of rules that will be applied in order to the physical plan before execution. */
protected def preparations: Seq[Rule[SparkPlan]] = Seq(
python.ExtractPythonUDFs,
PlanSubqueries(sparkSession),
EnsureRequirements(sparkSession.sessionState.conf),
CollapseCodegenStages(sparkSession.sessionState.conf),
ReuseExchange(sparkSession.sessionState.conf),
ReuseSubquery(sparkSession.sessionState.conf))
protected def stringOrError[A](f: => A): String =
try f.toString catch { case e: AnalysisException => e.toString }
/**
* Returns the result as a hive compatible sequence of strings. This is for testing only.
*/
def hiveResultString(): Seq[String] = executedPlan match {
case ExecutedCommandExec(desc: DescribeTableCommand) =>
// If it is a describe command for a Hive table, we want to have the output format
// be similar to Hive's.
desc.run(sparkSession).map {
case Row(name: String, dataType: String, comment) =>
Seq(name, dataType,
Option(comment.asInstanceOf[String]).getOrElse(""))
.map(s => String.format(s"%-20s", s))
.mkString("\t")
}
// SHOW TABLES in Hive only outputs table names, while ours outputs database, table name, isTemp.
case command @ ExecutedCommandExec(s: ShowTablesCommand) if !s.isExtended =>
command.executeCollect().map(_.getString(1))
case other =>
val result: Seq[Seq[Any]] = other.executeCollectPublic().map(_.toSeq).toSeq
// We need the types so we can output struct field names
val types = analyzed.output.map(_.dataType)
// Reformat to match hive tab delimited output.
result.map(_.zip(types).map(toHiveString)).map(_.mkString("\t"))
}
/** Formats a datum (based on the given data type) and returns the string representation. */
private def toHiveString(a: (Any, DataType)): String = {
val primitiveTypes = Seq(StringType, IntegerType, LongType, DoubleType, FloatType,
BooleanType, ByteType, ShortType, DateType, TimestampType, BinaryType)
def formatDecimal(d: java.math.BigDecimal): String = {
if (d.compareTo(java.math.BigDecimal.ZERO) == 0) {
java.math.BigDecimal.ZERO.toPlainString
} else {
d.stripTrailingZeros().toPlainString
}
}
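// Worked example (illustrative): formatDecimal(new java.math.BigDecimal("2.500")) yields "2.5",
// while formatDecimal(new java.math.BigDecimal("0.000")) yields "0" rather than "0.000".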
/** Hive outputs fields of structs slightly differently than top level attributes. */
def toHiveStructString(a: (Any, DataType)): String = a match {
case (struct: Row, StructType(fields)) =>
struct.toSeq.zip(fields).map {
case (v, t) => s""""${t.name}":${toHiveStructString(v, t.dataType)}"""
}.mkString("{", ",", "}")
case (seq: Seq[_], ArrayType(typ, _)) =>
seq.map(v => (v, typ)).map(toHiveStructString).mkString("[", ",", "]")
case (map: Map[_, _], MapType(kType, vType, _)) =>
map.map {
case (key, value) =>
toHiveStructString((key, kType)) + ":" + toHiveStructString((value, vType))
}.toSeq.sorted.mkString("{", ",", "}")
case (null, _) => "null"
case (s: String, StringType) => "\"" + s + "\""
case (decimal, DecimalType()) => decimal.toString
case (other, tpe) if primitiveTypes contains tpe => other.toString
}
a match {
case (struct: Row, StructType(fields)) =>
struct.toSeq.zip(fields).map {
case (v, t) => s""""${t.name}":${toHiveStructString(v, t.dataType)}"""
}.mkString("{", ",", "}")
case (seq: Seq[_], ArrayType(typ, _)) =>
seq.map(v => (v, typ)).map(toHiveStructString).mkString("[", ",", "]")
case (map: Map[_, _], MapType(kType, vType, _)) =>
map.map {
case (key, value) =>
toHiveStructString((key, kType)) + ":" + toHiveStructString((value, vType))
}.toSeq.sorted.mkString("{", ",", "}")
case (null, _) => "NULL"
case (d: Date, DateType) =>
DateTimeUtils.dateToString(DateTimeUtils.fromJavaDate(d))
case (t: Timestamp, TimestampType) =>
DateTimeUtils.timestampToString(DateTimeUtils.fromJavaTimestamp(t),
DateTimeUtils.getTimeZone(sparkSession.sessionState.conf.sessionLocalTimeZone))
case (bin: Array[Byte], BinaryType) => new String(bin, StandardCharsets.UTF_8)
case (decimal: java.math.BigDecimal, DecimalType()) => formatDecimal(decimal)
case (other, tpe) if primitiveTypes.contains(tpe) => other.toString
}
}
def simpleString: String = {
s"""== Physical Plan ==
|${stringOrError(executedPlan.treeString(verbose = false))}
""".stripMargin.trim
}
override def toString: String = completeString(appendStats = false)
def toStringWithStats: String = completeString(appendStats = true)
private def completeString(appendStats: Boolean): String = {
def output = Utils.truncatedString(
analyzed.output.map(o => s"${o.name}: ${o.dataType.simpleString}"), ", ")
val analyzedPlan = Seq(
stringOrError(output),
stringOrError(analyzed.treeString(verbose = true))
).filter(_.nonEmpty).mkString("\n")
val optimizedPlanString = if (appendStats) {
// trigger to compute stats for logical plans
optimizedPlan.stats(sparkSession.sessionState.conf)
optimizedPlan.treeString(verbose = true, addSuffix = true)
} else {
optimizedPlan.treeString(verbose = true)
}
s"""== Parsed Logical Plan ==
|${stringOrError(logical.treeString(verbose = true))}
|== Analyzed Logical Plan ==
|$analyzedPlan
|== Optimized Logical Plan ==
|${stringOrError(optimizedPlanString)}
|== Physical Plan ==
|${stringOrError(executedPlan.treeString(verbose = true))}
""".stripMargin.trim
}
/** A special namespace for commands that can be used to debug query execution. */
// scalastyle:off
object debug {
// scalastyle:on
/**
* Prints to stdout all the generated code found in this plan (i.e. the output of each
* WholeStageCodegen subtree).
*/
def codegen(): Unit = {
// scalastyle:off println
println(org.apache.spark.sql.execution.debug.codegenString(executedPlan))
// scalastyle:on println
}
}
}
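/*
 * Usage sketch (illustrative, not part of Spark): how the lazy phases above are normally
 * reached from user code. Only public API is used (Dataset.queryExecution); the object and
 * method names below are hypothetical.
 */
object QueryExecutionUsageSketch {
  def inspect(spark: SparkSession): Unit = {
    val df = spark.range(10).toDF("id")
    // A Dataset exposes its QueryExecution, so each phase can be inspected lazily.
    val qe: QueryExecution = df.queryExecution
    println(qe.analyzed)      // resolved logical plan
    println(qe.optimizedPlan) // after the optimizer rules
    println(qe.executedPlan)  // physical plan with the preparation rules applied
  }
}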
| andrewor14/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/QueryExecution.scala | Scala | apache-2.0 | 10,326 |
package uk.gov.gds.ier.transaction.ordinary.previousAddress
import uk.gov.gds.ier.model.{PossibleAddress, Addresses, MovedHouseOption}
import uk.gov.gds.ier.step.StepTemplate
import uk.gov.gds.ier.serialiser.JsonSerialiser
import uk.gov.gds.ier.service.AddressService
import uk.gov.gds.ier.transaction.ordinary.InprogressOrdinary
trait PreviousAddressSelectMustache extends StepTemplate[InprogressOrdinary] {
val addressService: AddressService
val serialiser: JsonSerialiser
case class SelectModel (
question: Question,
lookupUrl: String,
manualUrl: String,
postcode: Field,
address: Field,
possibleJsonList: Field,
possiblePostcode: Field,
hasAddresses: Boolean,
hasAuthority: Boolean
) extends MustacheData
val mustache = MultilingualTemplate("ordinary/previousAddressSelect") { implicit lang =>
(form, post) =>
implicit val progressForm = form
val movedRecently = form(keys.previousAddress.movedRecently).value.map {
str => MovedHouseOption.parse(str)
}
val title = movedRecently match {
case Some(MovedHouseOption.MovedFromAbroadRegistered) => Messages("ordinary_previousAddress_yesFromAbroadWasRegistered_title")
case _ => Messages("ordinary_previousAddress_yesFromUk_title")
}
val selectedUprn = form(keys.previousAddress.previousAddress.uprn).value
val postcode = form(keys.previousAddress.previousAddress.postcode).value
val storedAddresses = for(
jsonList <- form(keys.possibleAddresses.jsonList).value;
postcode <- form(keys.possibleAddresses.postcode).value
) yield {
PossibleAddress(
jsonList = serialiser.fromJson[Addresses](jsonList),
postcode = postcode
)
}
// IER0091: Temporarily removed the storedAddresses check so the list is not populated from the hidden input field
//val possibleAddresses = storedAddresses orElse postcode.map { pc =>
val possibleAddresses = postcode.map { pc =>
val addresses = addressService.lookupPartialAddress(pc)
PossibleAddress(
jsonList = Addresses(addresses),
postcode = pc
)
}
val options = for (
address <- possibleAddresses.map(_.jsonList.addresses).toList.flatten
) yield SelectOption(
value = address.uprn.getOrElse(""),
text = address.addressLine.getOrElse(""),
selected = if (address.uprn == selectedUprn) {
"selected=\\"selected\\""
} else ""
)
val hasAddresses = possibleAddresses.exists(!_.jsonList.addresses.isEmpty)
//IER0055: Check authority table too to allow for manual entry
val hasAuthority = hasAddresses || addressService.validAuthority(postcode)
val addressSelect = SelectField(
key = keys.previousAddress.previousAddress.uprn,
optionList = options,
default = SelectOption(
value = "",
text = s"${options.size} ${Messages("ordinary_previousAddress_select_addressesFound")}"
)
)
val addressSelectWithError = addressSelect.copy(
classes = if (!hasAddresses) {
"invalid"
} else {
addressSelect.classes
}
)
SelectModel(
question = Question(
postUrl = post.url,
title = title,
errorMessages = Messages.translatedGlobalErrors(form)
),
lookupUrl = routes.PreviousAddressPostcodeStep.get.url,
manualUrl = routes.PreviousAddressManualStep.get.url,
postcode = TextField(keys.previousAddress.previousAddress.postcode),
address = addressSelectWithError, // this is model data for <select>
possibleJsonList = HiddenField(
key = keys.possibleAddresses.jsonList,
value = possibleAddresses.map { poss =>
serialiser.toJson(poss.jsonList)
}.getOrElse("")
),
possiblePostcode = HiddenField(
key = keys.possibleAddresses.postcode,
value = form(keys.previousAddress.previousAddress.postcode).value.getOrElse("")
),
hasAddresses = hasAddresses,
hasAuthority = hasAuthority
)
}
}
| alphagov/ier-frontend | app/uk/gov/gds/ier/transaction/ordinary/previousAddress/PreviousAddressSelectMustache.scala | Scala | mit | 4,073 |
package me.jeffmay.neo4j.client.ws
import akka.actor.Scheduler
import me.jeffmay.neo4j.client._
import me.jeffmay.neo4j.client.cypher.CypherStatement
import me.jeffmay.neo4j.client.ws.json.DebugFormats
import me.jeffmay.neo4j.client.ws.json.rest.RestFormats._
import me.jeffmay.neo4j.client.ws.json.rest._
import me.jeffmay.util.ws.{ProxyWSClient, TimeoutWSRequest}
import org.slf4j.Logger
import play.api.libs.json.{JsValue, Json}
import play.api.libs.ws.{WSAuthScheme, WSClient, WSRequest, WSResponse}
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.Try
class WSNeo4jClient(
wsClient: WSClient,
val config: Neo4jClientConfig,
logger: Logger,
implicit private val scheduler: Scheduler,
implicit private val executionContext: ExecutionContext
) extends Neo4jClient with Proxy {
client =>
import WSNeo4jClient._
import WSRequestWithException._
override def self: Any = (wsClient, config)
def copy(
ws: WSClient = this.wsClient,
config: Neo4jClientConfig = this.config,
logger: Logger = this.logger,
scheduler: Scheduler = this.scheduler,
executionContext: ExecutionContext = this.executionContext
) = {
new WSNeo4jClient(ws, config, logger, scheduler, executionContext)
}
lazy val ws: WSClient = new ProxyWSClient(wsClient) {
override def url(url: String): WSRequest = {
new TimeoutWSRequest(wsClient.url(url), config.timeout)
}
}
override def withCredentials(username: String, password: String): Neo4jClient =
this.copy(config = config.copy(credentials = Neo4jBasicAuth(username, password)))
override def withBaseUrl(baseUrl: String): Neo4jClient =
this.copy(config = config.copy(baseUrl = baseUrl))
override def withStatsIncludedByDefault(includeStatsByDefault: Boolean): Neo4jClient =
this.copy(config = config.copy(includeStatsByDefault = includeStatsByDefault))
override def withTimeout(timeout: FiniteDuration): Neo4jClient =
this.copy(config = config.copy(timeout = timeout))
protected def http(path: String): WSRequest = {
require(!path.isEmpty, "path cannot be empty")
require(path.charAt(0) == '/', "path must be an absolute path starting with '/'")
import config.credentials.{password, username}
ws.url(config.baseUrl + path).withAuth(username, password, WSAuthScheme.BASIC)
}
override def passwordChangeRequired(): Future[Boolean] = {
val request = http(s"/user/${config.credentials.username}")
request.getAndCheckStatus(Set(200)).map { resp =>
(resp.json \ "password_change_required").asOpt[Boolean] contains true
}
}
override def changePassword(newPassword: String): Future[Unit] = {
val request = http(s"/user/${config.credentials.username}/password")
request.postAndCheckStatus(Some(Json.obj("password" -> newPassword)), Set(200))
.map(_ => ())
}
private def requestTxn[T](url: String, statements: Seq[CypherStatement])(convert: RawTxnResponse => Try[T]): Future[T] = {
import DebugFormats._
statements.foreach { stmt =>
logger.debug(s"Executing Cypher statement: ${Json.prettyPrint(Json.toJson(stmt))}")
}
val rawBody = RawStatementTransactionRequest.fromCypherStatements(statements)
val jsonBody = Json.toJson(rawBody)
val request = http(url)
request.postAndCheckStatus(Some(jsonBody)).flatMap { resp =>
// TODO: Handle version / json format errors with common recovery code
val respBody = resp.json.as[RawTxnResponse]
if (respBody.isSuccessful) {
// Do the conversion in the same thread. Any thrown exceptions will fold into the failed case
Future.successful(convert(respBody).get)
}
else {
Future.failed(statusCodeException(request, Some(jsonBody), resp, respBody.neo4jErrors))
}
}
}
override def openTxn(): Future[OpenedTxnResponse] = openTxn(Seq())
override def openTxn(statement: CypherStatement): Future[SingleOpenedTxnResponse] = {
requestTxn("/db/data/transaction", Seq(statement)) {
_.asOpenedTxnResponse(statement)
}
}
override def openTxn(statements: Seq[CypherStatement]): Future[OpenedTxnResponse] = {
requestTxn("/db/data/transaction", statements) {
_.asOpenedTxnResponse(statements)
}
}
override def openAndCommitTxn(statement: CypherStatement): Future[SingleCommittedTxnResponse] = {
requestTxn("/db/data/transaction/commit", Seq(statement)) {
_.asCommittedTxnResponse(statement)
}
}
override def openAndCommitTxn(statements: Seq[CypherStatement]): Future[CommittedTxnResponse] = {
requestTxn("/db/data/transaction/commit", statements) {
_.asCommittedTxnResponse(statements)
}
}
override def commitTxn(ref: TxnRef, alongWith: Seq[CypherStatement]): Future[CommittedTxnResponse] = {
requestTxn(ref.url, alongWith) {
_.asCommittedTxnResponse(alongWith)
}
}
}
object WSNeo4jClient {
def apply(
ws: WSClient,
config: Neo4jClientConfig,
logger: Logger,
scheduler: Scheduler,
executionContext: ExecutionContext
) = new WSNeo4jClient(ws, config, logger, scheduler, executionContext)
def flattenHeaders(headers: Map[String, Seq[String]]): Seq[(String, String)] = {
headers.toSeq.flatMap {
case (header, values) => values.map(header -> _)
}
}
def statusCodeException(request: WSRequest, requestBody: Option[JsValue], response: WSResponse, errors: Seq[Neo4jError]): StatusCodeException = {
new StatusCodeException(
request.method,
request.url,
flattenHeaders(request.headers),
requestBody.fold("")(Json.prettyPrint),
response.status,
Try(response.json).map(Json.prettyPrint) getOrElse response.body,
errors
)
}
def unexpectedStatusException(request: WSRequest, requestBody: Option[JsValue], response: WSResponse): UnexpectedStatusException = {
new UnexpectedStatusException(
request.method,
request.url,
flattenHeaders(request.headers),
requestBody.fold("")(Json.prettyPrint),
response.status,
Try(Json.prettyPrint(response.json)) getOrElse response.body
)
}
}
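/*
 * Illustrative check (not part of the library): flattenHeaders expands one
 * header-to-values entry into one (name, value) pair per value.
 */
object FlattenHeadersSketch extends App {
  val flat = WSNeo4jClient.flattenHeaders(Map("Accept" -> Seq("application/json", "text/plain")))
  assert(flat == Seq("Accept" -> "application/json", "Accept" -> "text/plain"))
}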
| AudaxHealthInc/neo4j-scala-client | ws/src/main/scala/me/jeffmay/neo4j/client/ws/WSNeo4jClient.scala | Scala | apache-2.0 | 6,154 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io._
import java.nio.file.Files
import scala.io.Source
import scala.util.Properties
import scala.collection.JavaConverters._
import scala.collection.mutable.Stack
import sbt._
import sbt.Classpaths.publishTask
import sbt.Keys._
import sbtunidoc.Plugin.UnidocKeys.unidocGenjavadocVersion
import com.simplytyped.Antlr4Plugin._
import com.typesafe.sbt.pom.{PomBuild, SbtPomKeys}
import com.typesafe.tools.mima.plugin.MimaKeys
import org.scalastyle.sbt.ScalastylePlugin._
import org.scalastyle.sbt.Tasks
import spray.revolver.RevolverPlugin._
object BuildCommons {
private val buildLocation = file(".").getAbsoluteFile.getParentFile
val sqlProjects@Seq(catalyst, sql, hive, hiveThriftServer, sqlKafka010) = Seq(
"catalyst", "sql", "hive", "hive-thriftserver", "sql-kafka-0-10"
).map(ProjectRef(buildLocation, _))
val streamingProjects@Seq(
streaming, streamingFlumeSink, streamingFlume, streamingKafka, streamingKafka010
) = Seq(
"streaming", "streaming-flume-sink", "streaming-flume", "streaming-kafka-0-8", "streaming-kafka-0-10"
).map(ProjectRef(buildLocation, _))
val allProjects@Seq(
core, graphx, mllib, mllibLocal, repl, networkCommon, networkShuffle, launcher, unsafe, tags, sketch, kvstore, _*
) = Seq(
"core", "graphx", "mllib", "mllib-local", "repl", "network-common", "network-shuffle", "launcher", "unsafe",
"tags", "sketch", "kvstore"
).map(ProjectRef(buildLocation, _)) ++ sqlProjects ++ streamingProjects
val optionallyEnabledProjects@Seq(mesos, yarn, sparkGangliaLgpl,
streamingKinesisAsl, dockerIntegrationTests, hadoopCloud) =
Seq("mesos", "yarn", "ganglia-lgpl", "streaming-kinesis-asl",
"docker-integration-tests", "hadoop-cloud").map(ProjectRef(buildLocation, _))
val assemblyProjects@Seq(networkYarn, streamingFlumeAssembly, streamingKafkaAssembly, streamingKafka010Assembly, streamingKinesisAslAssembly) =
Seq("network-yarn", "streaming-flume-assembly", "streaming-kafka-0-8-assembly", "streaming-kafka-0-10-assembly", "streaming-kinesis-asl-assembly")
.map(ProjectRef(buildLocation, _))
val copyJarsProjects@Seq(assembly, examples) = Seq("assembly", "examples")
.map(ProjectRef(buildLocation, _))
val tools = ProjectRef(buildLocation, "tools")
// Root project.
val spark = ProjectRef(buildLocation, "spark")
val sparkHome = buildLocation
val testTempDir = s"$sparkHome/target/tmp"
val javacJVMVersion = settingKey[String]("source and target JVM version for javac")
val scalacJVMVersion = settingKey[String]("source and target JVM version for scalac")
}
object SparkBuild extends PomBuild {
import BuildCommons._
import scala.collection.mutable.Map
val projectsMap: Map[String, Seq[Setting[_]]] = Map.empty
override val profiles = {
Properties.envOrNone("SBT_MAVEN_PROFILES") match {
case None => Seq("sbt")
case Some(v) =>
v.split("(\\\\s+|,)").filterNot(_.isEmpty).map(_.trim.replaceAll("-P", "")).toSeq
}
}
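// Example (illustrative): SBT_MAVEN_PROFILES="-Pyarn,-Phive" resolves to Seq("yarn", "hive").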
Properties.envOrNone("SBT_MAVEN_PROPERTIES") match {
case Some(v) =>
v.split("(\\\\s+|,)").filterNot(_.isEmpty).map(_.split("=")).foreach(x => System.setProperty(x(0), x(1)))
case _ =>
}
override val userPropertiesMap = System.getProperties.asScala.toMap
lazy val MavenCompile = config("m2r") extend(Compile)
lazy val publishLocalBoth = TaskKey[Unit]("publish-local", "publish local for m2 and ivy")
lazy val sparkGenjavadocSettings: Seq[sbt.Def.Setting[_]] = Seq(
libraryDependencies += compilerPlugin(
"com.typesafe.genjavadoc" %% "genjavadoc-plugin" % unidocGenjavadocVersion.value cross CrossVersion.full),
scalacOptions ++= Seq(
"-P:genjavadoc:out=" + (target.value / "java"),
"-P:genjavadoc:strictVisibility=true" // hide package private types
)
)
lazy val scalaStyleRules = Project("scalaStyleRules", file("scalastyle"))
.settings(
libraryDependencies += "org.scalastyle" %% "scalastyle" % "0.8.0"
)
lazy val scalaStyleOnCompile = taskKey[Unit]("scalaStyleOnCompile")
lazy val scalaStyleOnTest = taskKey[Unit]("scalaStyleOnTest")
// We special case the 'println' lint rule to only be a warning on compile, because adding
// printlns for debugging is a common use case and is easy to remember to remove.
val scalaStyleOnCompileConfig: String = {
val in = "scalastyle-config.xml"
val out = "scalastyle-on-compile.generated.xml"
val replacements = Map(
"""customId="println" level="error"""" -> """customId="println" level="warn""""
)
var contents = Source.fromFile(in).getLines.mkString("\n")
for ((k, v) <- replacements) {
require(contents.contains(k), s"Could not rewrite '$k' in original scalastyle config.")
contents = contents.replace(k, v)
}
new PrintWriter(out) {
write(contents)
close()
}
out
}
// Return a cached scalastyle task for a given configuration (usually Compile or Test)
private def cachedScalaStyle(config: Configuration) = Def.task {
val logger = streams.value.log
// We need a different cache dir per Configuration, otherwise they collide
val cacheDir = target.value / s"scalastyle-cache-${config.name}"
val cachedFun = FileFunction.cached(cacheDir, FilesInfo.lastModified, FilesInfo.exists) {
(inFiles: Set[File]) => {
val args: Seq[String] = Seq.empty
val scalaSourceV = Seq(file(scalaSource.in(config).value.getAbsolutePath))
val configV = (baseDirectory in ThisBuild).value / scalaStyleOnCompileConfig
val configUrlV = scalastyleConfigUrl.in(config).value
val streamsV = streams.in(config).value
val failOnErrorV = true
val scalastyleTargetV = scalastyleTarget.in(config).value
val configRefreshHoursV = scalastyleConfigRefreshHours.in(config).value
val targetV = target.in(config).value
val configCacheFileV = scalastyleConfigUrlCacheFile.in(config).value
logger.info(s"Running scalastyle on ${name.value} in ${config.name}")
Tasks.doScalastyle(args, configV, configUrlV, failOnErrorV, scalaSourceV, scalastyleTargetV,
streamsV, configRefreshHoursV, targetV, configCacheFileV)
Set.empty
}
}
cachedFun(findFiles(scalaSource.in(config).value))
}
private def findFiles(file: File): Set[File] = if (file.isDirectory) {
file.listFiles().toSet.flatMap(findFiles) + file
} else {
Set(file)
}
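// Example (illustrative): for a tree containing src/A.scala and src/util/B.scala,
// findFiles(file("src")) returns both files plus the "src" and "src/util" directories.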
def enableScalaStyle: Seq[sbt.Def.Setting[_]] = Seq(
scalaStyleOnCompile := cachedScalaStyle(Compile).value,
scalaStyleOnTest := cachedScalaStyle(Test).value,
logLevel in scalaStyleOnCompile := Level.Warn,
logLevel in scalaStyleOnTest := Level.Warn,
(compile in Compile) := {
scalaStyleOnCompile.value
(compile in Compile).value
},
(compile in Test) := {
scalaStyleOnTest.value
(compile in Test).value
}
)
lazy val sharedSettings = sparkGenjavadocSettings ++
(if (sys.env.contains("NOLINT_ON_COMPILE")) Nil else enableScalaStyle) ++ Seq(
exportJars in Compile := true,
exportJars in Test := false,
javaHome := sys.env.get("JAVA_HOME")
.orElse(sys.props.get("java.home").map { p => new File(p).getParentFile().getAbsolutePath() })
.map(file),
incOptions := incOptions.value.withNameHashing(true),
publishMavenStyle := true,
unidocGenjavadocVersion := "0.10",
// Override SBT's default resolvers:
resolvers := Seq(
DefaultMavenRepository,
Resolver.mavenLocal,
Resolver.file("local", file(Path.userHome.absolutePath + "/.ivy2/local"))(Resolver.ivyStylePatterns)
),
externalResolvers := resolvers.value,
otherResolvers := SbtPomKeys.mvnLocalRepository(dotM2 => Seq(Resolver.file("dotM2", dotM2))).value,
publishLocalConfiguration in MavenCompile :=
new PublishConfiguration(None, "dotM2", packagedArtifacts.value, Seq(), ivyLoggingLevel.value),
publishMavenStyle in MavenCompile := true,
publishLocal in MavenCompile := publishTask(publishLocalConfiguration in MavenCompile, deliverLocal).value,
publishLocalBoth := Seq(publishLocal in MavenCompile, publishLocal).dependOn.value,
javacOptions in (Compile, doc) ++= {
val versionParts = System.getProperty("java.version").split("[+.\\-]+", 3)
var major = versionParts(0).toInt
if (major == 1) major = versionParts(1).toInt
if (major >= 8) Seq("-Xdoclint:all", "-Xdoclint:-missing") else Seq.empty
},
javacJVMVersion := "1.8",
scalacJVMVersion := "1.8",
javacOptions in Compile ++= Seq(
"-encoding", "UTF-8",
"-source", javacJVMVersion.value,
"-Xlint:unchecked"
),
// This -target option cannot be set in the Compile configuration scope since `javadoc` doesn't
// play nicely with it; see https://github.com/sbt/sbt/issues/355#issuecomment-3817629 for
// additional discussion and explanation.
javacOptions in (Compile, compile) ++= Seq(
"-target", javacJVMVersion.value
),
scalacOptions in Compile ++= Seq(
s"-target:jvm-${scalacJVMVersion.value}",
"-sourcepath", (baseDirectory in ThisBuild).value.getAbsolutePath // Required for relative source links in scaladoc
),
// Implements -Xfatal-warnings, ignoring deprecation warnings.
// Code snippet taken from https://issues.scala-lang.org/browse/SI-8410.
compile in Compile := {
val analysis = (compile in Compile).value
val out = streams.value
def logProblem(l: (=> String) => Unit, f: File, p: xsbti.Problem) = {
l(f.toString + ":" + p.position.line.fold("")(_ + ":") + " " + p.message)
l(p.position.lineContent)
l("")
}
var failed = 0
analysis.infos.allInfos.foreach { case (k, i) =>
i.reportedProblems foreach { p =>
val deprecation = p.message.contains("is deprecated")
if (!deprecation) {
failed = failed + 1
}
val printer: (=> String) => Unit = s => if (deprecation) {
out.log.warn(s)
} else {
out.log.error("[warn] " + s)
}
logProblem(printer, k, p)
}
}
if (failed > 0) {
sys.error(s"$failed fatal warnings")
}
analysis
}
)
def enable(settings: Seq[Setting[_]])(projectRef: ProjectRef) = {
val existingSettings = projectsMap.getOrElse(projectRef.project, Seq[Setting[_]]())
projectsMap += (projectRef.project -> (existingSettings ++ settings))
}
// Note ordering of these settings matter.
/* Enable shared settings on all projects */
(allProjects ++ optionallyEnabledProjects ++ assemblyProjects ++ copyJarsProjects ++ Seq(spark, tools))
.foreach(enable(sharedSettings ++ DependencyOverrides.settings ++
ExcludedDependencies.settings))
/* Enable tests settings for all projects except examples, assembly and tools */
(allProjects ++ optionallyEnabledProjects).foreach(enable(TestSettings.settings))
val mimaProjects = allProjects.filterNot { x =>
Seq(
spark, hive, hiveThriftServer, catalyst, repl, networkCommon, networkShuffle, networkYarn,
unsafe, tags, sqlKafka010, kvstore
).contains(x)
}
mimaProjects.foreach { x =>
enable(MimaBuild.mimaSettings(sparkHome, x))(x)
}
/* Generate and pick the spark build info from extra-resources */
enable(Core.settings)(core)
/* Unsafe settings */
enable(Unsafe.settings)(unsafe)
/*
* Set up tasks to copy dependencies during packaging. This step can be disabled in the command
* line, so that dev/mima can run without trying to copy these files again and potentially
* causing issues.
*/
if (!"false".equals(System.getProperty("copyDependencies"))) {
copyJarsProjects.foreach(enable(CopyDependencies.settings))
}
/* Enable Assembly for all assembly projects */
assemblyProjects.foreach(enable(Assembly.settings))
/* Package pyspark artifacts in a separate zip file for YARN. */
enable(PySparkAssembly.settings)(assembly)
/* Enable unidoc only for the root spark project */
enable(Unidoc.settings)(spark)
/* Catalyst ANTLR generation settings */
enable(Catalyst.settings)(catalyst)
/* Spark SQL Core console settings */
enable(SQL.settings)(sql)
/* Hive console settings */
enable(Hive.settings)(hive)
enable(Flume.settings)(streamingFlumeSink)
// SPARK-14738 - Remove docker tests from main Spark build
// enable(DockerIntegrationTests.settings)(dockerIntegrationTests)
/**
* Adds the ability to run the spark shell directly from SBT without building an assembly
* jar.
*
* Usage: `build/sbt sparkShell`
*/
val sparkShell = taskKey[Unit]("start a spark-shell.")
val sparkPackage = inputKey[Unit](
s"""
|Download and run a spark package.
|Usage `builds/sbt "sparkPackage <group:artifact:version> <MainClass> [args]
""".stripMargin)
val sparkSql = taskKey[Unit]("starts the spark sql CLI.")
enable(Seq(
connectInput in run := true,
fork := true,
outputStrategy in run := Some (StdoutOutput),
javaOptions += "-Xmx2g",
sparkShell := {
(runMain in Compile).toTask(" org.apache.spark.repl.Main -usejavacp").value
},
sparkPackage := {
import complete.DefaultParsers._
val packages :: className :: otherArgs = spaceDelimited("<group:artifact:version> <MainClass> [args]").parsed.toList
val scalaRun = (runner in run).value
val classpath = (fullClasspath in Runtime).value
val args = Seq("--packages", packages, "--class", className, (Keys.`package` in Compile in LocalProject("core"))
.value.getCanonicalPath) ++ otherArgs
println(args)
scalaRun.run("org.apache.spark.deploy.SparkSubmit", classpath.map(_.data), args, streams.value.log)
},
javaOptions in Compile += "-Dspark.master=local",
sparkSql := {
(runMain in Compile).toTask(" org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver").value
}
))(assembly)
enable(Seq(sparkShell := sparkShell in LocalProject("assembly")))(spark)
// TODO: move this to its upstream project.
override def projectDefinitions(baseDirectory: File): Seq[Project] = {
super.projectDefinitions(baseDirectory).map { x =>
if (projectsMap.exists(_._1 == x.id)) x.settings(projectsMap(x.id): _*)
else x.settings(Seq[Setting[_]](): _*)
} ++ Seq[Project](OldDeps.project)
}
}
object Core {
lazy val settings = Seq(
resourceGenerators in Compile += Def.task {
val buildScript = baseDirectory.value + "/../build/spark-build-info"
val targetDir = baseDirectory.value + "/target/extra-resources/"
val command = Seq("bash", buildScript, targetDir, version.value)
Process(command).!!
val propsFile = baseDirectory.value / "target" / "extra-resources" / "spark-version-info.properties"
Seq(propsFile)
}.taskValue
)
}
object Unsafe {
lazy val settings = Seq(
// This option is needed to suppress warnings from sun.misc.Unsafe usage
javacOptions in Compile += "-XDignore.symbol.file"
)
}
object Flume {
lazy val settings = sbtavro.SbtAvro.avroSettings
}
object DockerIntegrationTests {
// This serves to override the override specified in DependencyOverrides:
lazy val settings = Seq(
dependencyOverrides += "com.google.guava" % "guava" % "18.0",
resolvers += "DB2" at "https://app.camunda.com/nexus/content/repositories/public/",
libraryDependencies += "com.oracle" % "ojdbc6" % "11.2.0.1.0" from "https://app.camunda.com/nexus/content/repositories/public/com/oracle/ojdbc6/11.2.0.1.0/ojdbc6-11.2.0.1.0.jar" // scalastyle:ignore
)
}
/**
* Overrides to work around sbt's dependency resolution being different from Maven's.
*/
object DependencyOverrides {
lazy val settings = Seq(
dependencyOverrides += "com.google.guava" % "guava" % "14.0.1")
}
/**
* This excludes library dependencies in sbt, which are specified in maven but are
* not needed by sbt build.
*/
object ExcludedDependencies {
lazy val settings = Seq(
libraryDependencies ~= { libs => libs.filterNot(_.name == "groovy-all") }
)
}
/**
* Project to pull previous artifacts of Spark for generating Mima excludes.
*/
object OldDeps {
lazy val project = Project("oldDeps", file("dev"), settings = oldDepsSettings)
lazy val allPreviousArtifactKeys = Def.settingDyn[Seq[Set[ModuleID]]] {
SparkBuild.mimaProjects
.map { project => MimaKeys.mimaPreviousArtifacts in project }
.map(k => Def.setting(k.value))
.join
}
def oldDepsSettings() = Defaults.coreDefaultSettings ++ Seq(
name := "old-deps",
libraryDependencies := allPreviousArtifactKeys.value.flatten
)
}
object Catalyst {
lazy val settings = antlr4Settings ++ Seq(
antlr4PackageName in Antlr4 := Some("org.apache.spark.sql.catalyst.parser"),
antlr4GenListener in Antlr4 := true,
antlr4GenVisitor in Antlr4 := true
)
}
object SQL {
lazy val settings = Seq(
initialCommands in console :=
"""
|import org.apache.spark.SparkContext
|import org.apache.spark.sql.SQLContext
|import org.apache.spark.sql.catalyst.analysis._
|import org.apache.spark.sql.catalyst.dsl._
|import org.apache.spark.sql.catalyst.errors._
|import org.apache.spark.sql.catalyst.expressions._
|import org.apache.spark.sql.catalyst.plans.logical._
|import org.apache.spark.sql.catalyst.rules._
|import org.apache.spark.sql.catalyst.util._
|import org.apache.spark.sql.execution
|import org.apache.spark.sql.functions._
|import org.apache.spark.sql.types._
|
|val sc = new SparkContext("local[*]", "dev-shell")
|val sqlContext = new SQLContext(sc)
|import sqlContext.implicits._
|import sqlContext._
""".stripMargin,
cleanupCommands in console := "sc.stop()"
)
}
object Hive {
lazy val settings = Seq(
// Specially disable assertions since some Hive tests fail them
javaOptions in Test := (javaOptions in Test).value.filterNot(_ == "-ea"),
// Supporting all SerDes requires us to depend on deprecated APIs, so we turn off the warnings
// only for this subproject.
scalacOptions := (scalacOptions map { currentOpts: Seq[String] =>
currentOpts.filterNot(_ == "-deprecation")
}).value,
initialCommands in console :=
"""
|import org.apache.spark.SparkContext
|import org.apache.spark.sql.catalyst.analysis._
|import org.apache.spark.sql.catalyst.dsl._
|import org.apache.spark.sql.catalyst.errors._
|import org.apache.spark.sql.catalyst.expressions._
|import org.apache.spark.sql.catalyst.plans.logical._
|import org.apache.spark.sql.catalyst.rules._
|import org.apache.spark.sql.catalyst.util._
|import org.apache.spark.sql.execution
|import org.apache.spark.sql.functions._
|import org.apache.spark.sql.hive._
|import org.apache.spark.sql.hive.test.TestHive._
|import org.apache.spark.sql.hive.test.TestHive.implicits._
|import org.apache.spark.sql.types._""".stripMargin,
cleanupCommands in console := "sparkContext.stop()",
// Some of our log4j jars make it impossible to submit jobs from this JVM to Hive Map/Reduce
// in order to generate golden files. This is only required for developers who are adding
// new query tests.
fullClasspath in Test := (fullClasspath in Test).value.filterNot { f => f.toString.contains("jcl-over") }
)
}
object Assembly {
import sbtassembly.AssemblyUtils._
import sbtassembly.Plugin._
import AssemblyKeys._
val hadoopVersion = taskKey[String]("The version of hadoop that spark is compiled against.")
lazy val settings = assemblySettings ++ Seq(
test in assembly := {},
hadoopVersion := {
sys.props.get("hadoop.version")
.getOrElse(SbtPomKeys.effectivePom.value.getProperties.get("hadoop.version").asInstanceOf[String])
},
jarName in assembly := {
if (moduleName.value.contains("streaming-flume-assembly")
|| moduleName.value.contains("streaming-kafka-0-8-assembly")
|| moduleName.value.contains("streaming-kafka-0-10-assembly")
|| moduleName.value.contains("streaming-kinesis-asl-assembly")) {
// This must match the same name used in maven (see external/kafka-0-8-assembly/pom.xml)
s"${moduleName.value}-${version.value}.jar"
} else {
s"${moduleName.value}-${version.value}-hadoop${hadoopVersion.value}.jar"
}
},
jarName in (Test, assembly) := s"${moduleName.value}-test-${version.value}.jar",
mergeStrategy in assembly := {
case m if m.toLowerCase.endsWith("manifest.mf") => MergeStrategy.discard
case m if m.toLowerCase.matches("meta-inf.*\\.sf$") => MergeStrategy.discard
case "log4j.properties" => MergeStrategy.discard
case m if m.toLowerCase.startsWith("meta-inf/services/") => MergeStrategy.filterDistinctLines
case "reference.conf" => MergeStrategy.concat
case _ => MergeStrategy.first
}
)
}
object PySparkAssembly {
import sbtassembly.Plugin._
import AssemblyKeys._
import java.util.zip.{ZipOutputStream, ZipEntry}
lazy val settings = Seq(
// Use a resource generator to copy all .py files from python/pyspark into a managed directory
// to be included in the assembly. We can't just add "python/" to the assembly's resource dir
// list since that will copy unneeded / unwanted files.
resourceGenerators in Compile += Def.macroValueI(resourceManaged in Compile map { outDir: File =>
val src = new File(BuildCommons.sparkHome, "python/pyspark")
val zipFile = new File(BuildCommons.sparkHome , "python/lib/pyspark.zip")
zipFile.delete()
zipRecursive(src, zipFile)
Seq.empty[File]
}).value
)
private def zipRecursive(source: File, destZipFile: File) = {
val destOutput = new ZipOutputStream(new FileOutputStream(destZipFile))
addFilesToZipStream("", source, destOutput)
destOutput.flush()
destOutput.close()
}
private def addFilesToZipStream(parent: String, source: File, output: ZipOutputStream): Unit = {
if (source.isDirectory()) {
output.putNextEntry(new ZipEntry(parent + source.getName()))
for (file <- source.listFiles()) {
addFilesToZipStream(parent + source.getName() + File.separator, file, output)
}
} else {
val in = new FileInputStream(source)
output.putNextEntry(new ZipEntry(parent + source.getName()))
val buf = new Array[Byte](8192)
var n = 0
while (n != -1) {
n = in.read(buf)
if (n != -1) {
output.write(buf, 0, n)
}
}
output.closeEntry()
in.close()
}
}
}
object Unidoc {
import BuildCommons._
import sbtunidoc.Plugin._
import UnidocKeys._
private def ignoreUndocumentedPackages(packages: Seq[Seq[File]]): Seq[Seq[File]] = {
packages
.map(_.filterNot(_.getName.contains("$")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/deploy")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/examples")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/memory")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/network")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/shuffle")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/executor")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/unsafe")))
.map(_.filterNot(_.getCanonicalPath.contains("python")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/util/collection")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/catalyst")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/execution")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/internal")))
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/spark/sql/hive/test")))
}
private def ignoreClasspaths(classpaths: Seq[Classpath]): Seq[Classpath] = {
classpaths
.map(_.filterNot(_.data.getCanonicalPath.matches(""".*kafka-clients-0\.10.*""")))
.map(_.filterNot(_.data.getCanonicalPath.matches(""".*kafka_2\..*-0\.10.*""")))
}
val unidocSourceBase = settingKey[String]("Base URL of source links in Scaladoc.")
lazy val settings = scalaJavaUnidocSettings ++ Seq (
publish := {},
unidocProjectFilter in(ScalaUnidoc, unidoc) :=
inAnyProject -- inProjects(OldDeps.project, repl, examples, tools, streamingFlumeSink, yarn, tags, streamingKafka010, sqlKafka010),
unidocProjectFilter in(JavaUnidoc, unidoc) :=
inAnyProject -- inProjects(OldDeps.project, repl, examples, tools, streamingFlumeSink, yarn, tags, streamingKafka010, sqlKafka010),
unidocAllClasspaths in (ScalaUnidoc, unidoc) := {
ignoreClasspaths((unidocAllClasspaths in (ScalaUnidoc, unidoc)).value)
},
unidocAllClasspaths in (JavaUnidoc, unidoc) := {
ignoreClasspaths((unidocAllClasspaths in (JavaUnidoc, unidoc)).value)
},
// Skip actual catalyst, but include the subproject.
// Catalyst is not public API and contains quasiquotes which break scaladoc.
unidocAllSources in (ScalaUnidoc, unidoc) := {
ignoreUndocumentedPackages((unidocAllSources in (ScalaUnidoc, unidoc)).value)
},
// Skip class names containing $ and some internal packages in Javadocs
unidocAllSources in (JavaUnidoc, unidoc) := {
ignoreUndocumentedPackages((unidocAllSources in (JavaUnidoc, unidoc)).value)
.map(_.filterNot(_.getCanonicalPath.contains("org/apache/hadoop")))
},
javacOptions in (JavaUnidoc, unidoc) := Seq(
"-windowtitle", "Spark " + version.value.replaceAll("-SNAPSHOT", "") + " JavaDoc",
"-public",
"-noqualifier", "java.lang",
"-tag", """example:a:Example\\:""",
"-tag", """note:a:Note\\:""",
"-tag", "group:X",
"-tag", "tparam:X",
"-tag", "constructor:X",
"-tag", "todo:X",
"-tag", "groupname:X"
),
// Use GitHub repository for Scaladoc source links
unidocSourceBase := s"https://github.com/apache/spark/tree/v${version.value}",
scalacOptions in (ScalaUnidoc, unidoc) ++= Seq(
"-groups", // Group similar methods together based on the @group annotation.
"-skip-packages", "org.apache.hadoop"
) ++ (
// Add links to sources when generating Scaladoc for a non-snapshot release
if (!isSnapshot.value) {
Opts.doc.sourceUrl(unidocSourceBase.value + "€{FILE_PATH}.scala")
} else {
Seq()
}
)
)
}
object CopyDependencies {
val copyDeps = TaskKey[Unit]("copyDeps", "Copies needed dependencies to the build directory.")
val destPath = (crossTarget in Compile) { _ / "jars"}
lazy val settings = Seq(
copyDeps := {
val dest = destPath.value
if (!dest.isDirectory() && !dest.mkdirs()) {
throw new IOException("Failed to create jars directory.")
}
(dependencyClasspath in Compile).value.map(_.data)
.filter { jar => jar.isFile() }
.foreach { jar =>
val destJar = new File(dest, jar.getName())
if (destJar.isFile()) {
destJar.delete()
}
Files.copy(jar.toPath(), destJar.toPath())
}
},
crossTarget in (Compile, packageBin) := destPath.value,
packageBin in Compile := (packageBin in Compile).dependsOn(copyDeps).value
)
}
object TestSettings {
import BuildCommons._
private val scalaBinaryVersion = "2.11"
lazy val settings = Seq (
// Fork new JVMs for tests and set Java options for those
fork := true,
// Setting SPARK_DIST_CLASSPATH is a simple way to make sure any child processes
// launched by the tests have access to the correct test-time classpath.
envVars in Test ++= Map(
"SPARK_DIST_CLASSPATH" ->
(fullClasspath in Test).value.files.map(_.getAbsolutePath)
.mkString(File.pathSeparator).stripSuffix(File.pathSeparator),
"SPARK_PREPEND_CLASSES" -> "1",
"SPARK_SCALA_VERSION" -> scalaBinaryVersion,
"SPARK_TESTING" -> "1",
"JAVA_HOME" -> sys.env.get("JAVA_HOME").getOrElse(sys.props("java.home"))),
javaOptions in Test += s"-Djava.io.tmpdir=$testTempDir",
javaOptions in Test += "-Dspark.test.home=" + sparkHome,
javaOptions in Test += "-Dspark.testing=1",
javaOptions in Test += "-Dspark.port.maxRetries=100",
javaOptions in Test += "-Dspark.master.rest.enabled=false",
javaOptions in Test += "-Dspark.memory.debugFill=true",
javaOptions in Test += "-Dspark.ui.enabled=false",
javaOptions in Test += "-Dspark.ui.showConsoleProgress=false",
javaOptions in Test += "-Dspark.unsafe.exceptionOnMemoryLeak=true",
javaOptions in Test += "-Dsun.io.serialization.extendedDebugInfo=false",
javaOptions in Test += "-Dderby.system.durability=test",
javaOptions in Test ++= System.getProperties.asScala.filter(_._1.startsWith("spark"))
.map { case (k,v) => s"-D$k=$v" }.toSeq,
javaOptions in Test += "-ea",
javaOptions in Test ++= "-Xmx3g -Xss4096k"
.split(" ").toSeq,
javaOptions += "-Xmx3g",
// Exclude tags defined in a system property
testOptions in Test += Tests.Argument(TestFrameworks.ScalaTest,
sys.props.get("test.exclude.tags").map { tags =>
tags.split(",").flatMap { tag => Seq("-l", tag) }.toSeq
}.getOrElse(Nil): _*),
testOptions in Test += Tests.Argument(TestFrameworks.JUnit,
sys.props.get("test.exclude.tags").map { tags =>
Seq("--exclude-categories=" + tags)
}.getOrElse(Nil): _*),
// Show full stack trace and duration in test cases.
testOptions in Test += Tests.Argument("-oDF"),
testOptions in Test += Tests.Argument(TestFrameworks.JUnit, "-v", "-a"),
// Enable Junit testing.
libraryDependencies += "com.novocode" % "junit-interface" % "0.11" % "test",
// Only allow one test at a time, even across projects, since they run in the same JVM
parallelExecution in Test := false,
// Make sure the test temp directory exists.
resourceGenerators in Test += Def.macroValueI(resourceManaged in Test map { outDir: File =>
var dir = new File(testTempDir)
if (!dir.isDirectory()) {
// Because File.mkdirs() can fail if multiple callers are trying to create the same
// parent directory, this code tries to create parents one at a time, and avoids
// failures when the directories have been created by somebody else.
val stack = new Stack[File]()
while (!dir.isDirectory()) {
stack.push(dir)
dir = dir.getParentFile()
}
while (stack.nonEmpty) {
val d = stack.pop()
require(d.mkdir() || d.isDirectory(), s"Failed to create directory $d")
}
}
Seq.empty[File]
}).value,
concurrentRestrictions in Global += Tags.limit(Tags.Test, 1),
// Remove certain packages from Scaladoc
scalacOptions in (Compile, doc) := Seq(
"-groups",
"-skip-packages", Seq(
"org.apache.spark.api.python",
"org.apache.spark.network",
"org.apache.spark.deploy",
"org.apache.spark.util.collection"
).mkString(":"),
"-doc-title", "Spark " + version.value.replaceAll("-SNAPSHOT", "") + " ScalaDoc"
)
)
}
| ajaysaini725/spark | project/SparkBuild.scala | Scala | apache-2.0 | 32,461 |
package com.raquo.xstream
class MiscSpec extends UnitSpec {
it("collect should work") {
trait BaseFoo { val foo: String = "basefoo" }
trait Foo extends BaseFoo { override val foo: String = "foo" }
trait SubFoo extends Foo { override val foo: String = "subfoo" }
val foo = new Foo {}
val baseFoo = new BaseFoo {}
val producer = Producer[BaseFoo](
onStart = listener => {
listener.next(foo)
listener.next(baseFoo)
},
onStop = () => fail("Producer should not have stopped")
)
var count = 0
val stream = XStream
.create(producer)
.collect { case f: Foo => f }
stream.addListener(Listener(
onNext = value => {
value shouldBe foo
count += 1
},
onError = err => fail(s"onError should not have fired: $err"),
onComplete = () => fail(s"onComplete should not have fired")
))
count shouldBe 1
}
}
| raquo/XStream.scala | src/test/scala/com/raquo/xstream/MiscSpec.scala | Scala | mit | 923 |
class D(x: Int) {
class DD {
inline def apply() = new DD()
}
val inner = new DD
}
object Test {
new D(2).inner.apply()
}
| som-snytt/dotty | tests/pos/i3130d.scala | Scala | apache-2.0 | 133 |
package lila.db
package api
import Implicits._
import play.api.libs.json._
import reactivemongo.bson._
object $primitive {
import play.modules.reactivemongo.json._
def apply[A: InColl, B](
query: JsObject,
field: String,
modifier: QueryBuilder => QueryBuilder = identity,
max: Option[Int] = None)(extract: JsValue => Option[B]): Fu[List[B]] =
modifier {
implicitly[InColl[A]].coll
.genericQueryBuilder
.query(query)
.projection(Json.obj(field -> true))
} toList[BSONDocument] max map2 { (obj: BSONDocument) =>
      extract(JsObjectReader.read(obj) \ field)
} map (_.flatten)
def one[A: InColl, B](
query: JsObject,
field: String,
modifier: QueryBuilder => QueryBuilder = identity)(extract: JsValue => Option[B]): Fu[Option[B]] =
modifier {
implicitly[InColl[A]].coll
.genericQueryBuilder
.query(query)
.projection(Json.obj(field -> true))
}.one[BSONDocument] map2 { (obj: BSONDocument) =>
      extract(JsObjectReader.read(obj) \ field)
} map (_.flatten)
}
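// Hypothetical usage sketch (not part of the original sources): assuming an InColl[Game]
// instance is in implicit scope, reading a single field of one matching document could look like
//   $primitive.one[Game, String](Json.obj("_id" -> "abcd1234"), "winner")(_.asOpt[String])
// where `Game`, the id and the field name are placeholders used only for illustration.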
| Enigmahack/lila | modules/db/src/main/api/primitive.scala | Scala | mit | 1,080 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.classification.multilabel.stronglearners
import org.apache.spark.annotation.Experimental
import org.apache.spark.mllib.classification.multilabel.baselearners.{ BaseLearnerAlgorithm, BaseLearnerModel, DecisionStumpAlgorithm, DecisionStumpModel }
import org.apache.spark.mllib.classification.multilabel.{ GeneralizedAdditiveModel, MultiLabelClassificationModel }
import org.apache.spark.mllib.linalg.{ Vector, Vectors }
import org.apache.spark.mllib.util.{ MultiLabeledPoint, WeightedMultiLabeledPoint }
import org.apache.spark.rdd.RDD
import scala.language.higherKinds
/**
 * Strong-learner model produced by AdaBoost.MH: an additive combination of base learner models.
 *
 * @param numClasses Number of class labels.
 * @param numFeatureDimensions Number of feature dimensions.
 * @param baseLearnersList Base learner models accumulated over the boosting iterations.
 * @tparam BM Type of the base learner model.
 */
@Experimental
class AdaBoostMHModel[BM <: MultiLabelClassificationModel](
numClasses: Int,
numFeatureDimensions: Int,
baseLearnersList: List[BM])
extends StrongLearnerModel with GeneralizedAdditiveModel[BM] {
var debugString: String = ""
def this() = this(0, 0, List())
override def models = baseLearnersList
override def predict(testData: RDD[Vector]): RDD[Vector] = {
testData map predict
}
override def predict(testData: Vector): Vector = {
val rawPredicts: Vector = models.foldLeft(
Vectors.dense(Array.fill[Double](numClasses)(0.0))) { (sum, item) =>
val predicts = item predict testData
Vectors.fromBreeze(sum.toBreeze + predicts.toBreeze)
}
val predictArray: Array[Double] = rawPredicts.toArray.map {
case p: Double => if (p >= 0.0) 1.0 else -1.0
}
Vectors.dense(predictArray)
}
  override def toString = models mkString ";\n"
}
@Experimental
object AdaBoostMHModel {
def apply[BM <: MultiLabelClassificationModel](numClasses: Int,
numFeatureDimensions: Int,
baseLearnersList: List[BM]) = {
new AdaBoostMHModel[BM](numClasses, numFeatureDimensions, baseLearnersList)
}
}
@Experimental
class AdaBoostMHAlgorithm[BM <: BaseLearnerModel, BA <: BaseLearnerAlgorithm[BM]](
baseLearnerAlgo: BA,
_numClasses: Int,
_numFeatureDimensions: Int,
numIterations: Int) extends StrongLearnerAlgorithm[BM, BA, AdaBoostMHModel[BM]] {
override def numClasses = _numClasses
override def numFeatureDimensions = _numFeatureDimensions
def run(dataSet: RDD[MultiLabeledPoint]): AdaBoostMHModel[BM] = {
val weightedDataSet = AdaBoostMHAlgorithm.initWeights(numClasses, dataSet)
/**
     * The state carried across boosting iterations, consisting of:
     * @param model the resulting strong-learner model built from previous iterations.
* @param dataSet the re-weighted multilabeled data points.
*/
case class IterationData(model: AdaBoostMHModel[BM], dataSet: RDD[WeightedMultiLabeledPoint])
val finalIterationData = (1 to numIterations).foldLeft(IterationData(
AdaBoostMHModel.apply[BM](numClasses, numFeatureDimensions, List()),
weightedDataSet)) { (iterData: IterationData, iter: Int) =>
logInfo(s"Start $iter-th iteration.")
// 1. train a new base learner
val baseLearner = baseLearnerAlgo.run(iterData.dataSet)
// 1.1 update strong learner
val updatedStrongLearner = AdaBoostMHModel.apply[BM](
numClasses, numFeatureDimensions, iterData.model.models :+ baseLearner)
// 2. get the weak hypothesis
val predictsAndPoints = iterData.dataSet map { wmlPoint =>
(baseLearner.predict(wmlPoint.data.features), wmlPoint)
}
// 3. sum up the normalize factor
val summedZ = predictsAndPoints.aggregate(0.0)({
// seqOp
case (sum: Double, (predict: Vector, wmlp: WeightedMultiLabeledPoint)) =>
(predict.toArray zip wmlp.data.labels.toArray zip wmlp.weights.toArray)
.map {
case ((p, l), w) =>
w * math.exp(-p * l)
}.sum + sum
}, { _ + _ })
logInfo(s"Weights normalization factor (Z) value: $summedZ")
      updatedStrongLearner.debugString = iterData.model.debugString + s"\nZ=$summedZ"
// XXX: should be using multi-label metrics in mllib.
// 3.1 hamming loss
val strongPredictsAndLabels = iterData.dataSet.map { wmlp =>
(updatedStrongLearner.predict(wmlp.data.features), wmlp.data.labels)
}
val hammingLoss = strongPredictsAndLabels.flatMap {
case (predict, label) =>
predict.toArray zip label.toArray
}.filter {
case (p, l) =>
p * l < 0.0
}.count.toDouble / (predictsAndPoints.count * numClasses).toDouble
logInfo(s"Iter $iter. Hamming loss: $hammingLoss")
      updatedStrongLearner.debugString = iterData.model.debugString + s"\nIter $iter. Hamming loss: $hammingLoss"
// 4. re-weight the data set
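      // Illustrative restatement of the update below: each weight is scaled by
      // exp(-prediction * label) for its (example, label) pair and divided by the
      // normalization factor Z, i.e. w'(i, l) = w(i, l) * exp(-h_l(x_i) * y(i, l)) / Z.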
val reweightedDataSet = predictsAndPoints map {
case (predict: Vector, wmlp: WeightedMultiLabeledPoint) =>
val updatedWeights = for (i <- 0 until numClasses)
yield wmlp.weights(i) * math.exp(-predict(i) * wmlp.data.labels(i)) / summedZ
WeightedMultiLabeledPoint(
Vectors.dense(updatedWeights.toArray), wmlp.data)
}
// 5. next recursion
IterationData(updatedStrongLearner, reweightedDataSet)
}
finalIterationData.model
}
}
object AdaBoostMHAlgorithm {
/**
* Calculate the initial weight of the dataset.
*
* @param numClasses Num of class labels.
   * @param dataSet The dataset RDD.
   * @return An RDD of WeightedMultiLabeledPoint carrying the initial weights.
*/
def initWeights(
numClasses: Int,
dataSet: RDD[MultiLabeledPoint]): RDD[WeightedMultiLabeledPoint] = {
val w = 1.0 / (dataSet.count().toDouble * numClasses)
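    // Worked example (added for illustration): with 100 training points and 5 labels,
    // each initial weight is 1.0 / (100 * 5) = 0.002, so all (point, label) weights sum to 1.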
val initialWeight = Vectors.dense(
Array.fill[Double](numClasses)(w))
dataSet map {
case mlPoint: MultiLabeledPoint =>
WeightedMultiLabeledPoint(initialWeight, mlPoint)
}
}
}
| BaiGang/spark_multiboost | src/main/scala/org/apache/spark/mllib/classification/multilabel/stronglearners/AdaBoostMH.scala | Scala | apache-2.0 | 6,667 |
package org.draegisoft.ddlCodeGenerator
sealed abstract class TableType(val name:String)
case class SimpleTableType(override val name: String, columns: Seq[ColumnType]) extends TableType(name)
| mdraeger/ddlCodeGenerator | src/main/scala/org/draegisoft/ddlCodeGenerator/TableType.scala | Scala | gpl-3.0 | 195 |
/*
* Copyright (c) 2015 François Cabrol.
*
* This file is part of MelVi.
*
* MelVi is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* MelVi is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with MelVi. If not, see <http://www.gnu.org/licenses/>.
*/
package com.cabrol.francois.melvi.utils
import java.io.File
import javax.sound.midi.ShortMessage._
import javax.sound.midi.{MetaMessage, MidiEvent, MidiSystem, Sequence, ShortMessage, SysexMessage}
import com.cabrol.francois.libjamu.midi.utils.TickUtils
import com.cabrol.francois.libjamu.musictheory.entity.note.Note
/**
* @author Francois Cabrol <[email protected]>
* @since 15-01-29
*/
object MidiUtils {
private def createSequence(notes: List[Note]):Sequence = {
val seq = new Sequence(Sequence.PPQ, 4);
val track = seq.createTrack();
// General MIDI sysex -- turn on General MIDI sound set
val b = Array[Byte](0xF0.asInstanceOf[Byte], 0x7E, 0x7F, 0x09, 0x01, 0xF7.asInstanceOf[Byte]);
var sm = new SysexMessage();
sm.setMessage(b, 6);
var me = new MidiEvent(sm, 0);
track.add(me);
// set track name (meta event)
var mt = new MetaMessage();
    val trackName = "mural track";
mt.setMessage(0x03, trackName.getBytes(), trackName.length());
me = new MidiEvent(mt, 0);
track.add(me);
def makeEvent(comd: Int, chan: Int, one: Int, two: Int, tick: Int): MidiEvent = {
val a = new ShortMessage();
a.setMessage(comd, chan, one, two);
return new MidiEvent(a, tick);
}
// set omni on
track.add(makeEvent(CONTROL_CHANGE, 0, 0x7D, 0x00, 0));
// set poly on
track.add(makeEvent(CONTROL_CHANGE, 0, 0x7F, 0x00, 0));
// set instrument to Piano
track.add(makeEvent(PROGRAM_CHANGE, 0x00, 0x00, 0x00, 0));
val maxTicks = (for (n <- notes) yield {
println(n)
val start = TickUtils.convertTockToTick(n.getRhythmicNote.getStart).toInt
val end = start + TickUtils.convertTockToTick(n.getRhythmicNote.getDuration).toInt
track.add(makeEvent(NOTE_ON, 0, n.getKey.getMidiKey, n.getRhythmicNote.getVelocity, start))
track.add(makeEvent(NOTE_OFF, 0, n.getKey.getMidiKey, n.getRhythmicNote.getVelocity, end))
end
}).max
// set end of track (meta event)
mt = new MetaMessage();
mt.setMessage(0x2F, Array[Byte](), 0);
me = new javax.sound.midi.MidiEvent(mt, maxTicks);
track.add(me);
seq
}
def play(notes: List[Note]) = {
val seq = createSequence(notes)
val sequencer = MidiSystem.getSequencer();
sequencer.open();
sequencer.setSequence(seq);
sequencer.setTempoInBPM(80);
sequencer.start();
}
  def writeMidiFile(notes: List[Note], file: File) = {
val seq = createSequence(notes)
// write the MIDI sequence to a MIDI file
MidiSystem.write(seq, 1, file);
println("[Melvi] Wrote in midi file : " + file.getAbsolutePath());
}
}
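// Hypothetical usage sketch (not part of the original project). It assumes a List[Note] has
// already been produced elsewhere, since constructing libjamu Note objects is outside the scope
// of this utility; only the two entry points defined above are exercised.
object MidiUtilsUsageSketch {
  def demo(melody: List[Note]): Unit = {
    // Play the melody through the default MIDI sequencer...
    MidiUtils.play(melody)
    // ...and also persist it as a standard MIDI file.
    MidiUtils.writeMidiFile(melody, new File("melody.mid"))
  }
}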
| francoiscabrol/MelVi | src/main/scala/com/cabrol/francois/melvi/utils/MidiUtils.scala | Scala | gpl-3.0 | 3,411 |
/*
* Copyright (C) 2007 Lalit Pant <[email protected]>
*
* The contents of this file are subject to the GNU General Public License
* Version 3 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.gnu.org/copyleft/gpl.html
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
*/
package net.kogics.jiva.evolution
import scala.util.Random
import net.kogics.jiva.Predef._
import net.kogics.jiva.population.{Population, Chromosome}
import net.kogics.jiva.util.collection.JList
class RouletteWheelSelector[A](num: Int, rgen: Random) extends ParentSelector[A] {
checkArg(num > 0, "Invalid Selection Size: %d. Selection Size should be > 0", num)
def select(pop: Population[A]) : Population[A] = {
val probs = new JList[Double]
val cumProbs = new JList[Double]
    val aggrFitness = pop.members.foldLeft(0.0){(aggr, chr) => aggr + chr.fitness.get}
pop.foreach { chr =>
probs += chr.fitness.get / aggrFitness
}
cumProbs += probs(0)
for (idx <- Iterator.range(1, pop.size)) {
cumProbs += (probs(idx) + cumProbs(idx-1))
}
val newChrs = new JList[Chromosome[A]]
for (idx <- Iterator.range(0, num)) {
val selected = spinWheel(cumProbs)
newChrs += pop(selected).clone2
}
new Population(newChrs)
}
def spinWheel(cumProbs: Seq[Double]): Int = {
val spinResult = rgen.nextDouble
Iterator.range(0, cumProbs.size).find(idx => spinResult < cumProbs(idx)).get
}
}
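// Hypothetical demo (not part of the original sources): it only exercises the members defined
// above. With cumulative probabilities Seq(0.2, 0.5, 1.0) a random draw of, say, 0.42 falls
// below 0.5 first, so index 1 is returned; wider (fitter) segments are therefore picked more often.
object RouletteWheelSelectorSketch {
  def demo(): Unit = {
    val selector = new RouletteWheelSelector[Int](num = 2, rgen = new Random(42L))
    val index = selector.spinWheel(Seq(0.2, 0.5, 1.0))
    // The draw is always below the final cumulative probability of 1.0, so an index is found.
    require(index >= 0 && index <= 2)
  }
}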
| milliondreams/jiva-ng | src/main/scala/net/kogics/jiva/evolution/RouletteWheelSelector.scala | Scala | gpl-3.0 | 1,747 |
package com.arcusys.valamis.web.portlet
import javax.portlet.{RenderRequest, RenderResponse}
import com.arcusys.learn.liferay.services.CompanyHelper
import com.arcusys.valamis.lrs.serializer.AgentSerializer
import com.arcusys.valamis.lrssupport.oauth.OAuthPortlet
import com.arcusys.valamis.utils.TincanHelper._
import com.arcusys.valamis.util.serialization.JsonHelper
import com.arcusys.valamis.web.portlet.base._
class GradebookView extends OAuthPortlet with PortletBase {
override def doView(request: RenderRequest, response: RenderResponse) {
implicit val out = response.getWriter
val securityScope = getSecurityData(request)
sendTextFile("/templates/gradebook_templates.html")
sendTextFile("/templates/common_templates.html")
val user = LiferayHelpers.getUser(request)
val tincanActor = JsonHelper.toJson(user.getAgentByUuid, new AgentSerializer)
val endpoint = JsonHelper.toJson(getLrsEndpointInfo(request))
val permission = new PermissionUtil(request, this)
val viewAllPermission = permission.hasPermission(ViewAllPermission.name)
val data = Map(
"tincanActor" -> tincanActor,
"endpointData" -> endpoint,
"viewAllPermission" -> viewAllPermission,
"assignmentDeployed" -> false
) ++ securityScope.data
sendMustacheFile(data, "gradebook.html")
}
} | arcusys/Valamis | valamis-portlets/src/main/scala/com/arcusys/valamis/web/portlet/GradebookView.scala | Scala | gpl-3.0 | 1,339 |
package plainer.external
import akka.actor.{Actor, ActorSystem}
import akka.actor.Props
import akka.event.Logging
class MyActor extends Actor {
val log = Logging(context.system, this)
def receive = {
case "test" => log.info("received test")
case _ => log.info("received unknown message")
}
}
class SimpleCounter extends Actor {
var count = 0
def receive = {
case "add" => count += 1
case "echo" => println(count)
case _ => // do nothing
}
}
object TryActors extends App {
def doStuff = {
val system = ActorSystem("system")
val counter = system.actorOf(Props(new SimpleCounter), "counter")
counter ! "add"
counter ! "add"
counter ! "add"
counter ! "echo" // 3
}
doStuff
}
| zeroed/plainer.scala | src/plainer/external/SimpleCounter.scala | Scala | gpl-3.0 | 747 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming.continuous
import org.json4s.DefaultFormats
import org.json4s.jackson.Serialization
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.execution.streaming.{RateStreamOffset, ValueRunTimeMsPair}
import org.apache.spark.sql.sources.v2.reader._
import org.apache.spark.sql.sources.v2.reader.streaming._
case class RateStreamPartitionOffset(
partition: Int, currentValue: Long, currentTimeMs: Long) extends PartitionOffset
class RateStreamContinuousStream(rowsPerSecond: Long, numPartitions: Int) extends ContinuousStream {
implicit val defaultFormats: DefaultFormats = DefaultFormats
val creationTime = System.currentTimeMillis()
val perPartitionRate = rowsPerSecond.toDouble / numPartitions.toDouble
override def mergeOffsets(offsets: Array[PartitionOffset]): Offset = {
assert(offsets.length == numPartitions)
val tuples = offsets.map {
case RateStreamPartitionOffset(i, currVal, nextRead) =>
(i, ValueRunTimeMsPair(currVal, nextRead))
}
RateStreamOffset(Map(tuples: _*))
}
override def deserializeOffset(json: String): Offset = {
RateStreamOffset(Serialization.read[Map[Int, ValueRunTimeMsPair]](json))
}
override def initialOffset: Offset = createInitialOffset(numPartitions, creationTime)
override def planInputPartitions(start: Offset): Array[InputPartition] = {
val partitionStartMap = start match {
case off: RateStreamOffset => off.partitionToValueAndRunTimeMs
case off =>
throw new IllegalArgumentException(
s"invalid offset type ${off.getClass()} for ContinuousRateSource")
}
if (partitionStartMap.keySet.size != numPartitions) {
throw new IllegalArgumentException(
s"The previous run contained ${partitionStartMap.keySet.size} partitions, but" +
s" $numPartitions partitions are currently configured. The numPartitions option" +
" cannot be changed.")
}
Range(0, numPartitions).map { i =>
val start = partitionStartMap(i)
// Have each partition advance by numPartitions each row, with starting points staggered
// by their partition index.
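      // Worked example (added for illustration): with numPartitions = 4 the initial offsets are
      // -4, -3, -2, -1 (see createInitialOffset); each reader first adds its increment of 4,
      // so partition 0 emits 0, 4, 8, ... and partition 1 emits 1, 5, 9, ...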
RateStreamContinuousInputPartition(
start.value,
start.runTimeMs,
i,
numPartitions,
perPartitionRate)
}.toArray
}
override def createContinuousReaderFactory(): ContinuousPartitionReaderFactory = {
RateStreamContinuousReaderFactory
}
override def commit(end: Offset): Unit = {}
override def stop(): Unit = {}
private def createInitialOffset(numPartitions: Int, creationTimeMs: Long) = {
RateStreamOffset(
Range(0, numPartitions).map { i =>
// Note that the starting offset is exclusive, so we have to decrement the starting value
// by the increment that will later be applied. The first row output in each
// partition will have a value equal to the partition index.
(i,
ValueRunTimeMsPair(
(i - numPartitions).toLong,
creationTimeMs))
}.toMap)
}
}
case class RateStreamContinuousInputPartition(
startValue: Long,
startTimeMs: Long,
partitionIndex: Int,
increment: Long,
rowsPerSecond: Double)
extends InputPartition
object RateStreamContinuousReaderFactory extends ContinuousPartitionReaderFactory {
override def createReader(partition: InputPartition): ContinuousPartitionReader[InternalRow] = {
val p = partition.asInstanceOf[RateStreamContinuousInputPartition]
new RateStreamContinuousPartitionReader(
p.startValue, p.startTimeMs, p.partitionIndex, p.increment, p.rowsPerSecond)
}
}
class RateStreamContinuousPartitionReader(
startValue: Long,
startTimeMs: Long,
partitionIndex: Int,
increment: Long,
rowsPerSecond: Double)
extends ContinuousPartitionReader[InternalRow] {
private var nextReadTime: Long = startTimeMs
private val readTimeIncrement: Long = (1000 / rowsPerSecond).toLong
private var currentValue = startValue
private var currentRow: InternalRow = null
override def next(): Boolean = {
currentValue += increment
nextReadTime += readTimeIncrement
try {
while (System.currentTimeMillis < nextReadTime) {
Thread.sleep(nextReadTime - System.currentTimeMillis)
}
} catch {
case _: InterruptedException =>
// Someone's trying to end the task; just let them.
return false
}
currentRow = InternalRow(
DateTimeUtils.fromMillis(nextReadTime),
currentValue)
true
}
override def get: InternalRow = currentRow
override def close(): Unit = {}
override def getOffset(): PartitionOffset =
RateStreamPartitionOffset(partitionIndex, currentValue, nextReadTime)
}
| aosagie/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/continuous/ContinuousRateStreamSource.scala | Scala | apache-2.0 | 5,640 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.executor
import java.io.{Externalizable, ObjectInput, ObjectOutput}
import java.lang.Thread.UncaughtExceptionHandler
import java.net.URL
import java.nio.ByteBuffer
import java.util.Properties
import java.util.concurrent.{ConcurrentHashMap, CountDownLatch, TimeUnit}
import java.util.concurrent.atomic.AtomicBoolean
import scala.collection.immutable
import scala.collection.mutable.{ArrayBuffer, Map}
import scala.concurrent.duration._
import com.google.common.cache.{CacheBuilder, CacheLoader}
import org.mockito.ArgumentCaptor
import org.mockito.ArgumentMatchers.{any, eq => meq}
import org.mockito.Mockito.{inOrder, verify, when}
import org.mockito.invocation.InvocationOnMock
import org.mockito.stubbing.Answer
import org.scalatest.Assertions._
import org.scalatest.PrivateMethodTester
import org.scalatest.concurrent.Eventually
import org.scalatestplus.mockito.MockitoSugar
import org.apache.spark._
import org.apache.spark.TaskState.TaskState
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.internal.config._
import org.apache.spark.internal.config.UI._
import org.apache.spark.memory.{SparkOutOfMemoryError, TestMemoryManager}
import org.apache.spark.metrics.MetricsSystem
import org.apache.spark.rdd.RDD
import org.apache.spark.resource.ResourceInformation
import org.apache.spark.rpc.{RpcEndpointRef, RpcEnv, RpcTimeout}
import org.apache.spark.scheduler.{DirectTaskResult, FakeTask, ResultTask, Task, TaskDescription}
import org.apache.spark.serializer.{JavaSerializer, SerializerInstance, SerializerManager}
import org.apache.spark.shuffle.FetchFailedException
import org.apache.spark.storage.{BlockManager, BlockManagerId}
import org.apache.spark.util.{LongAccumulator, SparkUncaughtExceptionHandler, ThreadUtils, UninterruptibleThread}
class ExecutorSuite extends SparkFunSuite
with LocalSparkContext with MockitoSugar with Eventually with PrivateMethodTester {
override def afterEach(): Unit = {
// Unset any latches after each test; each test that needs them initializes new ones.
ExecutorSuiteHelper.latches = null
super.afterEach()
}
/**
   * Creates an Executor with the provided arguments, passes it to `f`,
   * and stops it after `f` returns.
*/
def withExecutor(
executorId: String,
executorHostname: String,
env: SparkEnv,
userClassPath: Seq[URL] = Nil,
isLocal: Boolean = true,
uncaughtExceptionHandler: UncaughtExceptionHandler
= new SparkUncaughtExceptionHandler,
resources: immutable.Map[String, ResourceInformation]
= immutable.Map.empty[String, ResourceInformation])(f: Executor => Unit): Unit = {
var executor: Executor = null
try {
executor = new Executor(executorId, executorHostname, env, userClassPath, isLocal,
uncaughtExceptionHandler, resources)
f(executor)
} finally {
if (executor != null) {
executor.stop()
}
}
}
test("SPARK-15963: Catch `TaskKilledException` correctly in Executor.TaskRunner") {
// mock some objects to make Executor.launchTask() happy
val conf = new SparkConf
val serializer = new JavaSerializer(conf)
val env = createMockEnv(conf, serializer)
val serializedTask = serializer.newInstance().serialize(new FakeTask(0, 0))
val taskDescription = createFakeTaskDescription(serializedTask)
// we use latches to force the program to run in this order:
// +-----------------------------+---------------------------------------+
// | main test thread | worker thread |
// +-----------------------------+---------------------------------------+
// | executor.launchTask() | |
// | | TaskRunner.run() begins |
// | | ... |
// | | execBackend.statusUpdate // 1st time |
// | executor.killAllTasks(true) | |
// | | ... |
// | | task = ser.deserialize |
// | | ... |
// | | execBackend.statusUpdate // 2nd time |
// | | ... |
// | | TaskRunner.run() ends |
// | check results | |
// +-----------------------------+---------------------------------------+
val executorSuiteHelper = new ExecutorSuiteHelper
val mockExecutorBackend = mock[ExecutorBackend]
when(mockExecutorBackend.statusUpdate(any(), any(), any()))
.thenAnswer(new Answer[Unit] {
var firstTime = true
override def answer(invocationOnMock: InvocationOnMock): Unit = {
if (firstTime) {
executorSuiteHelper.latch1.countDown()
// here between latch1 and latch2, executor.killAllTasks() is called
executorSuiteHelper.latch2.await()
firstTime = false
}
else {
// save the returned `taskState` and `testFailedReason` into `executorSuiteHelper`
val taskState = invocationOnMock.getArguments()(1).asInstanceOf[TaskState]
executorSuiteHelper.taskState = taskState
val taskEndReason = invocationOnMock.getArguments()(2).asInstanceOf[ByteBuffer]
executorSuiteHelper.testFailedReason =
serializer.newInstance().deserialize(taskEndReason)
// let the main test thread check `taskState` and `testFailedReason`
executorSuiteHelper.latch3.countDown()
}
}
})
withExecutor("id", "localhost", env) { executor =>
// the task will be launched in a dedicated worker thread
executor.launchTask(mockExecutorBackend, taskDescription)
if (!executorSuiteHelper.latch1.await(5, TimeUnit.SECONDS)) {
fail("executor did not send first status update in time")
}
// we know the task will be started, but not yet deserialized, because of the latches we
// use in mockExecutorBackend.
executor.killAllTasks(true, "test")
executorSuiteHelper.latch2.countDown()
if (!executorSuiteHelper.latch3.await(5, TimeUnit.SECONDS)) {
fail("executor did not send second status update in time")
}
// `testFailedReason` should be `TaskKilled`; `taskState` should be `KILLED`
assert(executorSuiteHelper.testFailedReason.isInstanceOf[TaskKilled])
assert(executorSuiteHelper.testFailedReason.toErrorString === "TaskKilled (test)")
assert(executorSuiteHelper.taskState === TaskState.KILLED)
}
}
test("SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions") {
val conf = new SparkConf().setMaster("local").setAppName("executor suite test")
sc = new SparkContext(conf)
val serializer = SparkEnv.get.closureSerializer.newInstance()
val resultFunc = (context: TaskContext, itr: Iterator[Int]) => itr.size
// Submit a job where a fetch failure is thrown, but user code has a try/catch which hides
// the fetch failure. The executor should still tell the driver that the task failed due to a
// fetch failure, not a generic exception from user code.
val inputRDD = new FetchFailureThrowingRDD(sc)
val secondRDD = new FetchFailureHidingRDD(sc, inputRDD, throwOOM = false, interrupt = false)
val taskBinary = sc.broadcast(serializer.serialize((secondRDD, resultFunc)).array())
val taskDescription = createResultTaskDescription(serializer, taskBinary, secondRDD, 1)
val failReason = runTaskAndGetFailReason(taskDescription)
assert(failReason.isInstanceOf[FetchFailed])
}
test("Executor's worker threads should be UninterruptibleThread") {
val conf = new SparkConf()
.setMaster("local")
.setAppName("executor thread test")
.set(UI_ENABLED.key, "false")
sc = new SparkContext(conf)
val executorThread = sc.parallelize(Seq(1), 1).map { _ =>
Thread.currentThread.getClass.getName
}.collect().head
assert(executorThread === classOf[UninterruptibleThread].getName)
}
test("SPARK-19276: OOMs correctly handled with a FetchFailure") {
val (failReason, uncaughtExceptionHandler) = testFetchFailureHandling(true)
assert(failReason.isInstanceOf[ExceptionFailure])
val exceptionCaptor = ArgumentCaptor.forClass(classOf[Throwable])
verify(uncaughtExceptionHandler).uncaughtException(any(), exceptionCaptor.capture())
assert(exceptionCaptor.getAllValues.size === 1)
assert(exceptionCaptor.getAllValues().get(0).isInstanceOf[OutOfMemoryError])
}
test("SPARK-23816: interrupts are not masked by a FetchFailure") {
// If killing the task causes a fetch failure, we still treat it as a task that was killed,
// as the fetch failure could easily be caused by interrupting the thread.
val (failReason, _) = testFetchFailureHandling(false)
assert(failReason.isInstanceOf[TaskKilled])
}
/**
* Helper for testing some cases where a FetchFailure should *not* get sent back, because it's
* superseded by another error, either an OOM or intentionally killing a task.
* @param oom if true, throw an OOM after the FetchFailure; else, interrupt the task after the
* FetchFailure
* @param poll if true, poll executor metrics after launching task
*/
private def testFetchFailureHandling(
oom: Boolean,
poll: Boolean = false): (TaskFailedReason, UncaughtExceptionHandler) = {
// when there is a fatal error like an OOM, we don't do normal fetch failure handling, since it
// may be a false positive. And we should call the uncaught exception handler.
// SPARK-23816 also handle interrupts the same way, as killing an obsolete speculative task
// does not represent a real fetch failure.
val conf = new SparkConf().setMaster("local").setAppName("executor suite test")
sc = new SparkContext(conf)
val serializer = SparkEnv.get.closureSerializer.newInstance()
val resultFunc = (context: TaskContext, itr: Iterator[Int]) => itr.size
// Submit a job where a fetch failure is thrown, but then there is an OOM or interrupt. We
// should treat the fetch failure as a false positive, and do normal OOM or interrupt handling.
val inputRDD = new FetchFailureThrowingRDD(sc)
// helper to coordinate between the task thread and this thread that will kill the task
// (and to poll executor metrics if necessary)
ExecutorSuiteHelper.latches = new ExecutorSuiteHelper
val secondRDD = new FetchFailureHidingRDD(sc, inputRDD, throwOOM = oom, interrupt = !oom)
val taskBinary = sc.broadcast(serializer.serialize((secondRDD, resultFunc)).array())
val taskDescription = createResultTaskDescription(serializer, taskBinary, secondRDD, 1)
runTaskGetFailReasonAndExceptionHandler(taskDescription, killTask = !oom, poll)
}
test("Gracefully handle error in task deserialization") {
val conf = new SparkConf
val serializer = new JavaSerializer(conf)
val env = createMockEnv(conf, serializer)
val serializedTask = serializer.newInstance().serialize(new NonDeserializableTask)
val taskDescription = createFakeTaskDescription(serializedTask)
val failReason = runTaskAndGetFailReason(taskDescription)
failReason match {
case ef: ExceptionFailure =>
assert(ef.exception.isDefined)
assert(ef.exception.get.getMessage() === NonDeserializableTask.errorMsg)
case _ =>
fail(s"unexpected failure type: $failReason")
}
}
test("Heartbeat should drop zero accumulator updates") {
heartbeatZeroAccumulatorUpdateTest(true)
}
test("Heartbeat should not drop zero accumulator updates when the conf is disabled") {
heartbeatZeroAccumulatorUpdateTest(false)
}
private def withMockHeartbeatReceiverRef(executor: Executor)
(func: RpcEndpointRef => Unit): Unit = {
val executorClass = classOf[Executor]
val mockReceiverRef = mock[RpcEndpointRef]
val receiverRef = executorClass.getDeclaredField("heartbeatReceiverRef")
receiverRef.setAccessible(true)
receiverRef.set(executor, mockReceiverRef)
func(mockReceiverRef)
}
private def withHeartbeatExecutor(confs: (String, String)*)
(f: (Executor, ArrayBuffer[Heartbeat]) => Unit): Unit = {
val conf = new SparkConf
confs.foreach { case (k, v) => conf.set(k, v) }
val serializer = new JavaSerializer(conf)
val env = createMockEnv(conf, serializer)
withExecutor("id", "localhost", SparkEnv.get) { executor =>
withMockHeartbeatReceiverRef(executor) { mockReceiverRef =>
// Save all heartbeats sent into an ArrayBuffer for verification
val heartbeats = ArrayBuffer[Heartbeat]()
when(mockReceiverRef.askSync(any[Heartbeat], any[RpcTimeout])(any))
.thenAnswer((invocation: InvocationOnMock) => {
val args = invocation.getArguments()
heartbeats += args(0).asInstanceOf[Heartbeat]
HeartbeatResponse(false)
})
f(executor, heartbeats)
}
}
}
private def heartbeatZeroAccumulatorUpdateTest(dropZeroMetrics: Boolean): Unit = {
val c = EXECUTOR_HEARTBEAT_DROP_ZERO_ACCUMULATOR_UPDATES.key -> dropZeroMetrics.toString
withHeartbeatExecutor(c) { (executor, heartbeats) =>
val reportHeartbeat = PrivateMethod[Unit](Symbol("reportHeartBeat"))
// When no tasks are running, there should be no accumulators sent in heartbeat
executor.invokePrivate(reportHeartbeat())
// invokeReportHeartbeat(executor)
assert(heartbeats.length == 1)
assert(heartbeats(0).accumUpdates.length == 0,
"No updates should be sent when no tasks are running")
// When we start a task with a nonzero accumulator, that should end up in the heartbeat
val metrics = new TaskMetrics()
val nonZeroAccumulator = new LongAccumulator()
nonZeroAccumulator.add(1)
metrics.registerAccumulator(nonZeroAccumulator)
val executorClass = classOf[Executor]
val tasksMap = {
val field =
executorClass.getDeclaredField("org$apache$spark$executor$Executor$$runningTasks")
field.setAccessible(true)
field.get(executor).asInstanceOf[ConcurrentHashMap[Long, executor.TaskRunner]]
}
val mockTaskRunner = mock[executor.TaskRunner]
val mockTask = mock[Task[Any]]
when(mockTask.metrics).thenReturn(metrics)
when(mockTaskRunner.taskId).thenReturn(6)
when(mockTaskRunner.task).thenReturn(mockTask)
when(mockTaskRunner.startGCTime).thenReturn(1)
tasksMap.put(6, mockTaskRunner)
executor.invokePrivate(reportHeartbeat())
assert(heartbeats.length == 2)
val updates = heartbeats(1).accumUpdates
assert(updates.length == 1 && updates(0)._1 == 6,
"Heartbeat should only send update for the one task running")
val accumsSent = updates(0)._2.length
assert(accumsSent > 0, "The nonzero accumulator we added should be sent")
if (dropZeroMetrics) {
assert(accumsSent == metrics.accumulators().count(!_.isZero),
"The number of accumulators sent should match the number of nonzero accumulators")
} else {
assert(accumsSent == metrics.accumulators().length,
"The number of accumulators sent should match the number of total accumulators")
}
}
}
test("Send task executor metrics in DirectTaskResult") {
// Run a successful, trivial result task
// We need to ensure, however, that executor metrics are polled after the task is started
// so this requires some coordination using ExecutorSuiteHelper.
val conf = new SparkConf().setMaster("local").setAppName("executor suite test")
sc = new SparkContext(conf)
val serializer = SparkEnv.get.closureSerializer.newInstance()
ExecutorSuiteHelper.latches = new ExecutorSuiteHelper
val resultFunc =
(context: TaskContext, itr: Iterator[Int]) => {
// latch1 tells the test that the task is running, so it can ask the metricsPoller
// to poll; latch2 waits for the polling to be done
ExecutorSuiteHelper.latches.latch1.countDown()
ExecutorSuiteHelper.latches.latch2.await(5, TimeUnit.SECONDS)
itr.size
}
val rdd = new RDD[Int](sc, Nil) {
override def compute(split: Partition, context: TaskContext): Iterator[Int] = {
Iterator(1)
}
override protected def getPartitions: Array[Partition] = {
Array(new SimplePartition)
}
}
val taskBinary = sc.broadcast(serializer.serialize((rdd, resultFunc)).array())
val taskDescription = createResultTaskDescription(serializer, taskBinary, rdd, 0)
val mockBackend = mock[ExecutorBackend]
withExecutor("id", "localhost", SparkEnv.get) { executor =>
executor.launchTask(mockBackend, taskDescription)
// Ensure that the executor's metricsPoller is polled so that values are recorded for
// the task metrics
ExecutorSuiteHelper.latches.latch1.await(5, TimeUnit.SECONDS)
executor.metricsPoller.poll()
ExecutorSuiteHelper.latches.latch2.countDown()
eventually(timeout(5.seconds), interval(10.milliseconds)) {
assert(executor.numRunningTasks === 0)
}
}
// Verify that peak values for task metrics get sent in the TaskResult
val orderedMock = inOrder(mockBackend)
val statusCaptor = ArgumentCaptor.forClass(classOf[ByteBuffer])
orderedMock.verify(mockBackend)
.statusUpdate(meq(0L), meq(TaskState.RUNNING), statusCaptor.capture())
orderedMock.verify(mockBackend)
.statusUpdate(meq(0L), meq(TaskState.FINISHED), statusCaptor.capture())
val resultData = statusCaptor.getAllValues.get(1)
val result = serializer.deserialize[DirectTaskResult[Int]](resultData)
val taskMetrics = new ExecutorMetrics(result.metricPeaks)
assert(taskMetrics.getMetricValue("JVMHeapMemory") > 0)
}
test("Send task executor metrics in TaskKilled") {
val (taskFailedReason, _) = testFetchFailureHandling(false, true)
assert(taskFailedReason.isInstanceOf[TaskKilled])
val metrics = taskFailedReason.asInstanceOf[TaskKilled].metricPeaks.toArray
val taskMetrics = new ExecutorMetrics(metrics)
assert(taskMetrics.getMetricValue("JVMHeapMemory") > 0)
}
test("Send task executor metrics in ExceptionFailure") {
val (taskFailedReason, _) = testFetchFailureHandling(true, true)
assert(taskFailedReason.isInstanceOf[ExceptionFailure])
val metrics = taskFailedReason.asInstanceOf[ExceptionFailure].metricPeaks.toArray
val taskMetrics = new ExecutorMetrics(metrics)
assert(taskMetrics.getMetricValue("JVMHeapMemory") > 0)
}
test("SPARK-34949: do not re-register BlockManager when executor is shutting down") {
val reregisterInvoked = new AtomicBoolean(false)
val mockBlockManager = mock[BlockManager]
when(mockBlockManager.reregister()).thenAnswer { (_: InvocationOnMock) =>
reregisterInvoked.getAndSet(true)
}
val conf = new SparkConf(false).setAppName("test").setMaster("local[2]")
val mockEnv = createMockEnv(conf, new JavaSerializer(conf))
when(mockEnv.blockManager).thenReturn(mockBlockManager)
withExecutor("id", "localhost", mockEnv) { executor =>
withMockHeartbeatReceiverRef(executor) { mockReceiverRef =>
when(mockReceiverRef.askSync(any[Heartbeat], any[RpcTimeout])(any)).thenAnswer {
(_: InvocationOnMock) => HeartbeatResponse(reregisterBlockManager = true)
}
val reportHeartbeat = PrivateMethod[Unit](Symbol("reportHeartBeat"))
executor.invokePrivate(reportHeartbeat())
assert(reregisterInvoked.get(), "BlockManager.reregister should be invoked " +
"on HeartbeatResponse(reregisterBlockManager = true) when executor is not shutting down")
reregisterInvoked.getAndSet(false)
executor.stop()
executor.invokePrivate(reportHeartbeat())
assert(!reregisterInvoked.get(),
"BlockManager.reregister should not be invoked when executor is shutting down")
}
}
}
test("SPARK-33587: isFatalError") {
def errorInThreadPool(e: => Throwable): Throwable = {
intercept[Throwable] {
val taskPool = ThreadUtils.newDaemonFixedThreadPool(1, "test")
try {
val f = taskPool.submit(new java.util.concurrent.Callable[String] {
override def call(): String = throw e
})
f.get()
} finally {
taskPool.shutdown()
}
}
}
def errorInGuavaCache(e: => Throwable): Throwable = {
val cache = CacheBuilder.newBuilder()
.build(new CacheLoader[String, String] {
override def load(key: String): String = throw e
})
intercept[Throwable] {
cache.get("test")
}
}
def testThrowable(
e: => Throwable,
depthToCheck: Int,
isFatal: Boolean): Unit = {
import Executor.isFatalError
      // `e`'s depth is 1 so `depthToCheck` needs to be at least 1 to detect fatal errors.
assert(isFatalError(e, depthToCheck) == (depthToCheck >= 1 && isFatal))
      // `e`'s depth is 2 so `depthToCheck` needs to be at least 2 to detect fatal errors.
assert(isFatalError(errorInThreadPool(e), depthToCheck) == (depthToCheck >= 2 && isFatal))
assert(isFatalError(errorInGuavaCache(e), depthToCheck) == (depthToCheck >= 2 && isFatal))
assert(isFatalError(
new SparkException("foo", e),
depthToCheck) == (depthToCheck >= 2 && isFatal))
// `e`'s depth is 3 so `depthToCheck` needs to be at least 3 to detect fatal errors.
assert(isFatalError(
errorInThreadPool(errorInGuavaCache(e)),
depthToCheck) == (depthToCheck >= 3 && isFatal))
assert(isFatalError(
errorInGuavaCache(errorInThreadPool(e)),
depthToCheck) == (depthToCheck >= 3 && isFatal))
assert(isFatalError(
new SparkException("foo", new SparkException("foo", e)),
depthToCheck) == (depthToCheck >= 3 && isFatal))
}
for (depthToCheck <- 0 to 5) {
testThrowable(new OutOfMemoryError(), depthToCheck, isFatal = true)
testThrowable(new InterruptedException(), depthToCheck, isFatal = false)
testThrowable(new RuntimeException("test"), depthToCheck, isFatal = false)
testThrowable(new SparkOutOfMemoryError("test"), depthToCheck, isFatal = false)
}
// Verify we can handle the cycle in the exception chain
val e1 = new Exception("test1")
val e2 = new Exception("test2")
e1.initCause(e2)
e2.initCause(e1)
for (depthToCheck <- 0 to 5) {
testThrowable(e1, depthToCheck, isFatal = false)
testThrowable(e2, depthToCheck, isFatal = false)
}
}
private def createMockEnv(conf: SparkConf, serializer: JavaSerializer): SparkEnv = {
val mockEnv = mock[SparkEnv]
val mockRpcEnv = mock[RpcEnv]
val mockMetricsSystem = mock[MetricsSystem]
val mockBlockManager = mock[BlockManager]
when(mockEnv.conf).thenReturn(conf)
when(mockEnv.serializer).thenReturn(serializer)
when(mockEnv.serializerManager).thenReturn(mock[SerializerManager])
when(mockEnv.rpcEnv).thenReturn(mockRpcEnv)
when(mockEnv.metricsSystem).thenReturn(mockMetricsSystem)
when(mockEnv.memoryManager).thenReturn(new TestMemoryManager(conf))
when(mockEnv.closureSerializer).thenReturn(serializer)
when(mockBlockManager.blockManagerId).thenReturn(BlockManagerId("1", "hostA", 1234))
when(mockEnv.blockManager).thenReturn(mockBlockManager)
SparkEnv.set(mockEnv)
mockEnv
}
private def createResultTaskDescription(
serializer: SerializerInstance,
taskBinary: Broadcast[Array[Byte]],
rdd: RDD[Int],
stageId: Int): TaskDescription = {
val serializedTaskMetrics = serializer.serialize(TaskMetrics.registered).array()
val task = new ResultTask(
stageId = stageId,
stageAttemptId = 0,
taskBinary = taskBinary,
partition = rdd.partitions(0),
locs = Seq(),
outputId = 0,
localProperties = new Properties(),
serializedTaskMetrics = serializedTaskMetrics
)
val serTask = serializer.serialize(task)
createFakeTaskDescription(serTask)
}
private def createFakeTaskDescription(serializedTask: ByteBuffer): TaskDescription = {
new TaskDescription(
taskId = 0,
attemptNumber = 0,
executorId = "",
name = "",
index = 0,
partitionId = 0,
addedFiles = Map[String, Long](),
addedJars = Map[String, Long](),
addedArchives = Map[String, Long](),
properties = new Properties,
cpus = 1,
resources = immutable.Map[String, ResourceInformation](),
serializedTask)
}
private def runTaskAndGetFailReason(taskDescription: TaskDescription): TaskFailedReason = {
runTaskGetFailReasonAndExceptionHandler(taskDescription, false)._1
}
private def runTaskGetFailReasonAndExceptionHandler(
taskDescription: TaskDescription,
killTask: Boolean,
poll: Boolean = false): (TaskFailedReason, UncaughtExceptionHandler) = {
val mockBackend = mock[ExecutorBackend]
val mockUncaughtExceptionHandler = mock[UncaughtExceptionHandler]
val timedOut = new AtomicBoolean(false)
withExecutor("id", "localhost", SparkEnv.get,
uncaughtExceptionHandler = mockUncaughtExceptionHandler) { executor =>
// the task will be launched in a dedicated worker thread
executor.launchTask(mockBackend, taskDescription)
if (killTask) {
val killingThread = new Thread("kill-task") {
override def run(): Unit = {
// wait to kill the task until it has thrown a fetch failure
if (ExecutorSuiteHelper.latches.latch1.await(10, TimeUnit.SECONDS)) {
// now we can kill the task
// but before that, ensure that the executor's metricsPoller is polled
if (poll) {
executor.metricsPoller.poll()
}
executor.killAllTasks(true, "Killed task, e.g. because of speculative execution")
} else {
timedOut.set(true)
}
}
}
killingThread.start()
} else {
if (ExecutorSuiteHelper.latches != null) {
ExecutorSuiteHelper.latches.latch1.await(5, TimeUnit.SECONDS)
if (poll) {
executor.metricsPoller.poll()
}
ExecutorSuiteHelper.latches.latch2.countDown()
}
}
eventually(timeout(5.seconds), interval(10.milliseconds)) {
assert(executor.numRunningTasks === 0)
}
assert(!timedOut.get(), "timed out waiting to be ready to kill tasks")
}
val orderedMock = inOrder(mockBackend)
val statusCaptor = ArgumentCaptor.forClass(classOf[ByteBuffer])
orderedMock.verify(mockBackend)
.statusUpdate(meq(0L), meq(TaskState.RUNNING), statusCaptor.capture())
val finalState = if (killTask) TaskState.KILLED else TaskState.FAILED
orderedMock.verify(mockBackend)
.statusUpdate(meq(0L), meq(finalState), statusCaptor.capture())
// first statusUpdate for RUNNING has empty data
assert(statusCaptor.getAllValues().get(0).remaining() === 0)
// second update is more interesting
val failureData = statusCaptor.getAllValues.get(1)
val failReason =
SparkEnv.get.closureSerializer.newInstance().deserialize[TaskFailedReason](failureData)
(failReason, mockUncaughtExceptionHandler)
}
}
class FetchFailureThrowingRDD(sc: SparkContext) extends RDD[Int](sc, Nil) {
override def compute(split: Partition, context: TaskContext): Iterator[Int] = {
new Iterator[Int] {
override def hasNext: Boolean = true
override def next(): Int = {
throw new FetchFailedException(
bmAddress = BlockManagerId("1", "hostA", 1234),
shuffleId = 0,
mapId = 0L,
mapIndex = 0,
reduceId = 0,
message = "fake fetch failure"
)
}
}
}
override protected def getPartitions: Array[Partition] = {
Array(new SimplePartition)
}
}
class SimplePartition extends Partition {
override def index: Int = 0
}
// NOTE: Unless this class is instantiated with throwOOM = false and interrupt = false,
// ExecutorSuiteHelper.latches needs to be set (not null).
class FetchFailureHidingRDD(
sc: SparkContext,
val input: FetchFailureThrowingRDD,
throwOOM: Boolean,
interrupt: Boolean) extends RDD[Int](input) {
override def compute(split: Partition, context: TaskContext): Iterator[Int] = {
val inItr = input.compute(split, context)
try {
Iterator(inItr.size)
} catch {
case t: Throwable =>
if (throwOOM) {
// Allow executor metrics to be polled (if necessary) before throwing the OOMError
ExecutorSuiteHelper.latches.latch1.countDown()
ExecutorSuiteHelper.latches.latch2.await(5, TimeUnit.SECONDS)
// scalastyle:off throwerror
throw new OutOfMemoryError("OOM while handling another exception")
// scalastyle:on throwerror
} else if (interrupt) {
// make sure our test is setup correctly
assert(TaskContext.get().asInstanceOf[TaskContextImpl].fetchFailed.isDefined)
// signal we are ready for executor metrics to be polled (if necessary) and for
// the task to get killed
ExecutorSuiteHelper.latches.latch1.countDown()
// then wait for another thread in the test to kill the task -- this latch
// is never actually decremented, we just wait to get killed.
ExecutorSuiteHelper.latches.latch2.await(10, TimeUnit.SECONDS)
throw new IllegalStateException("timed out waiting to be interrupted")
} else {
throw new RuntimeException("User Exception that hides the original exception", t)
}
}
}
override protected def getPartitions: Array[Partition] = {
Array(new SimplePartition)
}
}
// Helps to test("SPARK-15963")
private class ExecutorSuiteHelper {
val latch1 = new CountDownLatch(1)
val latch2 = new CountDownLatch(1)
val latch3 = new CountDownLatch(1)
@volatile var taskState: TaskState = _
@volatile var testFailedReason: TaskFailedReason = _
}
// Helper for coordinating killing tasks as well as polling executor metrics
private object ExecutorSuiteHelper {
var latches: ExecutorSuiteHelper = null
}
private class NonDeserializableTask extends FakeTask(0, 0) with Externalizable {
def writeExternal(out: ObjectOutput): Unit = {}
def readExternal(in: ObjectInput): Unit = {
throw new RuntimeException(NonDeserializableTask.errorMsg)
}
}
private object NonDeserializableTask {
val errorMsg = "failure in deserialization"
}
| ueshin/apache-spark | core/src/test/scala/org/apache/spark/executor/ExecutorSuite.scala | Scala | apache-2.0 | 31,902 |
package scala.slick.examples.direct
//#imports
import scala.slick.driver.H2Driver
import H2Driver.simple.Database
import Database.{threadLocalSession => session}
import scala.slick.direct._
import scala.slick.direct.AnnotationMapper._
//#imports
//#schema
// describe schema for direct embedding
@table(name="COFFEES")
case class Coffee(
@column(name="NAME")
name : String,
@column(name="PRICE")
price : Double
)
//#schema
object DirectEmbedding extends App {
//#result
val db = Database.forURL("jdbc:h2:mem:test1", driver = "org.h2.Driver")
db withSession {
//#result
//#inserts
// fill database with test data (using plain SQL)
val coffees_data = Vector(
("Colombian", 1.0),
("French_Roast", 2.0),
("Espresso", 3.0),
("Colombian_Decaf", 4.0),
("French_Roast_Decaf", 5.0)
)
// create test table
import scala.slick.jdbc.StaticQuery.interpolation
sqlu"create table COFFEES(NAME varchar(255), PRICE double)".execute
(for {
(name, sales) <- coffees_data
} yield sqlu"insert into COFFEES values ($name, $sales)".first).sum
//#inserts
//#query
// query database using direct embedding
val q1 = Queryable[Coffee]
val q2 = q1.filter( _.price > 3.0 ).map( _ .name )
//#query
//#result
// execute query using a chosen db backend
val backend = new SlickBackend( H2Driver, AnnotationMapper )
println( backend.result( q2, session ) )
println( backend.result( q2.length, session ) )
//#result
//#implicitqueryable
//
val iq1 = ImplicitQueryable( q1, backend, session )
val iq2 = iq1.filter( c => c.price > 3.0 )
println( iq2.toSeq ) // <- triggers execution
println( iq2.length ) // <- triggers execution
//#implicitqueryable
//#nesting
q1.map( c => (c.name, (c, c.price)) )
//#nesting
//#result
}
//#result
}
| boldradius/slick | src/sphinx/code/DirectEmbedding.scala | Scala | bsd-2-clause | 1,994 |
package geometry
import com.vividsolutions.jts.{geom => jts}
trait Geometry {
val jtsGeometry: jts.Geometry
def geometryType: String = jtsGeometry.getGeometryType
def envelope: Envelope = Envelope(jtsGeometry.getEnvelopeInternal)
def isValid: Boolean = jtsGeometry.isValid
def isSimple: Boolean = jtsGeometry.isSimple
def isEmpty: Boolean = jtsGeometry.isEmpty
def buffer(d: Double): Geometry = convertType(jtsGeometry.buffer(d))
def contains(that: Geometry): Boolean = {
jtsGeometry.contains(that.jtsGeometry)
}
def covers(that: Geometry): Boolean = {
jtsGeometry.covers(that.jtsGeometry)
}
def crosses(that: Geometry): Boolean = {
jtsGeometry.crosses(that.jtsGeometry)
}
def disjoint(that: Geometry): Boolean = {
jtsGeometry.disjoint(that.jtsGeometry)
}
def equal(that: Geometry): Boolean = {
jtsGeometry.equalsExact(that.jtsGeometry)
}
def almostEqual(that: Geometry, tolerance: Double): Boolean = {
jtsGeometry.equalsExact(that.jtsGeometry, tolerance)
}
def intersects(that: Geometry): Boolean = {
jtsGeometry.intersects(that.jtsGeometry)
}
def touches(that: Geometry): Boolean = {
jtsGeometry.touches(that.jtsGeometry)
}
def isWithinDistance(that: Geometry, distance: Double): Boolean = {
jtsGeometry.isWithinDistance(that.jtsGeometry, distance: Double)
}
def centroid: Point = Point(jtsGeometry.getCentroid)
def coordinates: Array[jts.Coordinate] = jtsGeometry.getCoordinates
def points: Array[Point] = coordinates.map(c => Point(c.x, c.y))
def intersection(that: Geometry): Geometry = {
val result = jtsGeometry.intersection(that.jtsGeometry)
convertType(result)
}
def wkt: String = jtsGeometry.toText
def convertType(geom: jts.Geometry): Geometry = {
geom.getGeometryType match {
case "Point" => Point(geom.asInstanceOf[jts.Point])
case "Line" => Line(geom.asInstanceOf[jts.LineString])
case "Polygon" => Polygon(geom.asInstanceOf[jts.Polygon])
case "MultiPoint" => MultiPoint(geom.asInstanceOf[jts.MultiPoint])
case "MultiLineString" =>
MultiLine(geom.asInstanceOf[jts.MultiLineString])
case "MultiPolygon" => MultiPolygon(geom.asInstanceOf[jts.MultiPolygon])
}
}
}
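// Hypothetical usage sketch (not part of the original sources): relies only on members shown
// above plus the Point companion already used by `points` and `centroid`.
object GeometryUsageSketch {
  def demo(): Unit = {
    val a = Point(0.0, 0.0)
    val b = Point(0.0, 0.0)
    // Identical coordinates compare equal within any positive tolerance.
    println(a.almostEqual(b, tolerance = 1e-9))
    // A point lies inside the polygon produced by buffering it.
    println(a.buffer(1.0).contains(b))
  }
}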
| jmarin/scale | core/src/main/scala/geometry/Geometry.scala | Scala | apache-2.0 | 2,262 |
package org.machine.engine.graph.decisions
object NodeIdentityGenerator{
private var counter: Short = 0
private val incr: Short = 1
  def id: Short = {
    counter = (counter + incr).toShort
    counter
  }
  def reset: Unit = {
    counter = 0
  }
}
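// Hypothetical usage sketch (not part of the original sources): ids are handed out sequentially
// starting from 1, and reset starts the sequence over.
object NodeIdentityGeneratorSketch {
  def demo(): Unit = {
    NodeIdentityGenerator.reset
    val first = NodeIdentityGenerator.id
    val second = NodeIdentityGenerator.id
    require(first == 1.toShort && second == 2.toShort)
  }
}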
| sholloway/graph-engine | src/main/scala/org/machine/engine/graph/decisions/NodeIdentifyGenerator.scala | Scala | mit | 255 |
package scray.querying.storeabstraction
import scray.querying.description.{ TableIdentifier, VersioningConfiguration }
import scray.querying.description.Row
import scray.querying.source.store.QueryableStoreSource
import scray.querying.queries.DomainQuery
import com.twitter.util.FuturePool
/**
* interface for store generation
*/
trait StoreGenerators {
/**
* creates a row store, i.e. a store that has a primary key column and maybe a bunch of other columns
*/
def createRowStore[Q <: DomainQuery](table: TableIdentifier): Option[(QueryableStoreSource[Q], ((_) => Row, Option[String], Option[VersioningConfiguration[_, _]]))]
/**
* gets the extractor, that helps to evaluate meta-data for this type of dbms
*/
def getExtractor[Q <: DomainQuery, S <: QueryableStoreSource[Q]](
store: S, tableName: Option[String], versions: Option[VersioningConfiguration[_, _]],
dbSystem: Option[String], futurePool: FuturePool): StoreExtractor[S]
} | scray/scray | scray-querying/modules/scray-querying/src/main/scala/scray/querying/storeabstraction/StoreGenerators.scala | Scala | apache-2.0 | 972 |
package spark.streaming.dstream
import spark.streaming.{Duration, DStream, Time}
import spark.RDD
private[streaming]
class FlatMappedDStream[T: ClassManifest, U: ClassManifest](
parent: DStream[T],
flatMapFunc: T => Traversable[U]
) extends DStream[U](parent.ssc) {
override def dependencies = List(parent)
override def slideDuration: Duration = parent.slideDuration
override def compute(validTime: Time): Option[RDD[U]] = {
parent.getOrCompute(validTime).map(_.flatMap(flatMapFunc))
}
}
| koeninger/spark | streaming/src/main/scala/spark/streaming/dstream/FlatMappedDStream.scala | Scala | bsd-3-clause | 516 |
package io.vamp.container_driver.marathon
import akka.actor.{ Actor, ActorRef }
import akka.event.LoggingAdapter
import io.vamp.common.akka.ActorExecutionContextProvider
import io.vamp.common.http.HttpClient
import io.vamp.common.notification.NotificationErrorException
import io.vamp.common.util.HashUtil
import io.vamp.common.vitals.InfoRequest
import io.vamp.common.{ ClassMapper, Config, ConfigMagnet }
import io.vamp.container_driver._
import io.vamp.container_driver.marathon.MarathonDriverActor.{ DeployMarathonApp, UnDeployMarathonApp }
import io.vamp.container_driver.notification.{ UndefinedMarathonApplication, UnsupportedContainerDriverRequest }
import io.vamp.model.artifact._
import io.vamp.model.notification.InvalidArgumentValueError
import io.vamp.model.reader.{ MegaByte, Quantity }
import io.vamp.model.resolver.NamespaceValueResolver
import org.json4s.JsonAST.JObject
import org.json4s._
import org.json4s.native.JsonMethods.parse
import scala.concurrent.Future
import scala.util.Try
class MarathonDriverActorMapper extends ClassMapper {
val name = "marathon"
val clazz: Class[_] = classOf[MarathonDriverActor]
}
object MarathonDriverActor {
val mesosConfig = "vamp.container-driver.mesos"
val marathonConfig = "vamp.container-driver.marathon"
val namespaceConstraint: ConfigMagnet[List[String]] = Config.stringList(s"$marathonConfig.namespace-constraint")
object Schema extends Enumeration {
val Docker, Cmd, Command = Value
}
MarathonDriverActor.Schema.values
val dialect = "marathon"
case class DeployMarathonApp(request: AnyRef)
case class UnDeployMarathonApp(request: AnyRef)
}
case class MesosInfo(frameworks: Any, slaves: Any)
case class MarathonDriverInfo(mesos: MesosInfo, marathon: Any)
class MarathonDriverActor
extends ContainerDriverActor
with MarathonNamespace
with ActorExecutionContextProvider
with ContainerDriver
with HealthCheckMerger
with NamespaceValueResolver {
import ContainerDriverActor._
private implicit val formats: Formats = DefaultFormats
private implicit val loggingAdapter: LoggingAdapter = log
private implicit lazy val httpClient: HttpClient = new HttpClient
private lazy val config = MarathonClientConfig()
private lazy val client: MarathonClient = MarathonClient.acquire(config)
override protected def supportedDeployableTypes: List[DeployableType] = DockerDeployableType :: CommandDeployableType :: Nil
override def receive: Actor.Receive = {
case InfoRequest ⇒ reply(info)
case GetNodes ⇒ reply(schedulerNodes)
case GetRoutingGroups ⇒ reply(routingGroups)
case Get(services, equality) ⇒ get(services, equality)
case d: Deploy ⇒ reply(deploy(d.deployment, d.cluster, d.service, d.update))
case u: Undeploy ⇒ reply(undeploy(u.deployment, u.service))
case DeployedGateways(gateways) ⇒ reply(deployedGateways(gateways))
case GetWorkflow(workflow, replyTo) ⇒ get(workflow, replyTo)
case d: DeployWorkflow ⇒ reply(deploy(d.workflow, d.update))
case u: UndeployWorkflow ⇒ reply(undeploy(u.workflow))
case a: DeployMarathonApp ⇒ reply(deploy(a.request))
case a: UnDeployMarathonApp ⇒ reply(undeploy(a.request))
case any ⇒ unsupported(UnsupportedContainerDriverRequest(any))
}
override def postStop(): Unit = MarathonClient.release(config)
private def info: Future[ContainerInfo] = client.info
private def schedulerNodes: Future[List[SchedulerNode]] = client.nodes().map {
_.flatMap {
case node: Map[_, _] ⇒
Try {
try {
val used = node.asInstanceOf[Map[String, AnyVal]]("used_resources").asInstanceOf[Map[String, AnyVal]]
val resources = node.asInstanceOf[Map[String, AnyVal]]("resources").asInstanceOf[Map[String, AnyVal]]
SchedulerNode(
name = HashUtil.hexSha1(node.asInstanceOf[Map[String, String]]("id")),
capacity = SchedulerNodeSize(Quantity.of(resources.getOrElse("cpus", 0)), MegaByte.of(s"${resources.getOrElse("mem", "0")}M")),
used = Option(SchedulerNodeSize(Quantity.of(used.getOrElse("cpus", 0)), MegaByte.of(s"${used.getOrElse("mem", "0")}M")))
)
}
catch {
case e: Exception ⇒
e.printStackTrace()
throw e
}
}.toOption
case _ ⇒ None
}
}
private def routingGroups: Future[List[RoutingGroup]] = {
client.get().map {
_.flatMap { app ⇒
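        // Marathon app ids look like "/group/app": the first path segment becomes the routing
        // group namespace and the second the app name; apps nested more deeply are skipped.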
(app.id.split('/').filterNot(_.isEmpty).toList match {
case group :: id :: Nil ⇒ Option(group → id)
case id :: Nil ⇒ Option("" → id)
case _ ⇒ None
}) map {
case (group, id) ⇒
val containers = instances(app)
val appPorts = app.container.flatMap(_.docker).map(_.portMappings.map(_.containerPort)).getOrElse(Nil).flatten
val containerPorts = app.container.map(_.portMappings.map(_.containerPort)).getOrElse(Nil).flatten
val ports = appPorts ++ containerPorts
RoutingGroup(
name = id,
kind = "app",
namespace = group,
labels = app.labels,
image = app.container.flatMap(_.docker).map(_.image),
instances = containers.map { container ⇒
RoutingInstance(
ip = container.host,
ports = ports.zip(container.ports).map(port ⇒ RoutingInstancePort(port._1, port._2))
)
}
)
}
}
}
}
private def get(deploymentServices: List[DeploymentServices], equalityRequest: ServiceEqualityRequest): Unit = {
log.info("getting deployment services")
val replyTo = sender()
deploymentServices.flatMap(ds ⇒ ds.services.map((ds.deployment, _))).foreach {
case (deployment, service) ⇒
val id = appId(deployment, service.breed)
log.debug(s"marathon sending request: $id")
client.get(id).foreach {
case Some(app) ⇒
val cluster = deployment.clusters.find { c ⇒ c.services.exists { s ⇒ s.breed.name == service.breed.name } }
val equality = ServiceEqualityResponse(
deployable = !equalityRequest.deployable || checkDeployable(service, app),
ports = !equalityRequest.ports || checkPorts(deployment, cluster, service, app),
environmentVariables = !equalityRequest.environmentVariables || checkEnvironmentVariables(deployment, cluster, service, app),
health = !equalityRequest.health || checkHealth(deployment, service, app)
)
replyTo ! ContainerService(
deployment,
service,
Option(containers(app)),
app.taskStats.map(ts ⇒ MarathonCounts.toServiceHealth(ts.totalSummary.stats.counts)),
equality = equality
)
case None ⇒ replyTo ! ContainerService(deployment, service, None, None)
}
}
}
private def checkDeployable(service: DeploymentService, app: App): Boolean = {
if (CommandDeployableType.matches(service.breed.deployable))
service.breed.deployable.definition == app.cmd.getOrElse("")
else if (DockerDeployableType.matches(service.breed.deployable))
service.breed.deployable.definition == app.container.flatMap(_.docker).map(_.image).getOrElse("")
else true
}
private def checkPorts(deployment: Deployment, cluster: Option[DeploymentCluster], service: DeploymentService, app: App): Boolean = cluster.exists { c ⇒
logger.info("CheckPorts for deployment {} network {}", deployment.name, service.network.getOrElse("Empty"))
val appPorts = app.container.map(_.portMappings.flatMap(_.containerPort)).getOrElse(Nil).toSet
val containerPorts = app.container.flatMap(_.docker).map(_.portMappings.flatMap(_.containerPort)).getOrElse(Nil).toSet
val servicePorts = portMappings(deployment, c, service, "").map(_.containerPort).toSet
    // Due to changes in Marathon 1.5.x, both the container (docker) and app port mappings need to be checked.
if (appPorts != servicePorts)
logger.info("appPorts {}, servicePorts {}", appPorts.toString(), servicePorts.toString())
if (containerPorts != servicePorts)
logger.info("containerPorts {}, servicePorts {}", containerPorts.toString(), servicePorts.toString())
    /**
     * When the app runs in host networking mode, its ports are defined in portDefinitions.
     * portDefinitions is currently not modelled in the MarathonApp object, so if the
     * dialect declares portDefinitions this port check returns true.
     */
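    // Illustrative dialect fragment (shape assumed, not taken from this file): a service
    // dialect declaring host-network ports might carry something like
    //   Map("portDefinitions" -> List(Map("port" -> 8080, "protocol" -> "tcp")))
    // under the "marathon" dialect key, in which case isPortDefinitionsDefined is true.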
val isPortDefinitionsDefined =
Try(service.dialects.getOrElse(MarathonDriverActor.dialect, Map())
.asInstanceOf[Map[String, Any]].get("portDefinitions").isDefined)
.recoverWith {
case t ⇒
logger.error("Port definitions are not in the dialect", t)
Try(false)
}.get
appPorts == servicePorts || containerPorts == servicePorts || isPortDefinitionsDefined
}
private def checkEnvironmentVariables(deployment: Deployment, cluster: Option[DeploymentCluster], service: DeploymentService, app: App): Boolean = cluster.exists { c ⇒
app.env == environment(deployment, c, service)
}
private def checkHealth(deployment: Deployment, service: DeploymentService, app: App): Boolean = {
MarathonHealthCheck.equalHealthChecks(deployment.ports, service.healthChecks.getOrElse(List()), app.healthChecks)
}
private def get(workflow: Workflow, replyTo: ActorRef): Unit = {
log.debug(s"marathon get workflow: ${workflow.name}")
client.get(appId(workflow)).foreach {
case Some(app) ⇒
replyTo ! ContainerWorkflow(
workflow,
Option(containers(app)),
app.taskStats.map(ts ⇒ MarathonCounts.toServiceHealth(ts.totalSummary.stats.counts))
)
case _ ⇒ replyTo ! ContainerWorkflow(workflow, None)
}
}
private def noGlobalOverride(arg: Argument): MarathonApp ⇒ MarathonApp = identity[MarathonApp]
private def applyGlobalOverride(workflowDeployment: Boolean): PartialFunction[Argument, MarathonApp ⇒ MarathonApp] = {
case Argument("override.workflow.docker.network", networkOverrideValue) ⇒
app ⇒
if (workflowDeployment)
app.copy(container = app.container.map(c ⇒ c.copy(docker = c.docker.copy(
network = networkOverrideValue,
portMappings = c.docker.portMappings.map(portMapping ⇒ networkOverrideValue match {
case "USER" ⇒ portMapping.copy(hostPort = None)
case _ ⇒ portMapping
})
))))
else app
case Argument("override.deployment.docker.network", networkOverrideValue) ⇒
app ⇒
if (!workflowDeployment)
app.copy(container = app.container.map(c ⇒ c.copy(docker = c.docker.copy(
network = networkOverrideValue,
portMappings = c.docker.portMappings.map(portMapping ⇒ networkOverrideValue match {
case "USER" ⇒ portMapping.copy(hostPort = None)
case _ ⇒ portMapping
})
))))
else app
case arg @ Argument("override.workflow.docker.privileged", runPrivileged) ⇒
app ⇒
if (workflowDeployment)
Try(runPrivileged.toBoolean).map(
privileged ⇒ app.copy(container = app.container.map(c ⇒ c.copy(docker = c.docker.copy(privileged = privileged))))
).getOrElse(throw NotificationErrorException(InvalidArgumentValueError(arg), s"${arg.key} -> ${arg.value}"))
else app
case arg @ Argument("override.deployment.docker.privileged", runPrivileged) ⇒
app ⇒
if (!workflowDeployment)
Try(runPrivileged.toBoolean).map(
privileged ⇒ app.copy(container = app.container.map(c ⇒ c.copy(docker = c.docker.copy(privileged = privileged))))
).getOrElse(throw NotificationErrorException(InvalidArgumentValueError(arg), s"${arg.key} -> ${arg.value}"))
else app
case Argument("override.workflow.ipAddress.networkName", networkName) ⇒
app ⇒
if (workflowDeployment)
app.copy(ipAddress = Some(MarathonAppIpAddress(resolveWithNamespace(networkName))))
else app
case Argument("override.deployment.ipAddress.networkName", networkName) ⇒
app ⇒
if (!workflowDeployment)
app.copy(ipAddress = Some(MarathonAppIpAddress(resolveWithNamespace(networkName))))
else app
case Argument("override.workflow.fetch.uri", uriValue) ⇒
app ⇒
if (workflowDeployment)
app.copy(fetch =
app.fetch match {
case None ⇒ Some(List(UriObject(uriValue)))
case Some(l) ⇒ Some(UriObject(uriValue) :: l)
})
else app
case Argument("override.deployment.fetch.uri", uriValue) ⇒
app ⇒
if (!workflowDeployment)
app.copy(fetch =
app.fetch match {
case None ⇒ Some(List(UriObject(uriValue)))
case Some(l) ⇒ Some(UriObject(uriValue) :: l)
})
else app
case arg @ Argument("override.workflow.noHealthChecks", noHealthChecks) ⇒
app ⇒
if (workflowDeployment)
Try(noHealthChecks.toBoolean).map(
noHealthChecks ⇒ if (noHealthChecks) {
app.copy(healthChecks = Nil)
}
else app
).getOrElse(throw NotificationErrorException(InvalidArgumentValueError(arg), s"${arg.key} -> ${arg.value}"))
else app
case arg @ Argument("override.deployment.noHealthChecks", noHealthChecks) ⇒
app ⇒
if (!workflowDeployment)
Try(noHealthChecks.toBoolean).map(
noHealthChecks ⇒ if (noHealthChecks) {
app.copy(healthChecks = Nil)
}
else app
).getOrElse(throw NotificationErrorException(InvalidArgumentValueError(arg), s"${arg.key} -> ${arg.value}"))
else app
case Argument(argName, argValue) if argName.startsWith("override.workflow.labels.") ⇒
app ⇒
if (workflowDeployment) {
val labelName = argName.drop("override.workflow.labels.".length)
app.copy(labels = app.labels + (labelName → argValue))
}
else app
case Argument(argName, argValue) if argName.startsWith("override.deployment.labels.") ⇒
app ⇒
if (!workflowDeployment) {
val labelName = argName.drop("override.deployment.labels.".length)
app.copy(labels = app.labels + (labelName → argValue))
}
else app
}
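  // Illustrative example (argument value assumed): a workflow carrying
  //   Argument("override.workflow.docker.network", "USER")
  // is deployed with its docker network switched to USER and the host ports of its port
  // mappings cleared, while the same argument is ignored for regular deployments.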
private def deploy(deployment: Deployment, cluster: DeploymentCluster, service: DeploymentService, update: Boolean): Future[Any] = {
validateDeployable(service.breed.deployable)
val id = appId(deployment, service.breed)
val name = s"${deployment.name} / ${service.breed.deployable.definition}"
if (update) log.info(s"marathon update service: $name") else log.info(s"marathon create service: $name")
val constraints = (namespaceConstraint +: Nil).filter(_.nonEmpty)
log.info(s"Deploying Deployment and using Arguments : ${service.arguments}")
val app = MarathonApp(
id,
container(deployment, cluster, service.copy(arguments = service.arguments.filterNot(applyGlobalOverride(false).isDefinedAt))),
None,
service.scale.get.instances,
service.scale.get.cpu.value,
Math.round(service.scale.get.memory.value).toInt,
environment(deployment, cluster, service),
cmd(deployment, cluster, service),
healthChecks = retrieveHealthChecks(cluster, service).map(MarathonHealthCheck.apply(service.breed.ports, _)),
labels = labels(deployment, cluster, service),
constraints = constraints,
fetch = None
)
// Iterate through all Argument objects and if they represent an override, apply them
logger.info(s"MarathonDriverActor - ServiceDialect for deployment {} service dialect: {}", deployment.name, service.dialects.toString())
val appWithGlobalOverrides = service.arguments.foldLeft(app)((app, argument) ⇒
applyGlobalOverride(false).applyOrElse(argument, noGlobalOverride)(app))
val payload = requestPayload(deployment, cluster, service, purge(appWithGlobalOverrides))
log.info(s"MarathonDriverActor - Deploying $payload")
deploy(update, id, payload)
}
private def deploy(workflow: Workflow, update: Boolean): Future[Any] = {
val breed = workflow.breed.asInstanceOf[DefaultBreed]
validateDeployable(breed.deployable)
val id = appId(workflow)
if (update) log.info(s"marathon update workflow: ${workflow.name}") else log.info(s"marathon create workflow: ${workflow.name}")
val scale = workflow.scale.get.asInstanceOf[DefaultScale]
val constraints = (namespaceConstraint +: Nil).filter(_.nonEmpty)
log.info(s"Deploying Workflow and using Arguments:: ${workflow.arguments}")
val marathonApp = MarathonApp(
id,
container(workflow.copy(arguments = workflow.arguments.filterNot(applyGlobalOverride(true).isDefinedAt))),
None,
scale.instances,
scale.cpu.value,
Math.round(scale.memory.value).toInt,
environment(workflow),
cmd(workflow),
labels = labels(workflow),
healthChecks = retrieveHealthChecks(workflow).map(MarathonHealthCheck.apply(breed.ports, _)),
constraints = constraints,
fetch = None
)
// Iterate through all Argument objects and if they represent an override, apply them
val marathonAppWithGlobalOverrides = workflow.arguments.foldLeft(marathonApp)((app, argument) ⇒
applyGlobalOverride(true).applyOrElse(argument, noGlobalOverride)(app))
val toDeploy = requestPayload(workflow, purge(marathonAppWithGlobalOverrides))
log.info(s"Deploying ${workflow.name} with id $id")
deploy(update, id, toDeploy)
}
private def purge(app: MarathonApp): MarathonApp = {
    // Workaround: empty args may cause Marathon to reject the request, so remove args altogether.
if (app.args.isEmpty) app.copy(args = null) else app
}
private def deploy(update: Boolean, id: String, payload: JValue) = {
if (update) {
log.debug(s"MarathonDriverActor - marathon sending request: $id")
client.get(id).flatMap { response ⇒
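        // Compare the currently deployed app definition with the new payload and only
        // issue a PUT when the JSON diff reports a change; otherwise skip the update.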
val changed = Extraction.decompose(response).children.headOption match {
case Some(app) ⇒ app.diff(payload).changed
case None ⇒ payload
}
if (changed != JNothing) {
log.info(s"MarathonDriverActor - Changes detected in app $id configuration")
client.put(id, changed)
}
else {
log.info(s"MarathonDriverActor - Nothing has changed in app $id configuration")
Future.successful(false)
}
}
}
else client.post(id, payload)
}
private def container(workflow: Workflow): Option[Container] = {
if (DockerDeployableType.matches(workflow.breed.asInstanceOf[DefaultBreed].deployable)) Some(Container(docker(workflow))) else None
}
private def container(deployment: Deployment, cluster: DeploymentCluster, service: DeploymentService): Option[Container] = {
logger.info("service breed deployable {}", service.breed.deployable.toString)
if (DockerDeployableType.matches(service.breed.deployable)) Some(Container(docker(deployment, cluster, service))) else {
logger.info("MarathonDriverActor container deployable check for {}", deployment.name)
None
}
}
private def cmd(workflow: Workflow): Option[String] = {
if (CommandDeployableType.matches(workflow.breed.asInstanceOf[DefaultBreed].deployable)) Some(workflow.breed.asInstanceOf[DefaultBreed].deployable.definition) else None
}
private def cmd(deployment: Deployment, cluster: DeploymentCluster, service: DeploymentService): Option[String] = {
if (CommandDeployableType.matches(service.breed.deployable)) Some(service.breed.deployable.definition) else None
}
private def requestPayload(deployment: Deployment, cluster: DeploymentCluster, service: DeploymentService, app: MarathonApp): JValue = {
val (local, dialect) = (deployment.dialects.get(MarathonDriverActor.dialect), cluster.dialects.get(MarathonDriverActor.dialect), service.dialects.get(MarathonDriverActor.dialect)) match {
case (_, _, Some(d)) ⇒
logger.info("MarathonDriverActor - getting dialect from service {}", d)
Some(service) → d
case (_, Some(d), None) ⇒
logger.info("MarathonDriverActor - getting dialect from cluster {}", d)
None → d
case (Some(d), None, None) ⇒
logger.info("MarathonDriverActor - getting dialect from deployment {}", d)
None → d
case _ ⇒
logger.info("MarathonDriverActor - No dialect defined")
None → Map()
}
(app.container, app.cmd, dialect) match {
case (None, None, map: Map[_, _]) if map.asInstanceOf[Map[String, _]].get("cmd").nonEmpty ⇒
case (None, None, _) ⇒ throwException(UndefinedMarathonApplication)
case _ ⇒
}
val base = Extraction.decompose(app) match {
case JObject(l) ⇒ JObject(l.filter({ case (k, v) ⇒ k != "args" || v != JNull }))
case other ⇒ other
}
base merge Extraction.decompose(interpolate(deployment, local, dialect))
}
private def requestPayload(workflow: Workflow, app: MarathonApp): JValue = {
val dialect = workflow.dialects.getOrElse(MarathonDriverActor.dialect, Map())
(app.container, app.cmd, dialect) match {
case (None, None, map: Map[_, _]) if map.asInstanceOf[Map[String, _]].get("cmd").nonEmpty ⇒
case (None, None, _) ⇒ throwException(UndefinedMarathonApplication)
case _ ⇒
}
val base = Extraction.decompose(app) match {
case JObject(l) ⇒ JObject(l.filter({ case (k, v) ⇒ k != "args" || v != JNull }))
case other ⇒ other
}
Extraction.decompose(interpolate(workflow, dialect)) merge base
}
private def undeploy(deployment: Deployment, service: DeploymentService) = {
val id = appId(deployment, service.breed)
log.info(s"marathon delete app: $id")
client.delete(id)
}
private def undeploy(workflow: Workflow) = {
val id = appId(workflow)
log.info(s"marathon delete workflow: ${workflow.name}")
client.delete(id)
}
private def containers(app: App): Containers = {
log.info("[Marathon Driver . containers]")
val scale = DefaultScale(Quantity(app.cpus), MegaByte(app.mem), app.instances)
Containers(scale, instances(app))
}
private def instances(app: App): List[ContainerInstance] = app.tasks.map { task ⇒
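    // For USER / container networking use the task IP address together with the container
    // ports; otherwise fall back to the task host and the host ports assigned by Marathon.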
val portsAndIpForUserNetwork = for {
container ← app.container
docker ← container.docker
networkName ← Option(docker.network.getOrElse("")) // This is a hack to support 1.4 and 1.5 at the same time
ipAddressToUse ← task.ipAddresses.headOption
if (networkName == "USER"
|| app.networks.map(_.mode).contains("container"))
} yield (ipAddressToUse.ipAddress,
docker.portMappings.flatMap(_.containerPort) ++ container.portMappings.flatMap(_.containerPort))
portsAndIpForUserNetwork match {
case None ⇒
val network = Try(app.container.get.docker.get.network.get).getOrElse("Empty")
log.debug(s"Ports for ${task.id} => ${task.ports} network: $network")
ContainerInstance(task.id, task.host, task.ports, task.startedAt.isDefined)
case Some(portsAndIp) ⇒
val network = Try(app.container.get.docker.get.network.get).getOrElse("Empty")
log.debug(s"Ports (USER network) for ${task.id} => ${portsAndIp._2} network: $network")
ContainerInstance(task.id, portsAndIp._1, portsAndIp._2, task.startedAt.isDefined)
}
}
private def deploy(request: AnyRef): Future[Any] = {
implicit val formats: DefaultFormats = DefaultFormats
val r = parse(StringInput(request.toString))
val app = r.extract[MarathonApp]
log.info(s"Deploying ${app.id}")
deploy(update = true, app.id, r)
}
private def undeploy(request: AnyRef): Future[Any] = {
implicit val formats: DefaultFormats = DefaultFormats
val app = parse(StringInput(request.toString)).extract[MarathonApp]
log.info(s"marathon delete app: ${app.id}")
client.delete(app.id)
}
}
| magneticio/vamp | dcos/src/main/scala/io/vamp/container_driver/marathon/MarathonDriverActor.scala | Scala | apache-2.0 | 24,715 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.io
import hbparquet.hadoop.util.ContextUtil
import htsjdk.samtools.BAMRecordCodec
import htsjdk.samtools.ValidationStringency
import htsjdk.samtools.SAMRecord
import htsjdk.samtools.util.BlockCompressedInputStream
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.fs.FSDataInputStream
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.LongWritable
import org.apache.hadoop.mapreduce.InputSplit
import org.apache.hadoop.mapreduce.RecordReader
import org.apache.hadoop.mapreduce.TaskAttemptContext
import org.bdgenomics.adam.models.ReferenceRegion
import org.seqdoop.hadoop_bam.util.SAMHeaderReader
import org.seqdoop.hadoop_bam.util.WrapSeekable
import org.seqdoop.hadoop_bam.SAMRecordWritable
import org.seqdoop.hadoop_bam.BAMRecordReader
import org.seqdoop.hadoop_bam.FileVirtualSplit
import org.seqdoop.hadoop_bam.BAMInputFormat
import scala.annotation.tailrec
object BAMFilteredRecordReader {
private var optViewRegion: Option[ReferenceRegion] = None
def setRegion(viewRegion: ReferenceRegion) {
optViewRegion = Some(viewRegion)
}
}
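// Minimal usage sketch (illustrative only; region values and job wiring are assumed):
// the filter region is static, so it must be set before the reader starts producing
// key/value pairs, e.g.
//   BAMFilteredRecordReader.setRegion(ReferenceRegion("1", 10000L, 20000L))
// after which nextKeyValue() only yields reads overlapping 1:10000-20000.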
/**
* Scala implementation of BAMRecordReader, but with
* nextKeyValue() that filters by ReferenceRegion
*/
class BAMFilteredRecordReader extends BAMRecordReader {
val key: LongWritable = new LongWritable()
val record: SAMRecordWritable = new SAMRecordWritable
var stringency: ValidationStringency = _
var bci: BlockCompressedInputStream = _
var codec: BAMRecordCodec = _
var fileStart: Long = _
var virtualEnd: Long = _
var isInitialized: Boolean = false
override def initialize(spl: InputSplit, ctx: TaskAttemptContext) {
    // Check to ensure this method is only called once (see Hadoop API)
if (isInitialized) {
close()
}
isInitialized = true
val conf: Configuration = ContextUtil.getConfiguration(ctx)
val split: FileVirtualSplit = spl.asInstanceOf[FileVirtualSplit]
val file: Path = split.getPath()
val fs: FileSystem = file.getFileSystem(conf)
this.stringency = SAMHeaderReader.getValidationStringency(conf)
val in: FSDataInputStream = fs.open(file)
// Sets codec to translate between in-memory and disk representation of record
codec = new BAMRecordCodec(SAMHeaderReader.readSAMHeaderFrom(in, conf))
in.seek(0)
bci = new BlockCompressedInputStream(
new WrapSeekable[FSDataInputStream](
in, fs.getFileStatus(file).getLen(), file))
// Gets BGZF virtual offset for the split
val virtualStart = split.getStartVirtualOffset()
fileStart = virtualStart >>> 16
virtualEnd = split.getEndVirtualOffset()
// Starts looking from the BGZF virtual offset
bci.seek(virtualStart)
// Reads records from this input stream
codec.setInputStream(bci)
}
override def close() = {
bci.close()
}
override def getCurrentKey(): LongWritable = {
key
}
override def getCurrentValue(): SAMRecordWritable = {
record
}
/**
   * Gets the next key/value pair for this RecordReader, but only returns records
   * that fall within the specified ReferenceRegion.
   * The function is tail recursive to avoid stack overflow when matching records
   * are sparse.
*/
@tailrec final override def nextKeyValue(): Boolean = {
if (bci.getFilePointer() >= virtualEnd) {
false
} else {
val r: SAMRecord = codec.decode()
// Since we're reading from a BAMRecordCodec directly we have to set the
// validation stringency ourselves.
if (this.stringency != null) {
r.setValidationStringency(this.stringency)
}
      // This if/else block pushes the predicate down onto a BGZF block that
      // the index has said contains data in our specified region.
if (r == null) {
false
} else {
val start = r.getStart
val end = r.getEnd
val refReg = BAMFilteredRecordReader.optViewRegion.get
val regStart = refReg.start
val regEnd = refReg.end
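        // Keep the record if it lies entirely inside the region, or if it overlaps the
        // region's start or end boundary.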
if ((r.getContig() == refReg.referenceName) &&
(((start >= regStart) && (end <= regEnd))
|| ((start <= regStart) && (end >= regStart) && (end <= regEnd))
|| ((end >= regEnd) && (start >= regStart) && (start <= regEnd)))) {
key.set(BAMRecordReader.getKey(r))
record.set(r)
true
} else {
nextKeyValue()
}
}
}
}
}
| VinACE/adam | adam-core/src/main/java/org/bdgenomics/adam/io/BAMFilteredRecordReader.scala | Scala | apache-2.0 | 5,261 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.sql.Timestamp
import java.text.DateFormat
import java.util.{Calendar, TimeZone}
import scala.util.control.NonFatal
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, CodegenFallback, ExprCode}
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.{CalendarInterval, UTF8String}
/**
* Common base class for time zone aware expressions.
*/
trait TimeZoneAwareExpression extends Expression {
/** The expression is only resolved when the time zone has been set. */
override lazy val resolved: Boolean =
childrenResolved && checkInputDataTypes().isSuccess && timeZoneId.isDefined
  /** The time zone ID used when evaluating the value. */
def timeZoneId: Option[String]
/** Returns a copy of this expression with the specified timeZoneId. */
def withTimeZone(timeZoneId: String): TimeZoneAwareExpression
@transient lazy val timeZone: TimeZone = DateTimeUtils.getTimeZone(timeZoneId.get)
}
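// Illustrative sketch (literal value assumed, not part of the original source): a time zone
// aware expression is not resolved until a time zone id is supplied, typically by the analyzer:
//   val h = Hour(Literal(1262304000000000L, TimestampType))  // h.resolved == false
//   val resolved = h.withTimeZone("UTC")                     // carries a time zone, can resolve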
/**
* Returns the current date at the start of query evaluation.
* All calls of current_date within the same query return the same value.
*
* There is no code generation since this expression should get constant folded by the optimizer.
*/
@ExpressionDescription(
usage = "_FUNC_() - Returns the current date at the start of query evaluation.")
case class CurrentDate(timeZoneId: Option[String] = None)
extends LeafExpression with TimeZoneAwareExpression with CodegenFallback {
def this() = this(None)
override def foldable: Boolean = true
override def nullable: Boolean = false
override def dataType: DataType = DateType
override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression =
copy(timeZoneId = Option(timeZoneId))
override def eval(input: InternalRow): Any = {
DateTimeUtils.millisToDays(System.currentTimeMillis(), timeZone)
}
override def prettyName: String = "current_date"
}
/**
* Returns the current timestamp at the start of query evaluation.
* All calls of current_timestamp within the same query return the same value.
*
* There is no code generation since this expression should get constant folded by the optimizer.
*/
@ExpressionDescription(
usage = "_FUNC_() - Returns the current timestamp at the start of query evaluation.")
case class CurrentTimestamp() extends LeafExpression with CodegenFallback {
override def foldable: Boolean = true
override def nullable: Boolean = false
override def dataType: DataType = TimestampType
override def eval(input: InternalRow): Any = {
System.currentTimeMillis() * 1000L
}
override def prettyName: String = "current_timestamp"
}
/**
* Expression representing the current batch time, which is used by StreamExecution to
* 1. prevent optimizer from pushing this expression below a stateful operator
* 2. allow IncrementalExecution to substitute this expression with a Literal(timestamp)
*
* There is no code generation since this expression should be replaced with a literal.
*/
case class CurrentBatchTimestamp(
timestampMs: Long,
dataType: DataType,
timeZoneId: Option[String] = None)
extends LeafExpression with TimeZoneAwareExpression with Nondeterministic with CodegenFallback {
def this(timestampMs: Long, dataType: DataType) = this(timestampMs, dataType, None)
override def nullable: Boolean = false
override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression =
copy(timeZoneId = Option(timeZoneId))
override def prettyName: String = "current_batch_timestamp"
override protected def initializeInternal(partitionIndex: Int): Unit = {}
/**
   * Need to return a literal value in order to support compile-time expression evaluation,
   * e.g., select(current_date())
*/
override protected def evalInternal(input: InternalRow): Any = toLiteral.value
def toLiteral: Literal = dataType match {
case _: TimestampType =>
Literal(DateTimeUtils.fromJavaTimestamp(new Timestamp(timestampMs)), TimestampType)
case _: DateType => Literal(DateTimeUtils.millisToDays(timestampMs, timeZone), DateType)
}
}
/**
* Adds a number of days to startdate.
*/
@ExpressionDescription(
usage = "_FUNC_(start_date, num_days) - Returns the date that is `num_days` after `start_date`.",
extended = """
Examples:
> SELECT _FUNC_('2016-07-30', 1);
2016-07-31
""")
case class DateAdd(startDate: Expression, days: Expression)
extends BinaryExpression with ImplicitCastInputTypes {
override def left: Expression = startDate
override def right: Expression = days
override def inputTypes: Seq[AbstractDataType] = Seq(DateType, IntegerType)
override def dataType: DataType = DateType
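  // DateType values are stored internally as the number of days since the Unix epoch,
  // so adding days reduces to plain integer addition.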
override def nullSafeEval(start: Any, d: Any): Any = {
start.asInstanceOf[Int] + d.asInstanceOf[Int]
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, (sd, d) => {
s"""${ev.value} = $sd + $d;"""
})
}
override def prettyName: String = "date_add"
}
/**
 * Subtracts a number of days from startdate.
*/
@ExpressionDescription(
usage = "_FUNC_(start_date, num_days) - Returns the date that is `num_days` before `start_date`.",
extended = """
Examples:
> SELECT _FUNC_('2016-07-30', 1);
2016-07-29
""")
case class DateSub(startDate: Expression, days: Expression)
extends BinaryExpression with ImplicitCastInputTypes {
override def left: Expression = startDate
override def right: Expression = days
override def inputTypes: Seq[AbstractDataType] = Seq(DateType, IntegerType)
override def dataType: DataType = DateType
override def nullSafeEval(start: Any, d: Any): Any = {
start.asInstanceOf[Int] - d.asInstanceOf[Int]
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, (sd, d) => {
s"""${ev.value} = $sd - $d;"""
})
}
override def prettyName: String = "date_sub"
}
@ExpressionDescription(
usage = "_FUNC_(timestamp) - Returns the hour component of the string/timestamp.",
extended = """
Examples:
> SELECT _FUNC_('2009-07-30 12:58:59');
12
""")
case class Hour(child: Expression, timeZoneId: Option[String] = None)
extends UnaryExpression with TimeZoneAwareExpression with ImplicitCastInputTypes {
def this(child: Expression) = this(child, None)
override def inputTypes: Seq[AbstractDataType] = Seq(TimestampType)
override def dataType: DataType = IntegerType
override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression =
copy(timeZoneId = Option(timeZoneId))
override protected def nullSafeEval(timestamp: Any): Any = {
DateTimeUtils.getHours(timestamp.asInstanceOf[Long], timeZone)
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val tz = ctx.addReferenceMinorObj(timeZone)
val dtu = DateTimeUtils.getClass.getName.stripSuffix("$")
defineCodeGen(ctx, ev, c => s"$dtu.getHours($c, $tz)")
}
}
@ExpressionDescription(
usage = "_FUNC_(timestamp) - Returns the minute component of the string/timestamp.",
extended = """
Examples:
> SELECT _FUNC_('2009-07-30 12:58:59');
58
""")
case class Minute(child: Expression, timeZoneId: Option[String] = None)
extends UnaryExpression with TimeZoneAwareExpression with ImplicitCastInputTypes {
def this(child: Expression) = this(child, None)
override def inputTypes: Seq[AbstractDataType] = Seq(TimestampType)
override def dataType: DataType = IntegerType
override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression =
copy(timeZoneId = Option(timeZoneId))
override protected def nullSafeEval(timestamp: Any): Any = {
DateTimeUtils.getMinutes(timestamp.asInstanceOf[Long], timeZone)
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val tz = ctx.addReferenceMinorObj(timeZone)
val dtu = DateTimeUtils.getClass.getName.stripSuffix("$")
defineCodeGen(ctx, ev, c => s"$dtu.getMinutes($c, $tz)")
}
}
@ExpressionDescription(
usage = "_FUNC_(timestamp) - Returns the second component of the string/timestamp.",
extended = """
Examples:
> SELECT _FUNC_('2009-07-30 12:58:59');
59
""")
case class Second(child: Expression, timeZoneId: Option[String] = None)
extends UnaryExpression with TimeZoneAwareExpression with ImplicitCastInputTypes {
def this(child: Expression) = this(child, None)
override def inputTypes: Seq[AbstractDataType] = Seq(TimestampType)
override def dataType: DataType = IntegerType
override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression =
copy(timeZoneId = Option(timeZoneId))
override protected def nullSafeEval(timestamp: Any): Any = {
DateTimeUtils.getSeconds(timestamp.asInstanceOf[Long], timeZone)
}
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val tz = ctx.addReferenceMinorObj(timeZone)
val dtu = DateTimeUtils.getClass.getName.stripSuffix("$")
defineCodeGen(ctx, ev, c => s"$dtu.getSeconds($c, $tz)")
}
}
@ExpressionDescription(
usage = "_FUNC_(date) - Returns the day of year of the date/timestamp.",
extended = """
Examples:
> SELECT _FUNC_('2016-04-09');
100
""")
case class DayOfYear(child: Expression) extends UnaryExpression with ImplicitCastInputTypes {
override def inputTypes: Seq[AbstractDataType] = Seq(DateType)
override def dataType: DataType = IntegerType
override protected def nullSafeEval(date: Any): Any = {
DateTimeUtils.getDayInYear(date.asInstanceOf[Int])
}
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val dtu = DateTimeUtils.getClass.getName.stripSuffix("$")
defineCodeGen(ctx, ev, c => s"$dtu.getDayInYear($c)")
}
}
@ExpressionDescription(
usage = "_FUNC_(date) - Returns the year component of the date/timestamp.",
extended = """
Examples:
> SELECT _FUNC_('2016-07-30');
2016
""")
case class Year(child: Expression) extends UnaryExpression with ImplicitCastInputTypes {
override def inputTypes: Seq[AbstractDataType] = Seq(DateType)
override def dataType: DataType = IntegerType
override protected def nullSafeEval(date: Any): Any = {
DateTimeUtils.getYear(date.asInstanceOf[Int])
}
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val dtu = DateTimeUtils.getClass.getName.stripSuffix("$")
defineCodeGen(ctx, ev, c => s"$dtu.getYear($c)")
}
}
@ExpressionDescription(
usage = "_FUNC_(date) - Returns the quarter of the year for date, in the range 1 to 4.",
extended = """
Examples:
> SELECT _FUNC_('2016-08-31');
3
""")
case class Quarter(child: Expression) extends UnaryExpression with ImplicitCastInputTypes {
override def inputTypes: Seq[AbstractDataType] = Seq(DateType)
override def dataType: DataType = IntegerType
override protected def nullSafeEval(date: Any): Any = {
DateTimeUtils.getQuarter(date.asInstanceOf[Int])
}
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val dtu = DateTimeUtils.getClass.getName.stripSuffix("$")
defineCodeGen(ctx, ev, c => s"$dtu.getQuarter($c)")
}
}
@ExpressionDescription(
usage = "_FUNC_(date) - Returns the month component of the date/timestamp.",
extended = """
Examples:
> SELECT _FUNC_('2016-07-30');
7
""")
case class Month(child: Expression) extends UnaryExpression with ImplicitCastInputTypes {
override def inputTypes: Seq[AbstractDataType] = Seq(DateType)
override def dataType: DataType = IntegerType
override protected def nullSafeEval(date: Any): Any = {
DateTimeUtils.getMonth(date.asInstanceOf[Int])
}
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val dtu = DateTimeUtils.getClass.getName.stripSuffix("$")
defineCodeGen(ctx, ev, c => s"$dtu.getMonth($c)")
}
}
@ExpressionDescription(
usage = "_FUNC_(date) - Returns the day of month of the date/timestamp.",
extended = """
Examples:
> SELECT _FUNC_('2009-07-30');
30
""")
case class DayOfMonth(child: Expression) extends UnaryExpression with ImplicitCastInputTypes {
override def inputTypes: Seq[AbstractDataType] = Seq(DateType)
override def dataType: DataType = IntegerType
override protected def nullSafeEval(date: Any): Any = {
DateTimeUtils.getDayOfMonth(date.asInstanceOf[Int])
}
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val dtu = DateTimeUtils.getClass.getName.stripSuffix("$")
defineCodeGen(ctx, ev, c => s"$dtu.getDayOfMonth($c)")
}
}
@ExpressionDescription(
usage = "_FUNC_(date) - Returns the week of the year of the given date.",
extended = """
Examples:
> SELECT _FUNC_('2008-02-20');
8
""")
case class WeekOfYear(child: Expression) extends UnaryExpression with ImplicitCastInputTypes {
override def inputTypes: Seq[AbstractDataType] = Seq(DateType)
override def dataType: DataType = IntegerType
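  // ISO 8601 week numbering: weeks start on Monday and the first week of the year is the
  // one containing at least four days of that year.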
@transient private lazy val c = {
val c = Calendar.getInstance(DateTimeUtils.getTimeZone("UTC"))
c.setFirstDayOfWeek(Calendar.MONDAY)
c.setMinimalDaysInFirstWeek(4)
c
}
override protected def nullSafeEval(date: Any): Any = {
c.setTimeInMillis(date.asInstanceOf[Int] * 1000L * 3600L * 24L)
c.get(Calendar.WEEK_OF_YEAR)
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, time => {
val cal = classOf[Calendar].getName
val c = ctx.freshName("cal")
val dtu = DateTimeUtils.getClass.getName.stripSuffix("$")
ctx.addMutableState(cal, c,
s"""
$c = $cal.getInstance($dtu.getTimeZone("UTC"));
$c.setFirstDayOfWeek($cal.MONDAY);
$c.setMinimalDaysInFirstWeek(4);
""")
s"""
$c.setTimeInMillis($time * 1000L * 3600L * 24L);
${ev.value} = $c.get($cal.WEEK_OF_YEAR);
"""
})
}
}
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(timestamp, fmt) - Converts `timestamp` to a value of string in the format specified by the date format `fmt`.",
extended = """
Examples:
> SELECT _FUNC_('2016-04-08', 'y');
2016
""")
// scalastyle:on line.size.limit
case class DateFormatClass(left: Expression, right: Expression, timeZoneId: Option[String] = None)
extends BinaryExpression with TimeZoneAwareExpression with ImplicitCastInputTypes {
def this(left: Expression, right: Expression) = this(left, right, None)
override def dataType: DataType = StringType
override def inputTypes: Seq[AbstractDataType] = Seq(TimestampType, StringType)
override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression =
copy(timeZoneId = Option(timeZoneId))
override protected def nullSafeEval(timestamp: Any, format: Any): Any = {
val df = DateTimeUtils.newDateFormat(format.toString, timeZone)
UTF8String.fromString(df.format(new java.util.Date(timestamp.asInstanceOf[Long] / 1000)))
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val dtu = DateTimeUtils.getClass.getName.stripSuffix("$")
val tz = ctx.addReferenceMinorObj(timeZone)
defineCodeGen(ctx, ev, (timestamp, format) => {
s"""UTF8String.fromString($dtu.newDateFormat($format.toString(), $tz)
.format(new java.util.Date($timestamp / 1000)))"""
})
}
override def prettyName: String = "date_format"
}
/**
 * Converts a time string with the given pattern to a Unix timestamp (in seconds).
 * Deterministic version of [[UnixTimestamp]]; it must have at least one parameter.
*/
@ExpressionDescription(
usage = "_FUNC_(expr[, pattern]) - Returns the UNIX timestamp of the given time.",
extended = """
Examples:
> SELECT _FUNC_('2016-04-08', 'yyyy-MM-dd');
1460041200
""")
case class ToUnixTimestamp(
timeExp: Expression,
format: Expression,
timeZoneId: Option[String] = None)
extends UnixTime {
def this(timeExp: Expression, format: Expression) = this(timeExp, format, None)
override def left: Expression = timeExp
override def right: Expression = format
override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression =
copy(timeZoneId = Option(timeZoneId))
def this(time: Expression) = {
this(time, Literal("yyyy-MM-dd HH:mm:ss"))
}
override def prettyName: String = "to_unix_timestamp"
}
/**
 * Converts a time string with the given pattern
 * (see [http://docs.oracle.com/javase/tutorial/i18n/format/simpleDateFormat.html])
 * to a Unix timestamp (in seconds); returns null on failure.
 * Note that the Hive Language Manual says it returns 0 on failure, but in fact it returns null.
 * If the second parameter is missing, "yyyy-MM-dd HH:mm:ss" is used.
 * If no parameters are provided, the first parameter defaults to current_timestamp.
 * If the first parameter is a Date or Timestamp instead of a String, the second
 * parameter is ignored.
*/
@ExpressionDescription(
usage = "_FUNC_([expr[, pattern]]) - Returns the UNIX timestamp of current or specified time.",
extended = """
Examples:
> SELECT _FUNC_();
1476884637
> SELECT _FUNC_('2016-04-08', 'yyyy-MM-dd');
1460041200
""")
case class UnixTimestamp(timeExp: Expression, format: Expression, timeZoneId: Option[String] = None)
extends UnixTime {
def this(timeExp: Expression, format: Expression) = this(timeExp, format, None)
override def left: Expression = timeExp
override def right: Expression = format
override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression =
copy(timeZoneId = Option(timeZoneId))
def this(time: Expression) = {
this(time, Literal("yyyy-MM-dd HH:mm:ss"))
}
def this() = {
this(CurrentTimestamp())
}
override def prettyName: String = "unix_timestamp"
}
abstract class UnixTime
extends BinaryExpression with TimeZoneAwareExpression with ExpectsInputTypes {
override def inputTypes: Seq[AbstractDataType] =
Seq(TypeCollection(StringType, DateType, TimestampType), StringType)
override def dataType: DataType = LongType
override def nullable: Boolean = true
private lazy val constFormat: UTF8String = right.eval().asInstanceOf[UTF8String]
private lazy val formatter: DateFormat =
try {
DateTimeUtils.newDateFormat(constFormat.toString, timeZone)
} catch {
case NonFatal(_) => null
}
override def eval(input: InternalRow): Any = {
val t = left.eval(input)
if (t == null) {
null
} else {
left.dataType match {
case DateType =>
DateTimeUtils.daysToMillis(t.asInstanceOf[Int], timeZone) / 1000L
case TimestampType =>
t.asInstanceOf[Long] / 1000000L
case StringType if right.foldable =>
if (constFormat == null || formatter == null) {
null
} else {
try {
formatter.parse(
t.asInstanceOf[UTF8String].toString).getTime / 1000L
} catch {
case NonFatal(_) => null
}
}
case StringType =>
val f = right.eval(input)
if (f == null) {
null
} else {
val formatString = f.asInstanceOf[UTF8String].toString
try {
DateTimeUtils.newDateFormat(formatString, timeZone).parse(
t.asInstanceOf[UTF8String].toString).getTime / 1000L
} catch {
case NonFatal(_) => null
}
}
}
}
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
left.dataType match {
case StringType if right.foldable =>
val df = classOf[DateFormat].getName
if (formatter == null) {
ExprCode("", "true", ctx.defaultValue(dataType))
} else {
val formatterName = ctx.addReferenceObj("formatter", formatter, df)
val eval1 = left.genCode(ctx)
ev.copy(code = s"""
${eval1.code}
boolean ${ev.isNull} = ${eval1.isNull};
${ctx.javaType(dataType)} ${ev.value} = ${ctx.defaultValue(dataType)};
if (!${ev.isNull}) {
try {
${ev.value} = $formatterName.parse(${eval1.value}.toString()).getTime() / 1000L;
} catch (java.text.ParseException e) {
${ev.isNull} = true;
}
}""")
}
case StringType =>
val tz = ctx.addReferenceMinorObj(timeZone)
val dtu = DateTimeUtils.getClass.getName.stripSuffix("$")
nullSafeCodeGen(ctx, ev, (string, format) => {
s"""
try {
${ev.value} = $dtu.newDateFormat($format.toString(), $tz)
.parse($string.toString()).getTime() / 1000L;
} catch (java.lang.IllegalArgumentException e) {
${ev.isNull} = true;
} catch (java.text.ParseException e) {
${ev.isNull} = true;
}
"""
})
case TimestampType =>
val eval1 = left.genCode(ctx)
ev.copy(code = s"""
${eval1.code}
boolean ${ev.isNull} = ${eval1.isNull};
${ctx.javaType(dataType)} ${ev.value} = ${ctx.defaultValue(dataType)};
if (!${ev.isNull}) {
${ev.value} = ${eval1.value} / 1000000L;
}""")
case DateType =>
val tz = ctx.addReferenceMinorObj(timeZone)
val dtu = DateTimeUtils.getClass.getName.stripSuffix("$")
val eval1 = left.genCode(ctx)
ev.copy(code = s"""
${eval1.code}
boolean ${ev.isNull} = ${eval1.isNull};
${ctx.javaType(dataType)} ${ev.value} = ${ctx.defaultValue(dataType)};
if (!${ev.isNull}) {
${ev.value} = $dtu.daysToMillis(${eval1.value}, $tz) / 1000L;
}""")
}
}
}
/**
 * Converts the number of seconds from the Unix epoch (1970-01-01 00:00:00 UTC) to a string
 * representing the timestamp of that moment in the current system time zone in the given
 * format. If the format is missing, the default "yyyy-MM-dd HH:mm:ss" (e.g. "1970-01-01 00:00:00") is used.
 * Note that the Hive Language Manual says it returns 0 on failure, but in fact it returns null.
*/
@ExpressionDescription(
usage = "_FUNC_(unix_time, format) - Returns `unix_time` in the specified `format`.",
extended = """
Examples:
> SELECT _FUNC_(0, 'yyyy-MM-dd HH:mm:ss');
1970-01-01 00:00:00
""")
case class FromUnixTime(sec: Expression, format: Expression, timeZoneId: Option[String] = None)
extends BinaryExpression with TimeZoneAwareExpression with ImplicitCastInputTypes {
def this(sec: Expression, format: Expression) = this(sec, format, None)
override def left: Expression = sec
override def right: Expression = format
override def prettyName: String = "from_unixtime"
def this(unix: Expression) = {
this(unix, Literal("yyyy-MM-dd HH:mm:ss"))
}
override def dataType: DataType = StringType
override def nullable: Boolean = true
override def inputTypes: Seq[AbstractDataType] = Seq(LongType, StringType)
override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression =
copy(timeZoneId = Option(timeZoneId))
private lazy val constFormat: UTF8String = right.eval().asInstanceOf[UTF8String]
private lazy val formatter: DateFormat =
try {
DateTimeUtils.newDateFormat(constFormat.toString, timeZone)
} catch {
case NonFatal(_) => null
}
override def eval(input: InternalRow): Any = {
val time = left.eval(input)
if (time == null) {
null
} else {
if (format.foldable) {
if (constFormat == null || formatter == null) {
null
} else {
try {
UTF8String.fromString(formatter.format(
new java.util.Date(time.asInstanceOf[Long] * 1000L)))
} catch {
case NonFatal(_) => null
}
}
} else {
val f = format.eval(input)
if (f == null) {
null
} else {
try {
UTF8String.fromString(DateTimeUtils.newDateFormat(f.toString, timeZone)
.format(new java.util.Date(time.asInstanceOf[Long] * 1000L)))
} catch {
case NonFatal(_) => null
}
}
}
}
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val df = classOf[DateFormat].getName
if (format.foldable) {
if (formatter == null) {
ExprCode("", "true", "(UTF8String) null")
} else {
val formatterName = ctx.addReferenceObj("formatter", formatter, df)
val t = left.genCode(ctx)
ev.copy(code = s"""
${t.code}
boolean ${ev.isNull} = ${t.isNull};
${ctx.javaType(dataType)} ${ev.value} = ${ctx.defaultValue(dataType)};
if (!${ev.isNull}) {
try {
${ev.value} = UTF8String.fromString($formatterName.format(
new java.util.Date(${t.value} * 1000L)));
} catch (java.lang.IllegalArgumentException e) {
${ev.isNull} = true;
}
}""")
}
} else {
val tz = ctx.addReferenceMinorObj(timeZone)
val dtu = DateTimeUtils.getClass.getName.stripSuffix("$")
nullSafeCodeGen(ctx, ev, (seconds, f) => {
s"""
try {
${ev.value} = UTF8String.fromString($dtu.newDateFormat($f.toString(), $tz).format(
new java.util.Date($seconds * 1000L)));
} catch (java.lang.IllegalArgumentException e) {
${ev.isNull} = true;
}"""
})
}
}
}
/**
* Returns the last day of the month which the date belongs to.
*/
@ExpressionDescription(
usage = "_FUNC_(date) - Returns the last day of the month which the date belongs to.",
extended = """
Examples:
> SELECT _FUNC_('2009-01-12');
2009-01-31
""")
case class LastDay(startDate: Expression) extends UnaryExpression with ImplicitCastInputTypes {
override def child: Expression = startDate
override def inputTypes: Seq[AbstractDataType] = Seq(DateType)
override def dataType: DataType = DateType
override def nullSafeEval(date: Any): Any = {
DateTimeUtils.getLastDayOfMonth(date.asInstanceOf[Int])
}
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val dtu = DateTimeUtils.getClass.getName.stripSuffix("$")
defineCodeGen(ctx, ev, sd => s"$dtu.getLastDayOfMonth($sd)")
}
override def prettyName: String = "last_day"
}
/**
* Returns the first date which is later than startDate and named as dayOfWeek.
* For example, NextDay(2015-07-27, Sunday) would return 2015-08-02, which is the first
* Sunday later than 2015-07-27.
*
* Allowed "dayOfWeek" is defined in [[DateTimeUtils.getDayOfWeekFromString]].
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(start_date, day_of_week) - Returns the first date which is later than `start_date` and named as indicated.",
extended = """
Examples:
> SELECT _FUNC_('2015-01-14', 'TU');
2015-01-20
""")
// scalastyle:on line.size.limit
case class NextDay(startDate: Expression, dayOfWeek: Expression)
extends BinaryExpression with ImplicitCastInputTypes {
override def left: Expression = startDate
override def right: Expression = dayOfWeek
override def inputTypes: Seq[AbstractDataType] = Seq(DateType, StringType)
override def dataType: DataType = DateType
override def nullable: Boolean = true
override def nullSafeEval(start: Any, dayOfW: Any): Any = {
val dow = DateTimeUtils.getDayOfWeekFromString(dayOfW.asInstanceOf[UTF8String])
if (dow == -1) {
null
} else {
val sd = start.asInstanceOf[Int]
DateTimeUtils.getNextDateForDayOfWeek(sd, dow)
}
}
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, (sd, dowS) => {
val dateTimeUtilClass = DateTimeUtils.getClass.getName.stripSuffix("$")
val dayOfWeekTerm = ctx.freshName("dayOfWeek")
if (dayOfWeek.foldable) {
val input = dayOfWeek.eval().asInstanceOf[UTF8String]
if ((input eq null) || DateTimeUtils.getDayOfWeekFromString(input) == -1) {
s"""
|${ev.isNull} = true;
""".stripMargin
} else {
val dayOfWeekValue = DateTimeUtils.getDayOfWeekFromString(input)
s"""
|${ev.value} = $dateTimeUtilClass.getNextDateForDayOfWeek($sd, $dayOfWeekValue);
""".stripMargin
}
} else {
s"""
|int $dayOfWeekTerm = $dateTimeUtilClass.getDayOfWeekFromString($dowS);
|if ($dayOfWeekTerm == -1) {
| ${ev.isNull} = true;
|} else {
| ${ev.value} = $dateTimeUtilClass.getNextDateForDayOfWeek($sd, $dayOfWeekTerm);
|}
""".stripMargin
}
})
}
override def prettyName: String = "next_day"
}
/**
* Adds an interval to timestamp.
*/
case class TimeAdd(start: Expression, interval: Expression, timeZoneId: Option[String] = None)
extends BinaryExpression with TimeZoneAwareExpression with ImplicitCastInputTypes {
def this(start: Expression, interval: Expression) = this(start, interval, None)
override def left: Expression = start
override def right: Expression = interval
override def toString: String = s"$left + $right"
override def sql: String = s"${left.sql} + ${right.sql}"
override def inputTypes: Seq[AbstractDataType] = Seq(TimestampType, CalendarIntervalType)
override def dataType: DataType = TimestampType
override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression =
copy(timeZoneId = Option(timeZoneId))
override def nullSafeEval(start: Any, interval: Any): Any = {
val itvl = interval.asInstanceOf[CalendarInterval]
DateTimeUtils.timestampAddInterval(
start.asInstanceOf[Long], itvl.months, itvl.microseconds, timeZone)
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val tz = ctx.addReferenceMinorObj(timeZone)
val dtu = DateTimeUtils.getClass.getName.stripSuffix("$")
defineCodeGen(ctx, ev, (sd, i) => {
s"""$dtu.timestampAddInterval($sd, $i.months, $i.microseconds, $tz)"""
})
}
}
/**
* Given a timestamp, which corresponds to a certain time of day in UTC, returns another timestamp
* that corresponds to the same time of day in the given timezone.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(timestamp, timezone) - Given a timestamp, which corresponds to a certain time of day in UTC, returns another timestamp that corresponds to the same time of day in the given timezone.",
extended = """
Examples:
> SELECT from_utc_timestamp('2016-08-31', 'Asia/Seoul');
2016-08-31 09:00:00
""")
// scalastyle:on line.size.limit
case class FromUTCTimestamp(left: Expression, right: Expression)
extends BinaryExpression with ImplicitCastInputTypes {
override def inputTypes: Seq[AbstractDataType] = Seq(TimestampType, StringType)
override def dataType: DataType = TimestampType
override def prettyName: String = "from_utc_timestamp"
override def nullSafeEval(time: Any, timezone: Any): Any = {
DateTimeUtils.fromUTCTime(time.asInstanceOf[Long],
timezone.asInstanceOf[UTF8String].toString)
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val dtu = DateTimeUtils.getClass.getName.stripSuffix("$")
if (right.foldable) {
val tz = right.eval()
if (tz == null) {
ev.copy(code = s"""
|boolean ${ev.isNull} = true;
|long ${ev.value} = 0;
""".stripMargin)
} else {
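        // The time zone is a literal: resolve both TimeZone objects once and cache them as
        // mutable state so they are not re-parsed for every row.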
val tzTerm = ctx.freshName("tz")
val utcTerm = ctx.freshName("utc")
val tzClass = classOf[TimeZone].getName
val dtu = DateTimeUtils.getClass.getName.stripSuffix("$")
ctx.addMutableState(tzClass, tzTerm, s"""$tzTerm = $dtu.getTimeZone("$tz");""")
ctx.addMutableState(tzClass, utcTerm, s"""$utcTerm = $dtu.getTimeZone("UTC");""")
val eval = left.genCode(ctx)
ev.copy(code = s"""
|${eval.code}
|boolean ${ev.isNull} = ${eval.isNull};
|long ${ev.value} = 0;
|if (!${ev.isNull}) {
| ${ev.value} = $dtu.convertTz(${eval.value}, $utcTerm, $tzTerm);
|}
""".stripMargin)
}
} else {
defineCodeGen(ctx, ev, (timestamp, format) => {
s"""$dtu.fromUTCTime($timestamp, $format.toString())"""
})
}
}
}
/**
* Subtracts an interval from timestamp.
*/
case class TimeSub(start: Expression, interval: Expression, timeZoneId: Option[String] = None)
extends BinaryExpression with TimeZoneAwareExpression with ImplicitCastInputTypes {
def this(start: Expression, interval: Expression) = this(start, interval, None)
override def left: Expression = start
override def right: Expression = interval
override def toString: String = s"$left - $right"
override def sql: String = s"${left.sql} - ${right.sql}"
override def inputTypes: Seq[AbstractDataType] = Seq(TimestampType, CalendarIntervalType)
override def dataType: DataType = TimestampType
override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression =
copy(timeZoneId = Option(timeZoneId))
override def nullSafeEval(start: Any, interval: Any): Any = {
val itvl = interval.asInstanceOf[CalendarInterval]
DateTimeUtils.timestampAddInterval(
start.asInstanceOf[Long], 0 - itvl.months, 0 - itvl.microseconds, timeZone)
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val tz = ctx.addReferenceMinorObj(timeZone)
val dtu = DateTimeUtils.getClass.getName.stripSuffix("$")
defineCodeGen(ctx, ev, (sd, i) => {
s"""$dtu.timestampAddInterval($sd, 0 - $i.months, 0 - $i.microseconds, $tz)"""
})
}
}
/**
* Returns the date that is num_months after start_date.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(start_date, num_months) - Returns the date that is `num_months` after `start_date`.",
extended = """
Examples:
> SELECT _FUNC_('2016-08-31', 1);
2016-09-30
""")
// scalastyle:on line.size.limit
case class AddMonths(startDate: Expression, numMonths: Expression)
extends BinaryExpression with ImplicitCastInputTypes {
override def left: Expression = startDate
override def right: Expression = numMonths
override def inputTypes: Seq[AbstractDataType] = Seq(DateType, IntegerType)
override def dataType: DataType = DateType
override def nullSafeEval(start: Any, months: Any): Any = {
DateTimeUtils.dateAddMonths(start.asInstanceOf[Int], months.asInstanceOf[Int])
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val dtu = DateTimeUtils.getClass.getName.stripSuffix("$")
defineCodeGen(ctx, ev, (sd, m) => {
s"""$dtu.dateAddMonths($sd, $m)"""
})
}
override def prettyName: String = "add_months"
}
/**
* Returns number of months between dates date1 and date2.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(timestamp1, timestamp2) - Returns number of months between `timestamp1` and `timestamp2`.",
extended = """
Examples:
> SELECT _FUNC_('1997-02-28 10:30:00', '1996-10-30');
3.94959677
""")
// scalastyle:on line.size.limit
case class MonthsBetween(date1: Expression, date2: Expression, timeZoneId: Option[String] = None)
extends BinaryExpression with TimeZoneAwareExpression with ImplicitCastInputTypes {
def this(date1: Expression, date2: Expression) = this(date1, date2, None)
override def left: Expression = date1
override def right: Expression = date2
override def inputTypes: Seq[AbstractDataType] = Seq(TimestampType, TimestampType)
override def dataType: DataType = DoubleType
override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression =
copy(timeZoneId = Option(timeZoneId))
override def nullSafeEval(t1: Any, t2: Any): Any = {
DateTimeUtils.monthsBetween(t1.asInstanceOf[Long], t2.asInstanceOf[Long], timeZone)
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val tz = ctx.addReferenceMinorObj(timeZone)
val dtu = DateTimeUtils.getClass.getName.stripSuffix("$")
defineCodeGen(ctx, ev, (l, r) => {
s"""$dtu.monthsBetween($l, $r, $tz)"""
})
}
override def prettyName: String = "months_between"
}
/**
* Given a timestamp, which corresponds to a certain time of day in the given timezone, returns
* another timestamp that corresponds to the same time of day in UTC.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(timestamp, timezone) - Given a timestamp, which corresponds to a certain time of day in the given timezone, returns another timestamp that corresponds to the same time of day in UTC.",
extended = """
Examples:
> SELECT _FUNC_('2016-08-31', 'Asia/Seoul');
2016-08-30 15:00:00
""")
// scalastyle:on line.size.limit
case class ToUTCTimestamp(left: Expression, right: Expression)
extends BinaryExpression with ImplicitCastInputTypes {
override def inputTypes: Seq[AbstractDataType] = Seq(TimestampType, StringType)
override def dataType: DataType = TimestampType
override def prettyName: String = "to_utc_timestamp"
override def nullSafeEval(time: Any, timezone: Any): Any = {
DateTimeUtils.toUTCTime(time.asInstanceOf[Long],
timezone.asInstanceOf[UTF8String].toString)
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val dtu = DateTimeUtils.getClass.getName.stripSuffix("$")
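    // If the time zone is a foldable expression, resolve it once at codegen time and cache the
    // TimeZone instances in mutable state; otherwise call toUTCTime with the per-row time zone string.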
if (right.foldable) {
val tz = right.eval()
if (tz == null) {
ev.copy(code = s"""
|boolean ${ev.isNull} = true;
|long ${ev.value} = 0;
""".stripMargin)
} else {
val tzTerm = ctx.freshName("tz")
val utcTerm = ctx.freshName("utc")
val tzClass = classOf[TimeZone].getName
val dtu = DateTimeUtils.getClass.getName.stripSuffix("$")
ctx.addMutableState(tzClass, tzTerm, s"""$tzTerm = $dtu.getTimeZone("$tz");""")
ctx.addMutableState(tzClass, utcTerm, s"""$utcTerm = $dtu.getTimeZone("UTC");""")
val eval = left.genCode(ctx)
ev.copy(code = s"""
|${eval.code}
|boolean ${ev.isNull} = ${eval.isNull};
|long ${ev.value} = 0;
|if (!${ev.isNull}) {
| ${ev.value} = $dtu.convertTz(${eval.value}, $tzTerm, $utcTerm);
|}
""".stripMargin)
}
} else {
defineCodeGen(ctx, ev, (timestamp, format) => {
s"""$dtu.toUTCTime($timestamp, $format.toString())"""
})
}
}
}
/**
* Parses a column to a date based on the given format.
*/
@ExpressionDescription(
usage = """
_FUNC_(date_str[, fmt]) - Parses the `date_str` expression with the `fmt` expression to
a date. Returns null with invalid input. By default, it follows casting rules to a date if
the `fmt` is omitted.
""",
extended = """
Examples:
> SELECT _FUNC_('2009-07-30 04:17:52');
2009-07-30
> SELECT _FUNC_('2016-12-31', 'yyyy-MM-dd');
2016-12-31
""")
case class ParseToDate(left: Expression, format: Option[Expression], child: Expression)
extends RuntimeReplaceable {
def this(left: Expression, format: Expression) {
this(left, Option(format),
Cast(Cast(UnixTimestamp(left, format), TimestampType), DateType))
}
def this(left: Expression) = {
    // backwards compatibility
this(left, None, Cast(left, DateType))
}
override def flatArguments: Iterator[Any] = Iterator(left, format)
override def sql: String = {
if (format.isDefined) {
s"$prettyName(${left.sql}, ${format.get.sql})"
} else {
s"$prettyName(${left.sql})"
}
}
override def prettyName: String = "to_date"
}
/**
* Parses a column to a timestamp based on the supplied format.
*/
@ExpressionDescription(
usage = """
_FUNC_(timestamp[, fmt]) - Parses the `timestamp` expression with the `fmt` expression to
a timestamp. Returns null with invalid input. By default, it follows casting rules to
a timestamp if the `fmt` is omitted.
""",
extended = """
Examples:
> SELECT _FUNC_('2016-12-31 00:12:00');
2016-12-31 00:12:00
> SELECT _FUNC_('2016-12-31', 'yyyy-MM-dd');
2016-12-31 00:00:00
""")
case class ParseToTimestamp(left: Expression, format: Option[Expression], child: Expression)
extends RuntimeReplaceable {
def this(left: Expression, format: Expression) = {
this(left, Option(format), Cast(UnixTimestamp(left, format), TimestampType))
}
def this(left: Expression) = this(left, None, Cast(left, TimestampType))
override def flatArguments: Iterator[Any] = Iterator(left, format)
override def sql: String = {
if (format.isDefined) {
s"$prettyName(${left.sql}, ${format.get.sql})"
} else {
s"$prettyName(${left.sql})"
}
}
override def prettyName: String = "to_timestamp"
override def dataType: DataType = TimestampType
}
/**
* Returns date truncated to the unit specified by the format.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(date, fmt) - Returns `date` with the time portion of the day truncated to the unit specified by the format model `fmt`.",
extended = """
Examples:
> SELECT _FUNC_('2009-02-12', 'MM');
2009-02-01
> SELECT _FUNC_('2015-10-27', 'YEAR');
2015-01-01
""")
// scalastyle:on line.size.limit
case class TruncDate(date: Expression, format: Expression)
extends BinaryExpression with ImplicitCastInputTypes {
override def left: Expression = date
override def right: Expression = format
override def inputTypes: Seq[AbstractDataType] = Seq(DateType, StringType)
override def dataType: DataType = DateType
override def nullable: Boolean = true
override def prettyName: String = "trunc"
private lazy val truncLevel: Int =
DateTimeUtils.parseTruncLevel(format.eval().asInstanceOf[UTF8String])
override def eval(input: InternalRow): Any = {
val level = if (format.foldable) {
truncLevel
} else {
DateTimeUtils.parseTruncLevel(format.eval().asInstanceOf[UTF8String])
}
if (level == -1) {
// unknown format
null
} else {
val d = date.eval(input)
if (d == null) {
null
} else {
DateTimeUtils.truncDate(d.asInstanceOf[Int], level)
}
}
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val dtu = DateTimeUtils.getClass.getName.stripSuffix("$")
if (format.foldable) {
if (truncLevel == -1) {
ev.copy(code = s"""
boolean ${ev.isNull} = true;
${ctx.javaType(dataType)} ${ev.value} = ${ctx.defaultValue(dataType)};""")
} else {
val d = date.genCode(ctx)
ev.copy(code = s"""
${d.code}
boolean ${ev.isNull} = ${d.isNull};
${ctx.javaType(dataType)} ${ev.value} = ${ctx.defaultValue(dataType)};
if (!${ev.isNull}) {
${ev.value} = $dtu.truncDate(${d.value}, $truncLevel);
}""")
}
} else {
nullSafeCodeGen(ctx, ev, (dateVal, fmt) => {
val form = ctx.freshName("form")
s"""
int $form = $dtu.parseTruncLevel($fmt);
if ($form == -1) {
${ev.isNull} = true;
} else {
${ev.value} = $dtu.truncDate($dateVal, $form);
}
"""
})
}
}
}
/**
* Returns the number of days from startDate to endDate.
*/
@ExpressionDescription(
usage = "_FUNC_(endDate, startDate) - Returns the number of days from `startDate` to `endDate`.",
extended = """
Examples:
> SELECT _FUNC_('2009-07-31', '2009-07-30');
1
> SELECT _FUNC_('2009-07-30', '2009-07-31');
-1
""")
case class DateDiff(endDate: Expression, startDate: Expression)
extends BinaryExpression with ImplicitCastInputTypes {
override def left: Expression = endDate
override def right: Expression = startDate
override def inputTypes: Seq[AbstractDataType] = Seq(DateType, DateType)
override def dataType: DataType = IntegerType
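  // DateType values are stored as the number of days since the epoch, so the difference is a plain Int subtraction.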
override def nullSafeEval(end: Any, start: Any): Any = {
end.asInstanceOf[Int] - start.asInstanceOf[Int]
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
defineCodeGen(ctx, ev, (end, start) => s"$end - $start")
}
}
| setjet/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/datetimeExpressions.scala | Scala | apache-2.0 | 45,536 |
package kmeans
import scala.annotation.tailrec
import scala.collection.{GenMap, GenSeq, _}
import scala.util.Random
import org.scalameter._
import common._
class KMeans {
def generatePoints(k: Int, num: Int): Seq[Point] = {
val randx = new Random(1)
val randy = new Random(3)
val randz = new Random(5)
(0 until num)
.map({ i =>
val x = ((i + 1) % k) * 1.0 / k + randx.nextDouble() * 0.5
val y = ((i + 5) % k) * 1.0 / k + randy.nextDouble() * 0.5
val z = ((i + 7) % k) * 1.0 / k + randz.nextDouble() * 0.5
new Point(x, y, z)
}).to[mutable.ArrayBuffer]
}
def initializeMeans(k: Int, points: Seq[Point]): Seq[Point] = {
val rand = new Random(7)
(0 until k).map(_ => points(rand.nextInt(points.length))).to[mutable.ArrayBuffer]
}
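  /** Returns the mean in `means` that is closest to `p` by squared Euclidean distance (linear scan). */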
def findClosest(p: Point, means: GenSeq[Point]): Point = {
assert(means.size > 0)
var minDistance = p.squareDistance(means(0))
var closest = means(0)
var i = 1
while (i < means.length) {
val distance = p.squareDistance(means(i))
if (distance < minDistance) {
minDistance = distance
closest = means(i)
}
i += 1
}
closest
}
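  /** Assigns every point to its closest mean; means that attract no points are mapped to an empty
    * sequence so that every mean remains a key of the resulting map. */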
def classify(points: GenSeq[Point], means: GenSeq[Point]): GenMap[Point, GenSeq[Point]] = {
if (means.isEmpty) GenMap[Point, GenSeq[Point]]()
else {
var map = points.map { p =>
(findClosest(p, means), p)
}.groupBy(_._1).map(x => (x._1, x._2.map(y => y._2))).toMap
means.filterNot(x => map.contains(x)).foreach(x => map +=(x, GenSeq[Point]()))
map
}
}
def findAverage(oldMean: Point, points: GenSeq[Point]): Point = if (points.length == 0) oldMean else {
var x = 0.0
var y = 0.0
var z = 0.0
points.seq.foreach { p =>
x += p.x
y += p.y
z += p.z
}
new Point(x / points.length, y / points.length, z / points.length)
}
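  /** Recomputes each old mean as the coordinate-wise average of the points classified to it. */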
def update(classified: GenMap[Point, GenSeq[Point]], oldMeans: GenSeq[Point]): GenSeq[Point] = {
oldMeans.map { om =>
val points = classified(om)
val (sumX, sumY, sumZ) = points.map(x => (x.x, x.y, x.z))
.fold((0.0, 0.0, 0.0))( (a, b) =>
(a._1 + b._1, a._2 + b._2, a._3 + b._3)
)
new Point(sumX / points.size, sumY / points.size, sumZ / points.size)
}
}
def converged(eta: Double)(oldMeans: GenSeq[Point], newMeans: GenSeq[Point]): Boolean = {
for(i <- (0 until oldMeans.size)){
if(oldMeans(i).squareDistance(newMeans(i)) > eta) return false
}
true
}
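  /** Repeatedly reclassifies the points and updates the means until every mean moves by at most `eta`
    * (measured as squared distance). */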
@tailrec
final def kMeans(points: GenSeq[Point], means: GenSeq[Point], eta: Double): GenSeq[Point] = {
val pointsMap = classify(points, means)
val newMeans = update(pointsMap, means)
    if (!converged(eta)(means, newMeans)) kMeans(points, newMeans, eta) // your implementation needs to be tail-recursive
else newMeans
}
}
/** Describes one point in three-dimensional space.
*
* Note: deliberately uses reference equality.
*/
class Point(val x: Double, val y: Double, val z: Double) {
private def square(v: Double): Double = v * v
def squareDistance(that: Point): Double = {
square(that.x - x) + square(that.y - y) + square(that.z - z)
}
private def round(v: Double): Double = (v * 100).toInt / 100.0
override def toString = s"(${round(x)}, ${round(y)}, ${round(z)})"
}
object KMeansRunner {
val standardConfig = config(
Key.exec.minWarmupRuns -> 20,
Key.exec.maxWarmupRuns -> 40,
Key.exec.benchRuns -> 25,
Key.verbose -> true
) withWarmer(new Warmer.Default)
def main(args: Array[String]) {
val kMeans = new KMeans()
val numPoints = 500000
val eta = 0.01
val k = 32
val points = kMeans.generatePoints(k, numPoints)
val means = kMeans.initializeMeans(k, points)
val seqtime = standardConfig measure {
kMeans.kMeans(points, means, eta)
}
println(s"sequential time: $seqtime ms")
val partime = standardConfig measure {
val parPoints = points.par
val parMeans = means.par
kMeans.kMeans(parPoints, parMeans, eta)
}
println(s"parallel time: $partime ms")
println(s"speedup: ${seqtime / partime}")
}
}
| alvsanand/scala_specialization_coursera | parprog1_kmeans/src/main/scala/kmeans/KMeans.scala | Scala | apache-2.0 | 4,187 |
package test
import java.util.Date
trait Currency {
val symbol: String
val name: String
}
// unfortunately Scala objects (singletons) are not supported by json4s (at least not yet)
case class Dollar() extends Currency {
val symbol = "$"
val name = "dollar"
}
case class Euro() extends Currency {
val symbol = "€"
val name = "euro"
}
trait Place
case class Address(
address: String,
town: String
) extends Place
case class GpsCoords(
latitude: Double,
north: Boolean,
longitude: Double,
east: Boolean
) extends Place
case class Receipt(
amount: Double,
currency: Currency,
when: Date,
where: Place,
what: String,
keywords: Set[String]
)
case class ReceiptsArchive(
receipts: List[Receipt],
description: Option[String] = None
)
object Dummy {
import org.json4s._
import org.json4s.native.Serialization
import org.json4s.native.Serialization.{read, write, writePretty}
def today() = new Date()
def main(args: Array[String]) {
//implicit val formats = Serialization.formats(NoTypeHints)
implicit val formats = Serialization.formats(
ShortTypeHints(
List(
classOf[Euro],
classOf[Dollar],
classOf[Address],
classOf[GpsCoords]
)
)
)
val receipts = ReceiptsArchive(
Receipt(15, Euro(), today(), Address("XIII", "Paris"), "meal", Set("food", "4work")) ::
Receipt(1, Euro(), today(), Address("I", "Paris"), "bread", Set("food")) :: Nil,
Some("2013 archives")
)
val json: String = write(receipts)
println(writePretty(receipts))
val decoded: ReceiptsArchive = read[ReceiptsArchive](json)
println(decoded)
assert(json == write(decoded))
}
}
| weimeittx/IM | runner/src/main/scala/test/Test.scala | Scala | apache-2.0 | 2,062 |
package cs.ucla.edu.bwaspark.datatype
import scala.collection.mutable.MutableList
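/** Mutable holder for a CIGAR alignment: the ordered list of CIGAR segments plus the rendered CIGAR string. */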
class CigarType {
var cigarSegs: MutableList[CigarSegType] = new MutableList[CigarSegType]
var cigarStr: String = new String
}
| peterpengwei/bwa-spark-fpga | src/main/scala/cs/ucla/edu/bwaspark/datatype/CigarType.scala | Scala | gpl-2.0 | 216 |
package de.tu_berlin.impro3.scala.spatio_temporal_dynamics.parsers
import de.tu_berlin.impro3.scala.spatio_temporal_dynamics._
import model.Tweet
import java.text.SimpleDateFormat
import java.lang.{ ThreadLocal => JThreadLocal }
import java.util.Locale
import org.json4s._
import org.json4s.native.JsonMethods
/** [[Tweet]] parser for JSON formatted input. */
class JsonParser extends Parser {
val dateFormat = new JThreadLocal[SimpleDateFormat]{
override def initialValue() =
new SimpleDateFormat("EEE MMM dd HH:mm:ss ZZZZZ yyyy", Locale.US)
}
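  /** Extracts the text, author id, creation time, lower-cased hashtags and optional (lat, lon)
    * coordinates from a tweet's JSON representation. */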
def parse(json: JValue) = for {
JString(text) <- json \ "text"
JString(createdAt) <- json \ "created_at"
time = dateFormat.get.parse(createdAt).getTime
JString(user) <- json \ "user" \ "id_str"
JArray(hashTags) <- json \ "entities" \ "hashtags"
tags = for {
tag <- hashTags
JString(text) <- tag \ "text"
} yield text.toLowerCase
gps = json \ "coordinates" \ "coordinates" match {
case JArray(JDouble(lon) :: JDouble(lat) :: Nil) => Some(lat, lon)
case _ => None
}
} yield Tweet(text, user, time, tags, gps)
/** Possibly parse a single [[Tweet]]. */
def parse(text: String) = try {
parse(JsonMethods.parse(text)) match {
case (tweet: Tweet) :: _ => Some(tweet)
case _ => None
}
} catch { case _: Exception => None }
}
| joroKr21/spatio-temporal-dynamics | impro3-ws14-scala/src/main/scala/de/tu_berlin/impro3/scala/spatio_temporal_dynamics/parsers/JsonParser.scala | Scala | apache-2.0 | 1,433 |
/* Copyright (C) 2008-2014 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.app.nlp.load
import cc.factorie.app.nlp._
import cc.factorie.app.nlp.pos.{PennPosDomain, PennPosTag}
import cc.factorie.variable.Span
//import cc.factorie.app.nlp.coref.mention.{MentionEntityType, MentionList, Mention, Entity}
import cc.factorie.app.nlp.coref._ //{Mention,Mention,MentionList,Entity}
import cc.factorie.app.nlp.phrase.{OntonotesEntityType, Phrase, OntonotesPhraseEntityType}
import scala.collection.mutable.{ListBuffer, ArrayBuffer, Map, Stack}
import scala.collection.mutable
import scala.util.control.Breaks._
class EntityKey(val name: String)
object LoadConll2011 {
//this is used when loading gold entity type annotation. If this variable is set to true, the loader
// only uses the entity type if its boundaries exactly match the boundaries of the annotated mention
val useExactEntTypeMatch = false
// to be used with test-with-gold-mention-boundaries
val autoFileFilter = new java.io.FileFilter() {
override def accept(file: java.io.File): Boolean =
file.getName.endsWith("auto_conll")
}
// to be used with test-key
val goldFileFilter = new java.io.FileFilter() {
override def accept(file: java.io.File): Boolean =
file.getName.endsWith("gold_conll")
}
@inline def unescapeBrackets(s: String) =
s match {
case "-LRB-" => "("
case "-RRB-" => ")"
case "-LSB-" => "["
case "-RSB-" => "]"
case "-LCB-" => "{"
case "-RCB-" => "}"
case _ => s
}
//(15|(43
final val copularVerbs = collection.immutable.HashSet[String]() ++ Seq("is","are","was","'m")
  //val openEntity = """\( (\d+)""".r
  val singleLineEntity = """"""
  val tokenizer = """(\(|\||\)|\d+)""".r
  val entityTypeTokenizer = """(\(|[^\)]+|\)|)""".r
  //val corefEntityTokenizer = """(\(|[^\)]+|\)|)""".r
  val asteriskStripper = """\*""".r
private def tokenizeEntityType(s: String): Array[String] = {
entityTypeTokenizer.findAllIn(s).map(x => asteriskStripper.replaceAllIn(x,"")).map(_.toString).toArray
}
// disperseEntityTypes optionally gives entity type information to all things that are coreferent with something that has entity type annotation
//2 Documents in Train: 161.5 mentions/doc
def loadWithParse(f: String, loadSingletons: Boolean = true, limitNumDocuments:Int = -1, callDisperseEntityTypes:Boolean = false): Seq[Document] = {
// println("loading " + f)
val docs = ArrayBuffer[Document]()
var coref: WithinDocCoref = null
var currDoc: Document = null
var currSent: Sentence = null
var currEntId: Int = 0
var docTokInd: Int = -1
var numMentions = 0 // total number mentions in a document
val entities = Map[String, WithinDocEntity]()
var sentenceId: Int = -1
var tokenId: Int = -1
val parseStack = collection.mutable.Stack[(String,Int)]()
var currParseTree:ConstituencyParse = null
val source = scala.io.Source.fromFile(f)
var prevPhrase = ""
var prevWord = ""
val goldMentionBoundaries = new scala.collection.mutable.LinkedHashMap[Span[Section,Token],CoreferentEntityChunk]
val _spanToEntityType = new scala.collection.mutable.LinkedHashMap[Span[Section,Token],String]
var unResolvedEntityType:EntityTypeChunk = null
val openEntityStack = mutable.Stack[CoreferentEntityChunk]()
breakable { for (l <- source.getLines()) {
if (l.startsWith("#begin document ")) {
if (docs.length == limitNumDocuments) break()
val fId = l.split("[()]")(1) + "-" + l.takeRight(3)
currDoc = new Document("").setName(fId)
currDoc.getCoref
coref = currDoc.getTargetCoref // This also puts a newly created WithinDocCoref in currDoc.attr.
currDoc.annotators(classOf[Token]) = UnknownDocumentAnnotator.getClass // register that we have token boundaries
currDoc.annotators(classOf[Sentence]) = UnknownDocumentAnnotator.getClass // register that we have token boundaries
//currDoc.attr += new FileIdentifier(fId, true, fId.split("/")(0), "CoNLL")
docs += currDoc
} else if (l.startsWith("#end document")) {
coref = null
currDoc = null
currEntId = 0
_spanToEntityType.clear()
goldMentionBoundaries.clear()
openEntityStack.clear()
entities.clear()
parseStack.clear()
docTokInd = -1
sentenceId = -1
tokenId = -1
} else if (l.length == 0) {
      currDoc.appendString("\n")
parseStack.clear()
currSent = null
} else {
docTokInd += 1
      val fields = l.split("\\s+")
val tokId = fields(2).toInt
val word = unescapeBrackets(fields(3))
currDoc.appendString(" ")
if (tokId == 0) {
currSent = new Sentence(currDoc)
currParseTree = new ConstituencyParse(currSent,0,"TOP")
prevPhrase = ""
prevWord = ""
}
val token = new Token(currSent, word)
PennPosDomain.unfreeze() //todo: factorie PennPosDomain currently contains all of the ontonotes tags. Might want to freeze this up for thread safety
token.attr += new PennPosTag(token,fields(4))
tokenId += 1
if (tokId == 0) sentenceId += 1
val entityTypeTokens = tokenizeEntityType(fields(10)).filterNot(_.isEmpty)
entityTypeTokens match {
case Array("(",entityTypeString:String,")") => _spanToEntityType.put(new TokenSpan(currSent.section,docTokInd,1).value,entityTypeString) //todo:Don't forget to change this to new span
case Array("(",entityTypeString) =>
assert(unResolvedEntityType eq null,"Nested Entity Types Found")
unResolvedEntityType = new EntityTypeChunk(entityTypeString,docTokInd)
case Array(")") =>
_spanToEntityType.put(new TokenSpan(currSent.section,unResolvedEntityType.start,docTokInd-unResolvedEntityType.start+1).value,unResolvedEntityType.entityType)
unResolvedEntityType = null
case _ =>
}
val entityLabels = fields.last.split('|').map(_.trim)
for(label <- entityLabels){
val corefTags = tokenizeEntityType(label).filterNot(l => l.isEmpty)
corefTags match {
case Array("(",entityId,")") => goldMentionBoundaries.put(new Span(currSent.section,docTokInd,1),new CoreferentEntityChunk(fields(0)+"-*"+entityId,docTokInd))
case Array("(",entityId) => openEntityStack.push(new CoreferentEntityChunk(fields(0)+"-*"+entityId,docTokInd))
case Array(entityId,")") =>
val lastOpenedEntity = openEntityStack.pop()
goldMentionBoundaries.put(new TokenSpan(currSent.section,lastOpenedEntity.mentionStart,docTokInd - lastOpenedEntity.mentionStart + 1).value,lastOpenedEntity)
case _ =>
}
}
      val constituencyLabels = fields(5).split("\\*")
if (constituencyLabels.length >= 1 && loadSingletons) {
val bracketOpens = constituencyLabels(0)
val bracketCloses = if (constituencyLabels.length > 1) constituencyLabels(1) else ""
        for (nonTerminal <- bracketOpens.split("\\(").drop(1)) {
parseStack.push((nonTerminal, docTokInd))
currParseTree.addChild(nonTerminal,docTokInd)
}
for (close <- bracketCloses) {
val (phrase, start) = parseStack.pop()
val parentPhrase = if(!parseStack.isEmpty) parseStack(0)._1 else ""
//if(Vector("NP","PRP","PP").contains(phrase))
currParseTree.current.setEnd(docTokInd)
if (phrase == "NP") {
val span = new TokenSpan(currDoc.asSection, start, docTokInd - start + 1)
val newMention = coref.addMention(new Phrase(span, span.tokens.indexOf(currParseTree.current.getHeadToken(docTokInd))))
numMentions += 1
currParseTree.closeLabel(docTokInd)
val entityTypesForSpan = _spanToEntityType.filterKeys(span.value.contains)
if(!entityTypesForSpan.isEmpty){
val exactMatch = entityTypesForSpan.find(entitySpan => (entitySpan._1.start == start) && (entitySpan._1.end == docTokInd) )
val exactMatchExists = exactMatch ne null
if (!useExactEntTypeMatch ||(useExactEntTypeMatch && exactMatchExists))
newMention.phrase.attr += new OntonotesPhraseEntityType(newMention.phrase, entityTypesForSpan.find(s => s._1.exists(t=> t == newMention.phrase.headToken)).getOrElse(entityTypesForSpan.head)._2,exactMatchExists)
else
newMention.phrase.attr += new OntonotesPhraseEntityType(newMention.phrase, "O",exactMatchExists)
} else
newMention.phrase.attr += new OntonotesPhraseEntityType(newMention.phrase, "O")
val entityChunkForMention = goldMentionBoundaries.getOrElse(newMention.phrase.value,new CoreferentEntityChunk(fields(0)+"-"+(-coref.mentions.size),start,true))
//Register that we have found this mention
entityChunkForMention.found = true
newMention.attr += new EntityKey(entityChunkForMention.entityId)
val corefEntity = entities.getOrElseUpdate(entityChunkForMention.entityId,coref.entityFromUniqueId(entityChunkForMention.entityId))
corefEntity += newMention
}else currParseTree.closeLabel(docTokInd)
prevPhrase = phrase
}
}
//this makes mentions for the ground truth mentions that weren't found by the NP, PRP Rules
for ((goldMentionSpan,goldMentionEntityInfo) <- goldMentionBoundaries.filter{case (mentionSpan,mentionEntityInfo) => !mentionEntityInfo.found}) {
//assert(currParseTree.current.parent.start == start,"Not in Parent")
val span = new TokenSpan(currDoc.asSection, goldMentionSpan.start, goldMentionSpan.length)
val newMention = coref.addMention(new Phrase(span, getSimpleHeadToken(span)))
val entityTypesForSpan = _spanToEntityType.filterKeys(span.value.contains)
if(!entityTypesForSpan.isEmpty){
val exactMatch = entityTypesForSpan.getOrElse(span.value,null)//.find(entitySpan => (entitySpan._1.start == start) && (entitySpan._1.end == docTokInd) )
val exactMatchExists = exactMatch ne null
if (!useExactEntTypeMatch ||(useExactEntTypeMatch && exactMatchExists))
newMention.phrase.attr += new OntonotesPhraseEntityType(newMention.phrase, entityTypesForSpan.find(s => s._1.exists(t=> t == newMention.phrase.headToken)).getOrElse(entityTypesForSpan.head)._2,exactMatchExists)
else
newMention.phrase.attr += new OntonotesPhraseEntityType(newMention.phrase, "O",exactMatchExists)
} else
newMention.phrase.attr += new OntonotesPhraseEntityType(newMention.phrase, "O")
numMentions += 1
val entityChunkForMention = goldMentionBoundaries.getOrElse(newMention.phrase.value,new CoreferentEntityChunk(fields(0)+"-"+coref.mentions.size+1,goldMentionSpan.start,true))
entityChunkForMention.found = true
newMention.attr += new EntityKey(entityChunkForMention.entityId)
val corefEntity = entities.getOrElseUpdate(entityChunkForMention.entityId,coref.entityFromUniqueId(entityChunkForMention.entityId))
corefEntity += newMention
}
prevWord = word
}
}} // closing "breakable"
if (callDisperseEntityTypes) disperseEntityTypes(docs.map(_.getTargetCoref))
source.close()
docs
}
case class CoreferentEntityChunk(entityId:String,mentionStart:Int,var found:Boolean = false)
case class EntityTypeChunk(entityType:String, start:Int)
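  /** Propagates gold entity types within each entity: if exactly one non-"O" type was annotated among
    * an entity's coreferent mentions, every mention of that entity is given that type. */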
def disperseEntityTypes(corefDocs:Seq[WithinDocCoref]):Unit = {
for(corefDoc <- corefDocs){
val entities = corefDoc.mentions.toSeq.groupBy(m => m.entity).filter(x => x._2.length > 1)
for(ent <- entities){
val entityTypes = ent._2.map(m => m.phrase.attr[OntonotesPhraseEntityType].categoryValue).filter(t => t != "O").distinct
if(entityTypes.length > 1){
// println("warning: there were coreferent mentions with different annotated entity types: " + entityTypes.mkString(" ") + "\\n" + ent._2.map(m => m.span.string).mkString(" "))
}else if(entityTypes.length == 1){
val newType = entityTypes(0)
ent._2.foreach(m => m.phrase.attr[OntonotesPhraseEntityType].target.setCategory(newType)(null))
}
}
}
}
  /** This is a span-level offset. Since we don't have a dependency parse, we just take the final noun in the span. */
def getSimpleHeadToken(span: TokenSpan): Int = {
//val interiorNP = parseTree.current.children.find(_.label == "NP")
val toReturn = span.value.lastIndexWhere(_.posTag.categoryValue.startsWith("NN"))
//val allNP = span.value.filter(_.posTag.categoryValue.startsWith("NN")).map(_.string).toSeq
if(toReturn == -1){
span.length - 1
}else{
toReturn
}
}
}
class ConstituencyParse(val sent: Sentence,rootStart:Int,rootLabel:String){
var current = new ConstLabel(rootLabel,rootStart)
def addChild(label:String,start:Int) = {
val newChild = new ConstLabel(label,start,current)
current.children += newChild
current = newChild
}
def closeLabel(end:Int){
current.setEnd(end)
current = current.parent
}
class ConstLabel(val label:String,val start:Int,parentNode:ConstLabel = null){
val parent:ConstLabel = parentNode
val children:ArrayBuffer[ConstLabel] = new ArrayBuffer[ConstituencyParse.this.type#ConstLabel]()
var endIdx:Int = -1
var span:TokenSpan = null
def setEnd(end:Int) = {
span = new TokenSpan(sent.section,start,end - start + 1)
endIdx = end
}
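    /** Heuristic head finding: prefer the token following a possessive marker, then recurse into a child NP,
      * otherwise fall back to the last noun in the span (or the last token if no noun is present). */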
def getHeadToken(docTokInd:Int):Token ={
val childNP = children.filter(_.label == "NP")
val possNP = span.tokens.find(_.posTag.intValue == PennPosDomain.posIndex)
if(possNP.isDefined && possNP.get != span.last && possNP.get.next.posTag.categoryValue.startsWith("N")) {
return possNP.get.next
}
else if(!childNP.isEmpty) childNP.head.getHeadToken(docTokInd)
else {
span.value.foreach(t=>assert(t.posTag != null))
val lastIndexOfNoun = span.value.lastIndexWhere(_.posTag.categoryValue.startsWith("NN"))
if(lastIndexOfNoun == -1 && span!=null) {
//println("** Head Error: " + span.string+" "+span.last.string)
span.last
}
else span.tokens(lastIndexOfNoun)
}
}
}
}
| hlin117/factorie | src/main/scala/cc/factorie/app/nlp/load/LoadConll2011.scala | Scala | apache-2.0 | 15,317 |
package org.jetbrains.plugins.scala.lang.psi.impl.base.types
import com.intellij.lang.ASTNode
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiElementImpl
import org.jetbrains.plugins.scala.lang.psi.api.base.types._
/**
* @author Alexander Podkhalyuzin
* Date: 14.03.2008
*/
class ScSequenceArgImpl(node: ASTNode) extends ScalaPsiElementImpl (node) with ScSequenceArg{
override def toString: String = "SequenceArgumentType"
} | gtache/intellij-lsp | intellij-lsp-dotty/src/org/jetbrains/plugins/scala/lang/psi/impl/base/types/ScSequenceArgImpl.scala | Scala | apache-2.0 | 434 |
package com.github.karlhigley.spark.neighbors.collision
import scala.util.hashing.MurmurHash3
import org.apache.spark.mllib.linalg.SparseVector
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel
import com.github.karlhigley.spark.neighbors.lsh.{ BitSignature, HashTableEntry, IntSignature }
/**
* A banding collision strategy for candidate identification with Minhash
*/
private[neighbors] class BandingCollisionStrategy(
bands: Int
) extends CollisionStrategy with Serializable {
/**
* Convert hash tables into an RDD that is "collidable" using groupByKey.
* The new keys contain the hash table id, the band id, and a hashed version
* of the banded signature.
*/
def apply(hashTables: RDD[_ <: HashTableEntry[_]]): RDD[(Product, Point)] = {
val bandEntries = hashTables.flatMap(entry => {
val elements = entry.sigElements
val banded = elements.grouped(elements.size / bands).zipWithIndex
banded.map {
case (bandSig, bandNum) => {
// Arrays are mutable and can't be used in RDD keys
// Use a hash value (i.e. an int) as a substitute
val bandSigHash = MurmurHash3.arrayHash(bandSig)
val key = (entry.table, bandNum, bandSigHash).asInstanceOf[Product]
(key, (entry.id, entry.point))
}
}
})
bandEntries
}
}
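
// A minimal usage sketch (the helper name below is hypothetical): bands the signatures of the given
// hash table entries; entries that end up sharing a (table, band, bandSignatureHash) key are candidate
// near neighbors and can be grouped by key downstream.
private[neighbors] object BandingCollisionExample {
  def candidates(hashTables: RDD[_ <: HashTableEntry[_]], bands: Int) = {
    val strategy = new BandingCollisionStrategy(bands)
    strategy(hashTables) // group by key downstream to materialize the colliding candidate sets
  }
}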
| L2V/like2vec | src/prediction/src/main/scala/com/github/karlhigley/spark/neighbors/collision/BandingCollisionStrategy.scala | Scala | apache-2.0 | 1,362 |
// test shadowing of implicits by synonymous non-implicit symbols
// whether they be inherited, imported (explicitly or using a wildcard) or defined directly
class A
class B
trait S {
implicit def aToB(a: A): B = new B
}
class T1 extends S {
def x: B = {
val aToB = 3
// ok: doesn't compile, because aToB method requires 'T.this.' prefix
//aToB(new A)
// bug: compiles, using T.this.aToB,
// despite it not being accessible without a prefix
new A
}
}
object O {
implicit def aToB(a: A): B = new B
}
class T2a {
import O._
def x: B = {
val aToB = 3
// ok: doesn't compile, because aToB method requires 'T.this.' prefix
//aToB(new A)
// bug: compiles, using T.this.aToB,
// despite it not being accessible without a prefix
new A
}
}
class T2b {
import O.aToB
def x: B = {
val aToB = 3
// ok: doesn't compile, because aToB method requires 'T.this.' prefix
//aToB(new A)
// bug: compiles, using T.this.aToB,
// despite it not being accessible without a prefix
new A
}
}
class T3 {
implicit def aToB(a: A): B = new B
def x: B = {
val aToB = 3
// ok: doesn't compile, because aToB method requires 'T.this.' prefix
//aToB(new A)
// bug: compiles, using T.this.aToB,
// despite it not being accessible without a prefix
new A
}
} | felixmulder/scala | test/files/neg/t3453.scala | Scala | bsd-3-clause | 1,485 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.play.views
package helpers
import org.scalatest.{Matchers, WordSpec}
import play.api.data.Forms.{mapping, _}
import play.api.data.{Field, Form, FormError}
import play.api.test.Helpers._
import uk.gov.hmrc.play.MessagesSupport
import uk.gov.hmrc.play.views.html.helpers.InputRadioGroup
class InputRadioGroupSpec extends WordSpec with Matchers with MessagesSupport {
case class DummyFormData(radioValue: String)
val max = 10
def dummyForm =
Form(
mapping(
"radioValue" -> text(maxLength = max)
)(DummyFormData.apply)(DummyFormData.unapply)
)
val inputRadioGroup = new InputRadioGroup()
"@helpers.inputRadioGroup" should {
"render an option" in {
val doc = jsoupDocument(
inputRadioGroup(dummyForm("radioValue"), Seq("myValue" -> "myLabel"), '_inputClass -> "myInputClass")
)
val input = doc.getElementById("radioValue-myvalue")
input.attr("type") shouldBe "radio"
input.attr("name") shouldBe "radioValue"
input.attr("value") shouldBe "myValue"
input.attr("class") shouldBe "myInputClass"
input.parent().text() shouldBe "myLabel"
}
"render label for radio button with the correct class" in {
val doc = jsoupDocument(
inputRadioGroup(dummyForm("radioValue"), Seq("myValue" -> "myLabel"), '_labelClass -> "labelClass")
)
doc.getElementsByAttributeValue("for", "radioValue-myvalue").attr("class") shouldBe "labelClass"
}
"render multiple options" in {
val doc =
jsoupDocument(inputRadioGroup(dummyForm("radioValue"), Seq("myValue1" -> "myLabel1", "myValue2" -> "myLabel2")))
doc.getElementById("radioValue-myvalue1") should not be null
doc.getElementById("radioValue-myvalue2") should not be null
}
"render a selected option" in {
val doc = jsoupDocument(
inputRadioGroup(dummyForm.fill(DummyFormData("myValue"))("radioValue"), Seq("myValue" -> "myLabel"))
)
val input = doc.getElementById("radioValue-myvalue")
input.attr("checked") shouldBe "checked"
}
"render the radio group label" in {
val doc = jsoupDocument(
inputRadioGroup(
dummyForm("radioValue"),
Seq("myValue" -> "myLabel"),
'_legend -> "My Radio Group",
'_legendID -> "radioGroup legendID",
'_groupDivClass -> "radioGroupDiv",
'_groupClass -> "radioGroupFieldset",
'_labelClass -> "myLabelClass",
'_inputClass -> "inputClass"
)
)
val radioGroupDiv = doc.getElementsByClass("radioGroupDiv").first()
radioGroupDiv.attr("class") shouldBe "radioGroupDiv"
val radioGroupFieldset = radioGroupDiv.getElementsByTag("fieldset").first()
radioGroupFieldset.attr("class") shouldBe "radioGroupFieldset"
radioGroupFieldset.getElementsByTag("legend").first().text() shouldBe "My Radio Group"
radioGroupFieldset.getElementsByTag("legend").attr("id") shouldBe "radioGroup legendID"
radioGroupFieldset.attr("class") should not include "form-field--error"
val radioGroupField = radioGroupFieldset.getElementsByTag("label").first()
radioGroupField.attr("class") should include("myLabelClass")
radioGroupField.ownText() shouldBe "myLabel"
val radioGroupFieldInput = radioGroupFieldset.getElementsByTag("input")
radioGroupFieldInput.attr("class") shouldBe "inputClass"
}
"renders errors" in {
val field: Field = Field(
form = dummyForm,
name = "",
constraints = Seq.empty,
format = None,
errors = Seq(FormError("error.maxLength", "too long")),
value = None
)
val doc = jsoupDocument(inputRadioGroup(field, Seq("myValue" -> "myLabel"), '_inputClass -> "myInputClass"))
doc.getElementsByTag("fieldset").first().attr("class") should include("form-field--error")
doc.getElementsByClass("error-notification").first().text() shouldBe "too long"
}
}
}
| hmrc/play-ui | src/test/scala/uk/gov/hmrc/play/views/helpers/InputRadioGroupSpec.scala | Scala | apache-2.0 | 4,721 |
package com.twitter.finagle.client
import com.twitter.finagle.{Addr, Dtab}
import com.twitter.util.{Closable, Var, Witness}
import java.util.concurrent.atomic.AtomicReference
import scala.collection.mutable
/**
* For all paths registered to a given client and Dtab, the [[EndpointRegistry]]
* keeps a reference to observe changes to the current weight and endpoints. A path
* and Var[Addr] are added for a given client and Dtab by calling
* `addObservation`. A path is removed for a given client and Dtab by calling
* `removeObservation`.
*/
private[twitter] object EndpointRegistry {
private type Observation = (AtomicReference[Addr], Closable)
private type EndpointMap = mutable.Map[String, Observation]
private type DtabMap = mutable.Map[Dtab, EndpointMap]
val registry = new EndpointRegistry()
}
private[twitter] class EndpointRegistry {
import EndpointRegistry._
// synchronized on `this`
private[this] val registry = mutable.Map.empty[String, DtabMap]
/**
* Returns a map of Dtabs to a map of paths to addrs for a given client
*
* @param client Name of the client
*/
def endpoints(client: String): Map[Dtab, Map[String, Addr]] = synchronized {
registry.get(client) match {
case Some(dtabMap) =>
dtabMap.mapValues { paths =>
paths.mapValues { case (observation, _) => observation.get() }.toMap
}.toMap
case None => Map.empty
}
}
/**
* Register a collection of endpoints for a given client, Dtab, and path.
* If the path already exits for the given client and Dtab, it is replaced.
*
* @param client Name of the client
* @param dtab Dtab for this path
* @param path Path to observe endpoints for
* @param endpoints Collection of endpoints for this serverset
*/
def addObservation(
client: String,
dtab: Dtab,
path: String,
endpoints: Var[Addr]
): Unit = {
val ar: AtomicReference[Addr] = new AtomicReference()
val closable = endpoints.changes.register(Witness(ar))
val observation = (ar, closable)
synchronized {
registry.get(client) match {
case Some(dtabMap) =>
dtabMap.get(dtab) match {
case Some(dtabEntry) =>
// If the path already exists, replace it and close the observation
val prev = dtabEntry.put(path, observation)
prev.foreach { case (_, closable) => closable.close() }
case None =>
dtabMap += ((dtab, mutable.Map(path -> observation)))
}
case None =>
val endpointMap: EndpointMap = mutable.Map(path -> observation)
val dtabMap: DtabMap = mutable.Map(dtab -> endpointMap)
registry.put(client, dtabMap)
}
}
}
/**
* Deregister a dtab and path to observe for a given client. If, after removal,
* there are no paths for a dtab, remove the dtab from the client's registry
* entry. If, after removal, there are no dtabs for the client, remove the
* client from the registry.
*
* @param client Name of the client
* @param dtab Dtab to remove the path for
* @param path Path to remove observation for
*/
def removeObservation(client: String, dtab: Dtab, path: String) = synchronized {
registry.get(client).foreach { dtabEntries =>
dtabEntries.get(dtab).foreach { entry =>
entry.remove(path).foreach { case (_, closable) => closable.close() }
if (entry.isEmpty) {
dtabEntries.remove(dtab)
if (dtabEntries.isEmpty) {
registry.remove(client)
}
}
}
}
}
}
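
// A minimal usage sketch (the client name, Dtab and path are hypothetical; Addr.Pending and Var come
// from the imports above): registers one observation, reads back the current snapshot, then deregisters it.
private[twitter] object EndpointRegistryExample {
  def demo(): Map[Dtab, Map[String, Addr]] = {
    val endpoints = Var[Addr](Addr.Pending)
    EndpointRegistry.registry.addObservation("example-client", Dtab.empty, "/s/example", endpoints)
    val snapshot = EndpointRegistry.registry.endpoints("example-client")
    EndpointRegistry.registry.removeObservation("example-client", Dtab.empty, "/s/example")
    snapshot
  }
}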
| koshelev/finagle | finagle-core/src/main/scala/com/twitter/finagle/client/EndpointRegistry.scala | Scala | apache-2.0 | 3,598 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.nio.charset.StandardCharsets
import scala.reflect.runtime.universe.{typeTag, TypeTag}
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.{CatalystTypeConverters, ScalaReflection}
import org.apache.spark.sql.catalyst.encoders.ExamplePointUDT
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.CalendarInterval
class LiteralExpressionSuite extends SparkFunSuite with ExpressionEvalHelper {
test("null") {
checkEvaluation(Literal.create(null, BooleanType), null)
checkEvaluation(Literal.create(null, ByteType), null)
checkEvaluation(Literal.create(null, ShortType), null)
checkEvaluation(Literal.create(null, IntegerType), null)
checkEvaluation(Literal.create(null, LongType), null)
checkEvaluation(Literal.create(null, FloatType), null)
checkEvaluation(Literal.create(null, DoubleType), null)
checkEvaluation(Literal.create(null, StringType), null)
checkEvaluation(Literal.create(null, BinaryType), null)
checkEvaluation(Literal.create(null, DecimalType.USER_DEFAULT), null)
checkEvaluation(Literal.create(null, DateType), null)
checkEvaluation(Literal.create(null, TimestampType), null)
checkEvaluation(Literal.create(null, CalendarIntervalType), null)
checkEvaluation(Literal.create(null, ArrayType(ByteType, true)), null)
checkEvaluation(Literal.create(null, ArrayType(StringType, true)), null)
checkEvaluation(Literal.create(null, MapType(StringType, IntegerType)), null)
checkEvaluation(Literal.create(null, StructType(Seq.empty)), null)
}
test("default") {
checkEvaluation(Literal.default(BooleanType), false)
checkEvaluation(Literal.default(ByteType), 0.toByte)
checkEvaluation(Literal.default(ShortType), 0.toShort)
checkEvaluation(Literal.default(IntegerType), 0)
checkEvaluation(Literal.default(LongType), 0L)
checkEvaluation(Literal.default(FloatType), 0.0f)
checkEvaluation(Literal.default(DoubleType), 0.0)
checkEvaluation(Literal.default(StringType), "")
checkEvaluation(Literal.default(BinaryType), "".getBytes(StandardCharsets.UTF_8))
checkEvaluation(Literal.default(DecimalType.USER_DEFAULT), Decimal(0))
checkEvaluation(Literal.default(DecimalType.SYSTEM_DEFAULT), Decimal(0))
checkEvaluation(Literal.default(DateType), DateTimeUtils.toJavaDate(0))
checkEvaluation(Literal.default(TimestampType), DateTimeUtils.toJavaTimestamp(0L))
checkEvaluation(Literal.default(CalendarIntervalType), new CalendarInterval(0, 0L))
checkEvaluation(Literal.default(ArrayType(StringType)), Array())
checkEvaluation(Literal.default(MapType(IntegerType, StringType)), Map())
checkEvaluation(Literal.default(StructType(StructField("a", StringType) :: Nil)), Row(""))
// ExamplePointUDT.sqlType is ArrayType(DoubleType, false).
checkEvaluation(Literal.default(new ExamplePointUDT), Array())
}
test("boolean literals") {
checkEvaluation(Literal(true), true)
checkEvaluation(Literal(false), false)
checkEvaluation(Literal.create(true), true)
checkEvaluation(Literal.create(false), false)
}
test("int literals") {
List(0, 1, Int.MinValue, Int.MaxValue).foreach { d =>
checkEvaluation(Literal(d), d)
checkEvaluation(Literal(d.toLong), d.toLong)
checkEvaluation(Literal(d.toShort), d.toShort)
checkEvaluation(Literal(d.toByte), d.toByte)
checkEvaluation(Literal.create(d), d)
checkEvaluation(Literal.create(d.toLong), d.toLong)
checkEvaluation(Literal.create(d.toShort), d.toShort)
checkEvaluation(Literal.create(d.toByte), d.toByte)
}
checkEvaluation(Literal(Long.MinValue), Long.MinValue)
checkEvaluation(Literal(Long.MaxValue), Long.MaxValue)
checkEvaluation(Literal.create(Long.MinValue), Long.MinValue)
checkEvaluation(Literal.create(Long.MaxValue), Long.MaxValue)
}
test("double literals") {
List(0.0, -0.0, Double.NegativeInfinity, Double.PositiveInfinity).foreach { d =>
checkEvaluation(Literal(d), d)
checkEvaluation(Literal(d.toFloat), d.toFloat)
checkEvaluation(Literal.create(d), d)
checkEvaluation(Literal.create(d.toFloat), d.toFloat)
}
checkEvaluation(Literal(Double.MinValue), Double.MinValue)
checkEvaluation(Literal(Double.MaxValue), Double.MaxValue)
checkEvaluation(Literal(Float.MinValue), Float.MinValue)
checkEvaluation(Literal(Float.MaxValue), Float.MaxValue)
checkEvaluation(Literal.create(Double.MinValue), Double.MinValue)
checkEvaluation(Literal.create(Double.MaxValue), Double.MaxValue)
checkEvaluation(Literal.create(Float.MinValue), Float.MinValue)
checkEvaluation(Literal.create(Float.MaxValue), Float.MaxValue)
}
test("string literals") {
checkEvaluation(Literal(""), "")
checkEvaluation(Literal("test"), "test")
checkEvaluation(Literal("\\u0000"), "\\u0000")
checkEvaluation(Literal.create(""), "")
checkEvaluation(Literal.create("test"), "test")
    checkEvaluation(Literal.create("\u0000"), "\u0000")
}
test("sum two literals") {
checkEvaluation(Add(Literal(1), Literal(1)), 2)
checkEvaluation(Add(Literal.create(1), Literal.create(1)), 2)
}
test("binary literals") {
checkEvaluation(Literal.create(new Array[Byte](0), BinaryType), new Array[Byte](0))
checkEvaluation(Literal.create(new Array[Byte](2), BinaryType), new Array[Byte](2))
checkEvaluation(Literal.create(new Array[Byte](0)), new Array[Byte](0))
checkEvaluation(Literal.create(new Array[Byte](2)), new Array[Byte](2))
}
test("decimal") {
List(-0.0001, 0.0, 0.001, 1.2, 1.1111, 5).foreach { d =>
checkEvaluation(Literal(Decimal(d)), Decimal(d))
checkEvaluation(Literal(Decimal(d.toInt)), Decimal(d.toInt))
checkEvaluation(Literal(Decimal(d.toLong)), Decimal(d.toLong))
checkEvaluation(Literal(Decimal((d * 1000L).toLong, 10, 3)),
Decimal((d * 1000L).toLong, 10, 3))
checkEvaluation(Literal(BigDecimal(d.toString)), Decimal(d))
checkEvaluation(Literal(new java.math.BigDecimal(d.toString)), Decimal(d))
checkEvaluation(Literal.create(Decimal(d)), Decimal(d))
checkEvaluation(Literal.create(Decimal(d.toInt)), Decimal(d.toInt))
checkEvaluation(Literal.create(Decimal(d.toLong)), Decimal(d.toLong))
checkEvaluation(Literal.create(Decimal((d * 1000L).toLong, 10, 3)),
Decimal((d * 1000L).toLong, 10, 3))
checkEvaluation(Literal.create(BigDecimal(d.toString)), Decimal(d))
checkEvaluation(Literal.create(new java.math.BigDecimal(d.toString)), Decimal(d))
}
}
private def toCatalyst[T: TypeTag](value: T): Any = {
val ScalaReflection.Schema(dataType, _) = ScalaReflection.schemaFor[T]
CatalystTypeConverters.createToCatalystConverter(dataType)(value)
}
test("array") {
def checkArrayLiteral[T: TypeTag](a: Array[T]): Unit = {
checkEvaluation(Literal(a), toCatalyst(a))
checkEvaluation(Literal.create(a), toCatalyst(a))
}
checkArrayLiteral(Array(1, 2, 3))
checkArrayLiteral(Array("a", "b", "c"))
checkArrayLiteral(Array(1.0, 4.0))
checkArrayLiteral(Array(CalendarInterval.MICROS_PER_DAY, CalendarInterval.MICROS_PER_HOUR))
val arr = collection.mutable.WrappedArray.make(Array(1.0, 4.0))
checkEvaluation(Literal(arr), toCatalyst(arr))
}
test("seq") {
def checkSeqLiteral[T: TypeTag](a: Seq[T], elementType: DataType): Unit = {
checkEvaluation(Literal.create(a), toCatalyst(a))
}
checkSeqLiteral(Seq(1, 2, 3), IntegerType)
checkSeqLiteral(Seq("a", "b", "c"), StringType)
checkSeqLiteral(Seq(1.0, 4.0), DoubleType)
checkSeqLiteral(Seq(CalendarInterval.MICROS_PER_DAY, CalendarInterval.MICROS_PER_HOUR),
CalendarIntervalType)
}
test("map") {
def checkMapLiteral[T: TypeTag](m: T): Unit = {
checkEvaluation(Literal.create(m), toCatalyst(m))
}
checkMapLiteral(Map("a" -> 1, "b" -> 2, "c" -> 3))
checkMapLiteral(Map("1" -> 1.0, "2" -> 2.0, "3" -> 3.0))
}
test("struct") {
def checkStructLiteral[T: TypeTag](s: T): Unit = {
checkEvaluation(Literal.create(s), toCatalyst(s))
}
checkStructLiteral((1, 3.0, "abcde"))
checkStructLiteral(("de", 1, 2.0f))
checkStructLiteral((1, ("fgh", 3.0)))
}
test("unsupported types (map and struct) in Literal.apply") {
def checkUnsupportedTypeInLiteral(v: Any): Unit = {
val errMsgMap = intercept[RuntimeException] {
Literal(v)
}
assert(errMsgMap.getMessage.startsWith("Unsupported literal type"))
}
checkUnsupportedTypeInLiteral(Map("key1" -> 1, "key2" -> 2))
checkUnsupportedTypeInLiteral(("mike", 29, 1.0))
}
test("SPARK-24571: char literals") {
checkEvaluation(Literal('X'), "X")
checkEvaluation(Literal.create('0'), "0")
    checkEvaluation(Literal('\u0000'), "\u0000")
    checkEvaluation(Literal.create('\n'), "\n")
}
test("fromString converts String/DataType input correctly") {
checkEvaluation(Literal.fromString(false.toString, BooleanType), false)
checkEvaluation(Literal.fromString(null, NullType), null)
checkEvaluation(Literal.fromString(Int.MaxValue.toByte.toString, ByteType), Int.MaxValue.toByte)
checkEvaluation(Literal.fromString(Short.MaxValue.toShort.toString, ShortType), Short.MaxValue
.toShort)
checkEvaluation(Literal.fromString(Int.MaxValue.toString, IntegerType), Int.MaxValue)
checkEvaluation(Literal.fromString(Long.MaxValue.toString, LongType), Long.MaxValue)
checkEvaluation(Literal.fromString(Float.MaxValue.toString, FloatType), Float.MaxValue)
checkEvaluation(Literal.fromString(Double.MaxValue.toString, DoubleType), Double.MaxValue)
checkEvaluation(Literal.fromString("1.23456", DecimalType(10, 5)), Decimal(1.23456))
checkEvaluation(Literal.fromString("Databricks", StringType), "Databricks")
val dateString = "1970-01-01"
checkEvaluation(Literal.fromString(dateString, DateType), java.sql.Date.valueOf(dateString))
val timestampString = "0000-01-01 00:00:00"
checkEvaluation(Literal.fromString(timestampString, TimestampType),
java.sql.Timestamp.valueOf(timestampString))
val calInterval = new CalendarInterval(1, 1)
checkEvaluation(Literal.fromString(calInterval.toString, CalendarIntervalType), calInterval)
}
}
| guoxiaolongzte/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/LiteralExpressionSuite.scala | Scala | apache-2.0 | 11,249 |
package org.jetbrains.plugins.scala
package codeInspection.typeChecking
import com.intellij.codeInspection.LocalInspectionTool
import org.jetbrains.plugins.scala.codeInspection.ScalaLightInspectionFixtureTestAdapter
/**
* Nikolay.Tropin
* 9/26/13
*/
class ComparingUnrelatedTypesInspectionTest extends ScalaLightInspectionFixtureTestAdapter {
protected def classOfInspection: Class[_ <: LocalInspectionTool] = classOf[ComparingUnrelatedTypesInspection]
protected val annotation: String = ComparingUnrelatedTypesInspection.inspectionName
def testWeakConformance() {
val text1 = s"""val a = 0
|val b: Short = 1
|${START}b == a$END"""
val text2 = s"""val a = 0
|val b = 1.0
|${START}b != a$END"""
val text3 = s"""val a = 0.0
|val b: Byte = 100
|${START}a == b$END"""
val text4 = s"${START}1 == 1.0$END"
checkTextHasNoErrors(text1)
checkTextHasNoErrors(text2)
checkTextHasNoErrors(text3)
checkTextHasNoErrors(text4)
}
def testValueTypes() {
val text1 = s"""val a = true
|val b = 1
|${START}b == a$END"""
val text2 = s"""val a = true
|val b = 0.0
|${START}a != b$END"""
val text3 = s"${START}true != 0$END"
val text4: String = s"${START}1.isInstanceOf[Boolean]$END"
checkTextHasError(text1)
checkTextHasError(text2)
checkTextHasError(text3)
checkTextHasError(text4)
}
def testString() {
val text1 = s"""val a = "a"
|val b = Array('a')
|${START}b == a$END"""
val text2 = s"""val a = "0"
|val b = 0
|${START}a == b$END"""
val text3 = s"""val s = "s"
|${START}s == 's'$END"""
val text4 = s"""val a = "a"
|val b: CharSequence = null
|${START}b != a$END"""
checkTextHasError(text1)
checkTextHasError(text2)
checkTextHasError(text3)
checkTextHasNoErrors(text4)
}
def testInheritors() {
val text1 = s"""val a = scala.collection.Iterable(1)
|val b = List(0)
|${START}b == a$END"""
val text2 = s"""case class A(i: Int)
|final class B extends A(1)
|val a: A = A(0)
|val b: B = new B
|${START}a == b$END"""
val text3 = """trait A
|object B extends A
|B.isInstanceOf[A]"""
checkTextHasNoErrors(text1)
checkTextHasNoErrors(text2)
checkTextHasNoErrors(text3)
}
def testFinal() {
val text1 = s"""case class A(i: Int)
|class B extends A(1)
|val a: A = A(0)
|val b: B = new B
|${START}a == b$END"""
val text2 = s"""final class A extends Serializable
|final class B extends Serializable
|val a: A = new A
|val b: B = new B
|${START}a == b$END"""
val text3 = s"""final class A extends Serializable
|final class B extends Serializable
|val a: A = new A
|${START}a.isInstanceOf[B]$END"""
checkTextHasNoErrors(text1)
checkTextHasError(text2)
checkTextHasError(text3)
}
def testTraits() {
val text1 = s"""trait A
|trait B
|val a: A = _
|val b: B = _
|${START}a == b$END"""
checkTextHasNoErrors(text1)
}
def testObject() {
val text1 = s"""trait A
|object B extends A
|val a: A = _
|${START}a == B$END"""
val text2 = s"""trait A
|object B extends A
|class C extends A
|val c = new C
|${START}c == B$END"""
val text3 = s"""trait A
|object B extends A
|class C extends A
|val c: A = new C
|${START}c != B$END"""
checkTextHasNoErrors(text1)
checkTextHasError(text2)
checkTextHasNoErrors(text3)
}
def testBoxedTypes() {
val text1 = """val i = new java.lang.Integer(0)
|i == 100"""
val text2 = """val b = new java.lang.Boolean(false)
|b equals true"""
val text3 = "def test(i: Integer) = if (i == null) \\"foo\\" else \\"bar\\""
checkTextHasNoErrors(text1)
checkTextHasNoErrors(text2)
checkTextHasNoErrors(text3)
}
def testExistential(): Unit = {
checkTextHasNoErrors("Seq(1).isInstanceOf[List[_])")
checkTextHasError(s"${START}Some(1).isInstanceOf[List[_]]$END")
checkTextHasNoErrors("def foo(x: Some[_]) { x == Some(1) }")
checkTextHasError(s"def foo(x: Some[_]) { ${START}x == Seq(1)$END }")
}
def testNumeric(): Unit = {
checkTextHasNoErrors("BigInt(1) == 1")
checkTextHasNoErrors("BigInt(1) == 1L")
checkTextHasNoErrors("BigInt(1) == new java.lang.Integer(1)")
checkTextHasError(s"${START}BigInt(1) == true$END")
checkTextHasError(s"${START}BigInt(1) == 1.toString$END")
}
def testTypeAlias(): Unit = {
checkTextHasNoErrors(
"""
|object A {
| type Coord = Float
| def isZero(n: Coord): Boolean = {
| n == 0
| }
|}
""".stripMargin)
checkTextHasError(
s"""
|object A {
| type Coord = String
| def isZero(n: Coord): Boolean = {
| ${START}n == 0$END
| }
|}
""".stripMargin)
checkTextHasNoErrors(
"""
|trait A {
| type Coord
|
| def isZero(n: Coord): Boolean = {
| n == 0
| }
|}
""".stripMargin)
}
def testOverridenMethods(): Unit = {
checkTextHasNoErrors(
"""
|case class Dummy(v: Int) {
| def ==(value: Int): String = v + " == " + value
| def !=(value: Int): Boolean = v != value
|}
|
|object Test {
| val a: String = Dummy(5) == 10
| val b: Boolean = Dummy(5) != 10
|}""".stripMargin)
checkTextHasError(
s"""
|case class Dummy(v: Int) {
| def ==(value: Int): String = v + " == " + value
| def !=(value: Int): Boolean = v != value
|}
|
|object Test {
| val b: Boolean = ${START}Dummy(5) eq 10$END
|}""".stripMargin)
}
def testOverridenWithImplicitParam(): Unit = {
checkTextHasError(
s"""
|class Store(val foo: Int, val bar: String)
|trait Binder[T] {
| def get(implicit store: Store): T
| def ==(other: Binder[T])(implicit store: Store) = get == other.get
| def ==(other: T)(implicit store: Store) = get == other
|}
|class FooBinder extends Binder[Int] {
| def get(implicit store: Store) = store.foo
|}
|class BarBinder extends Binder[String] {
| def get(implicit store: Store) = store.bar
|}
|
|val fooBinder = new FooBinder
|val barBinder = new BarBinder
|
|{
| implicit val store = new Store(12, ":)")
| (fooBinder == 12, fooBinder == 3, ${START}fooBinder == ":)"$END, barBinder == ":)") // (true, false, false, true)
|}
""".stripMargin
)
}
def testOverridenEquals(): Unit = {
checkTextHasError(
s"""
|case class Dummy(v: Int) {
| override def equals(other: Any): Boolean = other match {
| case Dummy(o) => o == v
| case _ => false
| }
|}
|
|object Test {
| val b: Boolean = ${START}Dummy(5) equals 10$END
|}""".stripMargin)
checkTextHasError(
s"""
|case class Dummy(v: Int) {
| override def equals(other: Any): Boolean = other match {
| case Dummy(o) => o == v
| case _ => false
| }
|}
|
|object Test {
| val b: Boolean = ${START}Dummy(5) == 10$END
|}""".stripMargin)
}
}
| whorbowicz/intellij-scala | test/org/jetbrains/plugins/scala/codeInspection/typeChecking/ComparingUnrelatedTypesInspectionTest.scala | Scala | apache-2.0 | 8,203 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.job.yarn
import org.apache.hadoop.security.UserGroupInformation
import org.apache.hadoop.yarn.conf.YarnConfiguration
import org.apache.samza.clustermanager.SamzaApplicationState
import org.apache.samza.config.Config
import org.apache.samza.coordinator.server.HttpServer
import org.apache.samza.coordinator.stream.CoordinatorStreamWriter
import org.apache.samza.coordinator.stream.messages.SetConfig
import org.apache.samza.metrics.ReadableMetricsRegistry
import org.apache.samza.util.Logging
import org.apache.samza.webapp.{ApplicationMasterRestServlet, ApplicationMasterWebServlet, YarnContainerHeartbeatServlet}
/**
* Samza's application master runs a very basic HTTP/JSON service to allow
* dashboards to check on the status of a job. SamzaAppMasterService starts
* up the web service when initialized.
*/
//This class is used in the refactored code path as called by run-jc.sh
class SamzaYarnAppMasterService(config: Config, samzaAppState: SamzaApplicationState, state: YarnAppState, registry: ReadableMetricsRegistry, yarnConfiguration: YarnConfiguration) extends Logging {
var rpcApp: HttpServer = null
var webApp: HttpServer = null
val SERVER_URL_OPT: String = "samza.autoscaling.server.url"
var securityManager: Option[SamzaAppMasterSecurityManager] = None
def onInit() {
// try starting the samza AM dashboard at a random rpc and tracking port
info("Starting webapp at a random rpc and tracking port")
rpcApp = new HttpServer(resourceBasePath = "scalate")
rpcApp.addServlet("/*", new ApplicationMasterRestServlet(config, samzaAppState, state, registry))
rpcApp.start
webApp = new HttpServer(resourceBasePath = "scalate")
webApp.addServlet("/*", new ApplicationMasterWebServlet(config, samzaAppState, state))
webApp.start
samzaAppState.jobModelManager.server.addServlet("/containerHeartbeat", new YarnContainerHeartbeatServlet(state, registry))
samzaAppState.jobModelManager.start
state.rpcUrl = rpcApp.getUrl
state.trackingUrl = webApp.getUrl
state.coordinatorUrl = samzaAppState.jobModelManager.server.getUrl
//write server url to coordinator stream
val coordinatorStreamWriter: CoordinatorStreamWriter = new CoordinatorStreamWriter(config)
coordinatorStreamWriter.start()
coordinatorStreamWriter.sendMessage(SetConfig.TYPE, SERVER_URL_OPT, state.coordinatorUrl.toString)
coordinatorStreamWriter.stop()
debug("Sent server url message with value: %s " format state.coordinatorUrl.toString)
info("Webapp is started at (rpc %s, tracking %s, coordinator %s)" format(state.rpcUrl, state.trackingUrl, state.coordinatorUrl))
    // start YarnSecurityManager for a secure cluster
if (UserGroupInformation.isSecurityEnabled) {
securityManager = Option {
val securityManager = new SamzaAppMasterSecurityManager(config, yarnConfiguration)
securityManager.start
securityManager
}
}
}
def onShutdown() {
if (rpcApp != null) {
rpcApp.stop
}
if (webApp != null) {
webApp.stop
}
samzaAppState.jobModelManager.stop
securityManager.map {
securityManager => securityManager.stop
}
}
}
| abhishekshivanna/samza | samza-yarn/src/main/scala/org/apache/samza/job/yarn/SamzaYarnAppMasterService.scala | Scala | apache-2.0 | 4,021 |
package views.html.admin
import play.templates._
import play.templates.TemplateMagic._
import play.api.templates._
import play.api.templates.PlayMagic._
import models._
import controllers._
import play.api.i18n._
import play.api.mvc._
import play.api.data._
import views.html._
import org.bson.types.ObjectId
/**/
object adminNavbar extends BaseScalaTemplate[play.api.templates.Html,Format[play.api.templates.Html]](play.api.templates.HtmlFormat) with play.api.templates.Template1[String,play.api.templates.Html] {
/**/
def apply/*1.2*/(nav: String):play.api.templates.Html = {
_display_ {import views.ViewHelper
Seq[Any](format.raw/*1.15*/("""
"""),format.raw/*4.1*/("""
<div class="navbar navbar-inverse">
<div class="navbar-inner">
<a class="brand" href="/logout">退出</a>
<ul class="nav">
"""),_display_(Seq[Any](/*9.9*/ViewHelper/*9.19*/.showActiveNav(nav))),format.raw/*9.38*/("""
</ul>
</div>
</div>"""))}
}
def render(nav:String): play.api.templates.Html = apply(nav)
def f:((String) => play.api.templates.Html) = (nav) => apply(nav)
def ref: this.type = this
}
/*
-- GENERATED --
DATE: Wed Jul 17 08:05:00 CST 2013
SOURCE: /opt/dacOrder.git/app/views/admin/adminNavbar.scala.html
HASH: a65e57c5f83de4b60f817e0e1c9c452bda66dd23
MATRIX: 548->1|662->14|690->41|866->183|884->193|924->212
LINES: 20->1|24->1|26->4|31->9|31->9|31->9
-- GENERATED --
*/
| kandole/simple_reservation | target/scala-2.10/src_managed/main/views/html/admin/adminNavbar.template.scala | Scala | gpl-2.0 | 1,627 |
import java.io.FileWriter
import traits.Arg
object Main extends Arg {
val arq = argstext("file")
val im = SubImage(arq)
val instances = im.bodies.groupBy(_._2).toList.sortBy(_._1.hashCode()).zipWithIndex.flatMap { case ((_, seq), idx) =>
seq map { case ((x, y), _) => (x, y, idx) }
}
val classes = instances.map(_._3).distinct
val header = Seq(s"@relation $arq", s"@attribute x numeric", s"@attribute y numeric", s"@attribute class {${classes.mkString(",")}}")
val body = "@data" +: instances.map { case (x, y, l) => s"$x,$y,$l" }
val arff = header ++ body
val arq2 = arq + ".arff"
val fw = new FileWriter(arq2)
fw.write(arff.mkString("\n"))
fw.close()
println(s"$arq2 written!")
}
| davips/image2arff | src/main/scala/Main.scala | Scala | gpl-3.0 | 716 |
package com.wavesplatform.mining
import com.wavesplatform.state.{Blockchain, Diff}
import com.wavesplatform.test.FreeSpec
import com.wavesplatform.transaction.Transaction
import org.scalacheck.Gen
import org.scalamock.scalatest.PathMockFactory
class OneDimensionalMiningConstraintSuite extends FreeSpec with PathMockFactory {
"OneDimensionalMiningConstraint" - {
"should be full if the limit is 0, but not overfilled" in {
val tank = createConstConstraint(0, 1, "const")
tank.isFull shouldBe true
tank.isOverfilled shouldBe false
}
"put(transaction)" - tests { (maxTxs, txs) =>
val constraint = createConstConstraint(maxTxs, transactionSize = 1, "txSize")
txs.foldLeft(constraint)(_.put(stub[Blockchain], _, Diff.empty))
}
}
private def tests(toConstraint: (Int, List[Transaction]) => MiningConstraint): Unit = {
val dontReachLimitGen: Gen[MiningConstraint] = for {
maxTxs <- Gen.chooseNum(1, Int.MaxValue)
txNumber <- Gen.chooseNum(0, maxTxs - 1)
txs <- Gen.listOfN(math.min(txNumber, 15), randomTransactionGen)
} yield toConstraint(maxTxs, txs)
"multiple items don't reach the limit" in forAll(dontReachLimitGen) { updatedConstraint =>
updatedConstraint.isFull shouldBe false
updatedConstraint.isOverfilled shouldBe false
}
val reachSoftLimitGen: Gen[MiningConstraint] = for {
maxTxs <- Gen.chooseNum(1, 10)
txs <- Gen.listOfN(maxTxs, randomTransactionGen)
} yield toConstraint(maxTxs, txs)
"multiple items reach the limit softly" in forAll(reachSoftLimitGen) { updatedConstraint =>
updatedConstraint.isFull shouldBe true
updatedConstraint.isOverfilled shouldBe false
}
val reachHardLimitGen: Gen[MiningConstraint] = for {
maxTxs <- Gen.chooseNum(1, 10)
txNumber <- Gen.chooseNum(maxTxs + 1, maxTxs + 10)
txs <- Gen.listOfN(txNumber, randomTransactionGen)
} yield toConstraint(maxTxs, txs)
"multiple items reach the limit with gap" in forAll(reachHardLimitGen) { updatedConstraint =>
updatedConstraint.isFull shouldBe true
updatedConstraint.isOverfilled shouldBe true
}
}
}
| wavesplatform/Waves | node/src/test/scala/com/wavesplatform/mining/OneDimensionalMiningConstraintSuite.scala | Scala | mit | 2,188 |
package org.jetbrains.plugins.scala.lang.psi.impl.search
import com.intellij.openapi.application.QueryExecutorBase
import com.intellij.openapi.progress.ProgressManager
import com.intellij.psi._
import com.intellij.psi.search.searches.ClassInheritorsSearch
import com.intellij.psi.search.searches.ClassInheritorsSearch.SearchParameters
import com.intellij.psi.search.{LocalSearchScope, PsiSearchScopeUtil, SearchScope}
import com.intellij.util.Processor
import org.jetbrains.plugins.scala.extensions.{PsiElementExt, inReadAction}
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiElement
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScNewTemplateDefinition
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScTemplateDefinition, ScTypeDefinition}
/**
* @author Nikolay.Tropin
*/
class ScalaLocalInheritorsSearcher extends QueryExecutorBase[PsiClass, ClassInheritorsSearch.SearchParameters] {
override def processQuery(params: SearchParameters, consumer: Processor[_ >: PsiClass]): Unit = {
val clazz = params.getClassToProcess
val (_, virtualFiles) = params.getScope match {
case local: LocalSearchScope if clazz.isInstanceOf[ScalaPsiElement] => (local, local.getVirtualFiles)
case _ => return
}
val project = clazz.getProject
for (virtualFile <- virtualFiles) {
ProgressManager.checkCanceled()
var continue = true
inReadAction {
if (continue) {
val psiFile: PsiFile = PsiManager.getInstance(project).findFile(virtualFile)
if (psiFile != null) {
psiFile.depthFirst().foreach {
case td: ScTemplateDefinition if continue =>
if (td.isInheritor(clazz, deep = true) && checkCandidate(td, params))
continue = consumer.process(td)
case _ =>
}
}
}
}
}
}
private def checkCandidate(candidate: PsiClass, parameters: ClassInheritorsSearch.SearchParameters): Boolean = {
val searchScope: SearchScope = parameters.getScope
ProgressManager.checkCanceled()
if (!PsiSearchScopeUtil.isInScope(searchScope, candidate)) false
else candidate match {
case _: ScNewTemplateDefinition => true
case td: ScTypeDefinition => parameters.getNameCondition.value(td.name)
}
}
}
| jastice/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/impl/search/ScalaLocalInheritorsSearcher.scala | Scala | apache-2.0 | 2,316 |
package com.googlecode.kanbanik.commands
import org.apache.shiro.SecurityUtils
import com.googlecode.kanbanik.builders.UserBuilder
import com.googlecode.kanbanik.model.User
import org.apache.shiro.subject.Subject
import com.googlecode.kanbanik.dtos.{SessionDto, ErrorDto, UserDto, EmptyDto}
class GetCurrentUserCommand extends Command[SessionDto, UserDto] {
override def execute(params: SessionDto, user: User): Either[UserDto, ErrorDto] = {
val sessionId = params.sessionId
if (sessionId.isDefined) {
val user = new Subject.Builder().sessionId(sessionId.get).buildSubject
if (user.isAuthenticated) {
val userPrincipal = user.getPrincipal.asInstanceOf[User]
// refresh from DB
Left(UserBuilder.buildDto(User.byId(userPrincipal.name), sessionId.getOrElse("")))
} else {
Left(UserBuilder.buildDto(User.unlogged, ""))
}
} else {
Left(UserBuilder.buildDto(User.unlogged, ""))
}
}
} | gudtago/kanbanik | kanbanik-server/src/main/scala/com/googlecode/kanbanik/commands/GetCurrentUserCommand.scala | Scala | apache-2.0 | 967 |
package uk.gov.gds.ier.transaction.crown.address
import uk.gov.gds.ier.test.FormTestSuite
import uk.gov.gds.ier.model.{PartialManualAddress, Addresses, PartialAddress}
class AddressFormTests
extends FormTestSuite
with AddressForms {
behavior of "AddressForms.addressForm"
it should "successfully bind a valid address" in {
val js = Json.toJson(
Map(
"address.uprn" -> "12345678",
"address.postcode" -> "SW1A1AA"
)
)
addressForm.bind(js).fold(
hasErrors => fail(serialiser.toJson(hasErrors)),
success => {
success.address.isDefined should be(true)
val lastUkAddress = success.address
lastUkAddress.isDefined should be(true)
val address = lastUkAddress.get.address
address.isDefined should be(true)
address.get.uprn should be(Some("12345678"))
address.get.postcode should be("SW1A1AA")
}
)
}
it should "successfully bind a valid manual input address" in {
val js = Json.toJson(
Map(
"address.manualAddress.lineOne" -> "Unit 4, Elgar Business Centre",
"address.manualAddress.lineTwo" -> "Moseley Road",
"address.manualAddress.lineThree" -> "Hallow",
"address.manualAddress.city" -> "Worcester",
"address.postcode" -> "SW1A1AA"
)
)
addressForm.bind(js).fold(
hasErrors => fail(serialiser.toJson(hasErrors)),
success => {
success.address.isDefined should be(true)
val lastUkAddress = success.address
lastUkAddress.isDefined should be(true)
val address = lastUkAddress.get.address
address.isDefined should be(true)
address.get.manualAddress should be(Some(
PartialManualAddress(
lineOne = Some("Unit 4, Elgar Business Centre"),
lineTwo = Some("Moseley Road"),
lineThree = Some("Hallow"),
city = Some("Worcester")
))
)
address.get.postcode should be("SW1A1AA")
}
)
}
it should "error out on empty json" in {
val js = JsNull
addressForm.bind(js).fold(
hasErrors => {
hasErrors.errors.size should be(2)
hasErrors.globalErrorMessages should be(Seq("Please answer this question"))
hasErrors.errorMessages("address") should be(Seq("Please answer this question"))
},
success => fail("Should have errored out")
)
}
it should "error out on empty values" in {
val js = Json.toJson(
Map(
"address.address" -> "",
"address.postcode" -> ""
)
)
addressForm.bind(js).fold(
hasErrors => {
hasErrors.errors.size should be(2)
hasErrors.globalErrorMessages should be(Seq("Please answer this question"))
hasErrors.errorMessages("address") should be(Seq("Please answer this question"))
},
success => fail("Should have errored out")
)
}
it should "error out on empty values in manual address" in {
val js = Json.toJson(
Map(
"address.manualAddress" -> "",
"address.postcode" -> ""
)
)
addressForm.bind(js).fold(
hasErrors => {
hasErrors.errors.size should be(2)
hasErrors.globalErrorMessages should be(Seq("Please answer this question"))
hasErrors.errorMessages("address") should be(Seq("Please answer this question"))
},
success => fail("Should have errored out")
)
}
it should "successfully bind possible Address list" in {
val possibleAddress = PartialAddress(addressLine = Some("123 Fake Street"),
uprn = Some("12345678"),
postcode = "AB12 3CD",
manualAddress = None)
val possibleAddressJS = serialiser.toJson(Addresses(List(possibleAddress)))
val js = Json.toJson(
Map(
"address.uprn" -> "12345678",
"address.postcode" -> "SW1A 1AA",
"possibleAddresses.jsonList" -> possibleAddressJS,
"possibleAddresses.postcode" -> "SW1A 1AA"
)
)
addressForm.bind(js).fold(
hasErrors => fail(serialiser.toJson(hasErrors.prettyPrint)),
success => {
success.address.isDefined should be(true)
val Some(address) = success.address
success.possibleAddresses.isDefined should be(true)
val Some(possibleAddresses) = success.possibleAddresses
success.address.isDefined should be(true)
val lastUkAddress = success.address
lastUkAddress.isDefined should be(true)
if (lastUkAddress.isDefined) {
val address = lastUkAddress.get.address
address.isDefined should be(true)
address.get.uprn should be(Some("12345678"))
address.get.postcode should be("SW1A 1AA")
}
possibleAddresses.jsonList.addresses should be(List(possibleAddress))
}
)
}
it should "error out if it looks like you haven't selected your address" in {
val possibleAddress = PartialAddress(
addressLine = Some("123 Fake Street"),
uprn = Some("12345678"),
postcode = "AB12 3CD",
manualAddress = None
)
val possibleAddressJS = serialiser.toJson(Addresses(List(possibleAddress)))
val js = Json.toJson(
Map(
"address.postcode" -> "SW1A 1AA",
"possibleAddresses.jsonList" -> possibleAddressJS,
"possibleAddresses.postcode" -> "SW1A 1AA"
)
)
addressForm.bind(js).fold(
hasErrors => {
hasErrors.errors.size should be(4)
hasErrors.errorMessages("address.uprn") should be(
Seq("Please select your address")
)
hasErrors.errorMessages("address.manualAddress") should be(
Seq("Please select your address")
)
hasErrors.globalErrorMessages should be(Seq("Please select your address"))
},
success => {
fail("Should have errored out")
}
)
}
it should "not error if you haven't selected your address but there is a manual address" in {
val possibleAddress = PartialAddress(
addressLine = Some("123 Fake Street"),
uprn = Some("12345678"),
postcode = "AB12 3CD",
manualAddress = None
)
val possibleAddressJS = serialiser.toJson(Addresses(List(possibleAddress)))
val js = Json.toJson(
Map(
"address.manualAddress.lineOne" -> "Unit 4, Elgar Business Centre",
"address.manualAddress.lineTwo" -> "Moseley Road",
"address.manualAddress.lineThree" -> "Hallow",
"address.manualAddress.city" -> "Worcester",
"address.postcode" -> "SW1A 1AA",
"possibleAddresses.jsonList" -> possibleAddressJS,
"possibleAddresses.postcode" -> "SW1A 1AA"
)
)
addressForm.bind(js).fold(
hasErrors => fail("Should not fail"),
success => {
success.address.isDefined should be(true)
val Some(address) = success.address
success.possibleAddresses.isDefined should be(true)
val Some(possibleAddresses) = success.possibleAddresses
success.address.isDefined should be(true)
val lastUkAddress = success.address
lastUkAddress.isDefined should be(true)
if (lastUkAddress.isDefined) {
val address = lastUkAddress.get.address
address.isDefined should be(true)
address.get.manualAddress should be(Some(
PartialManualAddress(
lineOne = Some("Unit 4, Elgar Business Centre"),
lineTwo = Some("Moseley Road"),
lineThree = Some("Hallow"),
city = Some("Worcester")
))
)
address.get.postcode should be("SW1A 1AA")
}
possibleAddresses.jsonList.addresses should be(List(possibleAddress))
}
)
}
it should "not error out with empty text" in {
val js = Json.toJson(
Map(
"address.uprn" -> "87654321",
"address.postcode" -> "SW1A 1AA",
"possibleAddresses.jsonList" -> "",
"possibleAddresses.postcode" -> ""
)
)
addressForm.bind(js).fold(
hasErrors => fail(serialiser.toJson(hasErrors.prettyPrint)),
success => {
success.address.isDefined should be(true)
val Some(address) = success.address
success.possibleAddresses.isDefined should be(false)
success.address.isDefined should be(true)
val lastUkAddress = success.address
lastUkAddress.isDefined should be(true)
if (lastUkAddress.isDefined) {
val address = lastUkAddress.get.address
address.isDefined should be(true)
address.get.uprn should be(Some("87654321"))
address.get.postcode should be("SW1A 1AA")
}
}
)
}
behavior of "AddressForms.lookupForm"
it should "succeed on valid postcode" in {
val js = Json.toJson(
Map(
"address.postcode" -> "SW1A 1AA"
)
)
lookupAddressForm.bind(js).fold(
hasErrors => fail(serialiser.toJson(hasErrors.prettyPrint)),
success => {
success.address.isDefined should be(true)
val lastUkAddress = success.address
lastUkAddress.isDefined should be(true)
if (lastUkAddress.isDefined) {
val address = lastUkAddress.get.address
address.isDefined should be(true)
address.get.postcode should be("SW1A 1AA")
address.get.uprn should be(None)
address.get.manualAddress should be(None)
address.get.addressLine should be (None)
}
}
)
}
it should "fail out on no postcode" in {
val js = Json.toJson(Map("address.postcode" -> ""))
lookupAddressForm.bind(js).fold(
hasErrors => {
hasErrors.errors.size should be(2)
hasErrors.errorMessages("address.postcode") should be(
Seq("Please enter your postcode")
)
},
success => fail("Should have failed out")
)
}
it should "fail out on empty json" in {
val js = JsNull
lookupAddressForm.bind(js).fold(
hasErrors => {
hasErrors.errors.size should be(2)
hasErrors.errorMessages("address.postcode") should be(
Seq("Please enter your postcode")
)
},
success => fail("Should have failed out")
)
}
it should "fail out on missing values" in {
val js = Json.toJson(Map("" -> ""))
lookupAddressForm.bind(js).fold(
hasErrors => {
hasErrors.errors.size should be(2)
hasErrors.errorMessages("address.postcode") should be(
Seq("Please enter your postcode")
)
},
success => fail("Should have failed out")
)
}
behavior of "AddressForms.manualAddressForm"
it should "succeed on valid input" in {
val js = Json.toJson(
Map(
"address.manualAddress.lineOne" -> "Unit 4, Elgar Business Centre",
"address.manualAddress.lineTwo" -> "Moseley Road",
"address.manualAddress.lineThree" -> "Hallow",
"address.manualAddress.city" -> "Worcester",
"address.postcode" -> "SW1A1AA"
)
)
manualAddressForm.bind(js).fold(
hasErrors => fail(hasErrors.errorsAsTextAll),
success => {
success.address.isDefined should be(true)
val lastUkAddress = success.address
lastUkAddress.isDefined should be(true)
val address = lastUkAddress.get.address
address.isDefined should be(true)
address.get.manualAddress should be(Some(
PartialManualAddress(
lineOne = Some("Unit 4, Elgar Business Centre"),
lineTwo = Some("Moseley Road"),
lineThree = Some("Hallow"),
city = Some("Worcester")
))
)
address.get.postcode should be("SW1A1AA")
}
)
}
it should "error out on empty values for manual address" in {
val js = Json.toJson(
Map(
"address.manualAddress" -> "",
"address.postcode" -> "SW1A 1AA"
)
)
manualAddressForm.bind(js).fold(
hasErrors => {
hasErrors.errors.size should be(2)
hasErrors.globalErrorMessages should be(Seq("Please answer this question"))
hasErrors.errorMessages("address") should be(
Seq("Please answer this question")
)
},
success => fail("Should have errored out")
)
}
it should "error out on empty json for manual address" in {
val js = JsNull
manualAddressForm.bind(js).fold(
hasErrors => {
hasErrors.errors.size should be(2)
hasErrors.globalErrorMessages should be(Seq("Please answer this question"))
hasErrors.errorMessages("address") should be(
Seq("Please answer this question")
)
},
success => fail("Should have errored out")
)
}
it should "error out on all empty values for manual address" in {
val js = Json.toJson(Map(
"address.manualAddress.lineOne" -> "",
"address.manualAddress.lineTwo" -> "",
"address.manualAddress.lineThree" -> "",
"address.manualAddress.city" -> "",
"address.postcode" -> "SW1A 1AA"
))
manualAddressForm.bind(js).fold(
hasErrors => {
hasErrors.keyedErrorsAsMap should matchMap(Map(
"address" -> Seq("Please answer this question")
))
},
success => fail("Should have errored out")
)
}
it should "error out on all empty lines for manual address" in {
val js = Json.toJson(Map(
"address.manualAddress.lineOne" -> "",
"address.manualAddress.lineTwo" -> "",
"address.manualAddress.lineThree" -> "",
"address.manualAddress.city" -> "Worcester",
"address.postcode" -> "SW1A 1AA"
))
manualAddressForm.bind(js).fold(
hasErrors => {
hasErrors.keyedErrorsAsMap should matchMap(Map(
"address.manualAddress" -> Seq("At least one address line is required")
))
},
success => fail("Should have errored out")
)
}
it should "successfully bind when lineOne is not empty" in {
val js = Json.toJson(Map(
"address.manualAddress.lineOne" -> "line one",
"address.manualAddress.lineTwo" -> "",
"address.manualAddress.lineThree" -> "",
"address.manualAddress.city" -> "Worcester",
"address.postcode" -> "SW1A1AA"
))
addressForm.bind(js).fold(
hasErrors => fail(serialiser.toJson(hasErrors)),
success => {
val Some(lastUkAddress) = success.address
val Some(partialAddress) = lastUkAddress.address
val Some(manualAddress) = partialAddress.manualAddress
manualAddress should be(PartialManualAddress(
lineOne = Some("line one"),
lineTwo = None,
lineThree = None,
city = Some("Worcester")
))
partialAddress.postcode should be("SW1A1AA")
}
)
}
it should "successfully bind when lineTwo is not empty" in {
val js = Json.toJson(Map(
"address.manualAddress.lineOne" -> "",
"address.manualAddress.lineTwo" -> "line two",
"address.manualAddress.lineThree" -> "",
"address.manualAddress.city" -> "Worcester",
"address.postcode" -> "SW1A1AA"
))
addressForm.bind(js).fold(
hasErrors => fail(serialiser.toJson(hasErrors)),
success => {
val Some(lastUkAddress) = success.address
val Some(partialAddress) = lastUkAddress.address
val Some(manualAddress) = partialAddress.manualAddress
manualAddress should be(PartialManualAddress(
lineOne = None,
lineTwo = Some("line two"),
lineThree = None,
city = Some("Worcester")
))
partialAddress.postcode should be("SW1A1AA")
}
)
}
it should "successfully bind when lineThree is not empty" in {
val js = Json.toJson(Map(
"address.manualAddress.lineOne" -> "",
"address.manualAddress.lineTwo" -> "",
"address.manualAddress.lineThree" -> "line three",
"address.manualAddress.city" -> "Worcester",
"address.postcode" -> "SW1A1AA"
))
addressForm.bind(js).fold(
hasErrors => fail(serialiser.toJson(hasErrors)),
success => {
val Some(lastUkAddress) = success.address
val Some(partialAddress) = lastUkAddress.address
val Some(manualAddress) = partialAddress.manualAddress
manualAddress should be(PartialManualAddress(
lineOne = None,
lineTwo = None,
lineThree = Some("line three"),
city = Some("Worcester")
))
partialAddress.postcode should be("SW1A1AA")
}
)
}
}
| alphagov/ier-frontend | test/uk/gov/gds/ier/transaction/crown/address/AddressFormTests.scala | Scala | mit | 16,648 |
package com.eevolution.context.dictionary.infrastructure.repository
import java.util.UUID
import com.eevolution.context.dictionary.domain._
import com.eevolution.context.dictionary.domain.model.ChangeLog
import com.eevolution.context.dictionary.infrastructure.db.DbContext._
import com.eevolution.utils.PaginatedSequence
import com.lightbend.lagom.scaladsl.persistence.jdbc.JdbcSession
import scala.concurrent.{ExecutionContext, Future}
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: [email protected], http://www.e-evolution.com , http://github.com/EmerisScala
* Created by [email protected] , www.e-evolution.com on 07/11/17.
*/
/**
 * Change Log Repository
 * @param session JDBC session provided to the repository
 * @param executionContext execution context used for the asynchronous (Future) calls
 */
class ChangeLogRepository (session: JdbcSession)(implicit executionContext: ExecutionContext)
extends api.repository.ChangeLogRepository[ChangeLog , Int]
with ChangeLogMapping {
def getById(id: Int): Future[ChangeLog] = {
Future(run(queryChangeLog.filter(_.changeLogId == lift(id))).headOption.get)
}
def getByUUID(uuid: UUID): Future[ChangeLog] = {
Future(run(queryChangeLog.filter(_.uuid == lift(uuid.toString))).headOption.get)
}
  def getByChangeLogId(id : Int) : Future[List[ChangeLog]] = {
    Future(run(queryChangeLog.filter(_.changeLogId == lift(id))))
  }
def getAll() : Future[List[ChangeLog]] = {
Future(run(queryChangeLog))
}
def getAllByPage(page: Int, pageSize: Int): Future[PaginatedSequence[ChangeLog]] = {
val offset = page * pageSize
val limit = (page + 1) * pageSize
for {
count <- countChangeLog()
elements <- if (offset > count) Future.successful(Nil)
else selectChangeLog(offset, limit)
} yield {
PaginatedSequence(elements, page, pageSize, count)
}
}
private def countChangeLog() = {
Future(run(queryChangeLog.size).toInt)
}
private def selectChangeLog(offset: Int, limit: Int): Future[Seq[ChangeLog]] = {
Future(run(queryChangeLog).drop(offset).take(limit).toSeq)
}
}
| adempiere/ADReactiveSystem | dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/repository/ChangeLogRepository.scala | Scala | gpl-3.0 | 2,718 |
package scalaxy.dsl
import scala.language.experimental.macros
import scala.reflect.macros.blackbox.Context
import scala.collection.GenTraversableOnce
import scala.collection.generic.CanBuildFrom
import scala.collection.breakOut
import scala.reflect.runtime.{ universe => ru }
object ReifiedFilterMonadicMacros {
def reifyFunction[A : c.WeakTypeTag, B : c.WeakTypeTag]
(c: Context)
(f: c.Expr[A => B])
: c.Expr[ReifiedFunction[A, B]] =
{
import c.universe._
val tf = c.typecheck(f.tree)
var definedSymbols = Set[Symbol]()
var referredSymbols = Set[Symbol]()
new Traverser {
override def traverse(tree: Tree) {
val sym = tree.symbol
if (sym != NoSymbol) {
tree match {
case _: DefTree =>
definedSymbols += sym
case Ident(_) =>
referredSymbols += sym
case _: RefTree =>
c.warning(
tree.pos,
s"Maybe an unsupported reference type: $tree (${showRaw(tree)})")
case _ =>
}
}
super.traverse(tree)
}
}.traverse(tf)
val capturedSymbols: Map[Symbol, String] =
(
for (capturedSymbol <- (referredSymbols -- definedSymbols)) yield {
capturedSymbol -> c.freshName(capturedSymbol.name.toString)
}
).toMap
val ttf = c.Expr[A => B](
new Transformer {
object CapturedSymbol {
def unapply(tree: Tree) = tree match {
case Ident(_) =>
capturedSymbols.get(tree.symbol).map(Some(_)).getOrElse(None)
case _ =>
None
}
}
override def transform(tree: Tree): Tree = tree match {
case CapturedSymbol(newName) =>
Ident(TermName(newName))
case _ =>
super.transform(tree)
}
}.transform(tf)
)
val capturesExpr = c.Expr[Map[String, () => Any]](
Apply(
reify({ Map }).tree,
for ((capturedSymbol, newName) <- capturedSymbols.toList) yield {
val s = c.literal(newName)
val v = c.Expr[Any](Ident(capturedSymbol))
reify((s.splice, () => v.splice)).tree
}
)
)
val reifiedTree = c.Expr[ru.Expr[ru.Tree]](c.reifyTree(
treeBuild.mkRuntimeUniverseRef,
EmptyTree,
ttf.tree
))
reify({
new ReifiedFunction(
ttf.splice,
capturesExpr.splice,
reifiedTree.splice.tree.asInstanceOf[ru.Function]
)
})
}
def foreachImpl[A, Repr, U](c: Context)(f: c.Expr[A => U]): c.Expr[Unit] =
{
import c.universe._
val reifiedFunction = reifyFunction(c)(f)
val colExpr = c.prefix.asInstanceOf[c.Expr[ReifiedFilterMonadic[A, Repr]]]
reify({
val col = colExpr.splice
col.reifiedForeach(reifiedFunction.splice, col.reifiedFilters)
})
}
def withFilterImpl[A, Repr](c: Context)(f: c.Expr[A => Boolean])
: c.Expr[ReifiedFilterMonadic[A, Repr]] =
{
import c.universe._
val reifiedFunction = reifyFunction(c)(f)
val colExpr = c.prefix.asInstanceOf[c.Expr[ReifiedFilterMonadic[A, Repr]]]
reify({
val col = colExpr.splice
col.withFilters(col.reifiedFilters :+ reifiedFunction.splice): ReifiedFilterMonadic[A, Repr]
})
}
def flatMapImpl[A, Repr, B, That]
(c: Context)
(f: c.Expr[A => GenTraversableOnce[B]])
(bf: c.Expr[CanBuildFrom[Repr, B, That]])
: c.Expr[That] =
{
import c.universe._
val reifiedFunction = reifyFunction(c)(f)
val colExpr = c.prefix.asInstanceOf[c.Expr[ReifiedFilterMonadic[A, Repr]]]
reify({
val col = colExpr.splice
col.reifiedFlatMap(reifiedFunction.splice, col.reifiedFilters)(bf.splice)
})
}
}
| nativelibs4java/Scalaxy | DSL/ReifiedFilterMonadicMacros.scala | Scala | bsd-3-clause | 3,851 |
package com.catinthedark.gban
import com.catinthedark.gban.network.{NetworkServerControl, NetworkClientControl, NetworkControl}
/**
* Created by over on 18.04.15.
*/
class Shared0(
val serverAddress: String
) {
val networkControl = if (serverAddress != null) {
new NetworkClientControl(serverAddress)
} else {
new NetworkServerControl()
}
var networkControlThread: Thread = null
}
| cat-in-the-dark/old48_34_game | src/main/scala/com/catinthedark/gban/Shared0.scala | Scala | mit | 403 |
/**
* Copyright (C) 2013 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.fb
import org.orbeon.oxf.common.OXFException
import org.orbeon.oxf.resources.ResourceManagerWrapper
import org.orbeon.oxf.util.XPath
import org.orbeon.saxon.dom4j.DocumentWrapper
import org.orbeon.saxon.om.{DocumentInfo, NodeInfo}
import org.orbeon.scaxon.XML._
import org.orbeon.oxf.fr.FormRunner.orbeonRoles
trait PermissionsOps {
/** Loads the form-builder-permissions.xml. For code called from XForms, that instance is loaded in
* fr/includes/permissions-model.xml.
*/
def fbRoles: DocumentInfo = {
val supportedPaths = List("/config/form-builder-permissions.xml",
"/config/form-runner-roles.xml")
val resourceManager = ResourceManagerWrapper.instance
val documentOpt = supportedPaths.collectFirst{case key if resourceManager.exists(key) ⇒ resourceManager.getContentAsDOM4J(key)}
val document = documentOpt.getOrElse(throw new OXFException("Can't find configuration for Form Builder permissions"))
new DocumentWrapper(document, null, XPath.GlobalConfiguration)
}
// Whether, given permissions specified in XML, the user has Form Builder access to the given app/form
def hasAdminPermissionsFor(fbPermissions: NodeInfo, app: String, form: String): Boolean = {
def findAppElement(a: String) = fbPermissions \ "app" find (_ \@ "name" === a)
def findFormElement(e: NodeInfo, f: String) = e \ "form" find (_ \@ "name" === f)
def matchesApp(a: String) = findAppElement(a).isDefined
def matchesForm(e: NodeInfo, f: String) = findFormElement(e, f).isDefined
matchesApp("*") || (matchesApp(app) && (matchesForm(findAppElement(app).get, "*") || matchesForm(findAppElement(app).get, form)))
}
private def findConfiguredRoles(formBuilderRoles: NodeInfo) =
formBuilderRoles.root \ * \ "role"
// For XForms callers
// Result document contains a tree structure of apps and forms if roles are configured
def formBuilderPermissionsAsXML(formBuilderRoles: NodeInfo): NodeInfo = {
// NOTE: Whether in container or header mode, roles are parsed into the Orbeon-Roles header at this point
if (findConfiguredRoles(formBuilderRoles).isEmpty)
<apps has-roles="false"/>
else
<apps has-roles="true">{
formBuilderPermissions(formBuilderRoles, orbeonRoles).to[List].sortBy(_._1) map { case (app, forms) ⇒
<app name={app}>{ forms.to[List].sorted map { form ⇒ <form name={form}/> } }</app>
}
}</apps>
}
def formBuilderPermissions(formBuilderRoles: NodeInfo, incomingRoleNames: Set[String]): Map[String, Set[String]] = {
val configuredRoles = findConfiguredRoles(formBuilderRoles)
if (configuredRoles.isEmpty) {
// No role configured
Map()
} else {
// Roles configured
val allConfiguredRoleNames = configuredRoles map (_.attValue("name")) toSet
val applicableRoleNames = allConfiguredRoleNames & incomingRoleNames
val applicableRoles = configuredRoles filter (e ⇒ (applicableRoleNames + "*")(e.attValue("name")))
val applicableAppNames = applicableRoles map (_.attValue("app")) toSet
if (applicableAppNames("*")) {
// User has access to all apps (and therefore all forms)
Map("*" → Set("*"))
} else {
// User has access to certain apps only
(for {
app ← applicableAppNames
forms = {
val applicableFormsForApp = applicableRoles filter (_.attValue("app") == app) map (_.attValue("form")) toSet
if (applicableFormsForApp("*")) Set("*") else applicableFormsForApp
}
} yield
app → forms) toMap
}
}
}
}
| wesley1001/orbeon-forms | src/main/scala/org/orbeon/oxf/fb/PermissionsOps.scala | Scala | lgpl-2.1 | 4,325 |
/*
* Copyright (c) 2016, Team Mion
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package io.teammion.morefood.util
import java.util
import net.minecraft.inventory.IInventory
import net.minecraft.item.ItemStack
/**
* Provides easy iteration over IInventory
*
* @author Stefan Wimmer <[email protected]>
*/
class InventoryIterator(inv : IInventory)
extends util.Iterator[ItemStack]
{
private var i : Int = -1
    /**
     * Checks whether the iterator has more elements
     * @return "true" if more elements remain
     */
override def hasNext : Boolean =
i < inv.getSizeInventory - 1
    /**
     * Gets the next element and advances the index by 1
     * @return Next element
     */
override def next() : ItemStack =
{
i += 1
inv.getStackInSlot(i)
}
    /**
     * Gets the index of the element last returned by next()
     * @return Index of the last returned element
     */
def getIndex : Int =
i
/**
* Get inventory
* @return Inventory
*/
def getInventory : IInventory =
inv
    /**
     * Calls the given function for each remaining element
     * @param fn Function to call
     */
def forEach(fn : (ItemStack, InventoryIterator) => Unit) : Unit =
{
val itr : InventoryIterator = new InventoryIterator(inv)
while (itr.hasNext)
fn(itr.next(), itr)
}
}
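// A minimal usage sketch (illustrative only, not part of the original mod code):
// it walks an inventory with forEach and counts the occupied slots. The object
// name and the null check for empty slots are assumptions made for this sketch.
object InventoryIteratorExample
{
    def countNonEmptyStacks(inv : IInventory) : Int =
    {
        var count : Int = 0
        new InventoryIterator(inv).forEach((stack, _) =>
            if (stack != null)
                count += 1
        )
        count
    }
}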
| teammion/tm-morefood | src/main/scala/io/teammion/morefood/util/InventoryIterator.scala | Scala | isc | 2,818 |
/**
* Copyright (c) 2002-2014, OnPoint Digital, Inc. All rights reserved
*
* THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* @author Alex Westphal 09/Jun/2014
* @version 09/Jun/2014
*/
package timez.syntax.time
import java.time.ZoneId
trait ZoneIdOps extends Ops[ZoneId] {
def id = self.getId
def rules = self.getRules
}
trait ZoneIdSyntax {
implicit def ToZoneIdOps(zoneId: ZoneId) = new ZoneIdOps {
override def self = zoneId
}
}
| alexwestphal/timez | src/main/scala/timez/syntax/time/ZoneIdSyntax.scala | Scala | bsd-3-clause | 890 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.CarbonTableIdentifierImplicit
import org.apache.spark.sql.catalyst.analysis.{UnresolvedAlias, UnresolvedAttribute, UnresolvedFunction, UnresolvedRelation, UnresolvedStar}
import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, Cast, NamedExpression}
import org.apache.spark.sql.catalyst.plans.Inner
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules._
import org.apache.spark.sql.execution.{ProjectExec, SparkSqlParser, SubqueryExec}
import org.apache.spark.sql.execution.command.ProjectForDeleteCommand
import org.apache.spark.sql.execution.datasources.LogicalRelation
import org.apache.carbondata.core.constants.CarbonCommonConstants
/**
* Insert into carbon table from other source
*/
object CarbonPreInsertionCasts extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = {
plan.transform {
// Wait until children are resolved.
case p: LogicalPlan if !p.childrenResolved => p
case p@InsertIntoTable(relation: LogicalRelation, _, child, _, _)
if relation.relation.isInstanceOf[CarbonDatasourceHadoopRelation] =>
castChildOutput(p, relation.relation.asInstanceOf[CarbonDatasourceHadoopRelation], child)
}
}
def castChildOutput(p: InsertIntoTable,
relation: CarbonDatasourceHadoopRelation,
child: LogicalPlan)
: LogicalPlan = {
if (relation.carbonRelation.output.size > CarbonCommonConstants
.DEFAULT_MAX_NUMBER_OF_COLUMNS) {
      sys.error("Maximum number of columns supported by carbon is: " +
        CarbonCommonConstants.DEFAULT_MAX_NUMBER_OF_COLUMNS)
}
if (child.output.size >= relation.carbonRelation.output.size) {
val newChildOutput = child.output.zipWithIndex.map { columnWithIndex =>
columnWithIndex._1 match {
case attr: Alias =>
Alias(attr.child, s"col${ columnWithIndex._2 }")(attr.exprId)
case attr: Attribute =>
Alias(attr, s"col${ columnWithIndex._2 }")(NamedExpression.newExprId)
case attr => attr
}
}
val newChild: LogicalPlan = if (newChildOutput == child.output) {
p.child
} else {
Project(newChildOutput, child)
}
InsertIntoCarbonTable(relation, p.partition, newChild, p.overwrite, p.ifNotExists)
} else {
sys.error("Cannot insert into target table because column number are different")
}
}
}
case class CarbonIUDAnalysisRule(sparkSession: SparkSession) extends Rule[LogicalPlan] {
private val parser = new SparkSqlParser(sparkSession.sessionState.conf)
private def processUpdateQuery(
table: UnresolvedRelation,
columns: List[String],
selectStmt: String,
filter: String): LogicalPlan = {
var includedDestColumns = false
var includedDestRelation = false
var addedTupleId = false
def prepareTargetReleation(relation: UnresolvedRelation): SubqueryAlias = {
val tupleId = UnresolvedAlias(Alias(UnresolvedFunction("getTupleId",
Seq.empty, isDistinct = false), "tupleId")())
val projList = Seq(
UnresolvedAlias(UnresolvedStar(Option(table.alias.toSeq))), tupleId)
      // include tuple id and rest of the required columns in subquery
SubqueryAlias(table.alias.getOrElse(""),
Project(projList, relation), Option(table.tableIdentifier))
}
// get the un-analyzed logical plan
val targetTable = prepareTargetReleation(table)
val selectPlan = parser.parsePlan(selectStmt) transform {
case Project(projectList, child) if !includedDestColumns =>
includedDestColumns = true
if (projectList.size != columns.size) {
sys.error("Number of source and destination columns are not matching")
}
val renamedProjectList = projectList.zip(columns).map{ case(attr, col) =>
attr match {
case UnresolvedAlias(child22, _) =>
UnresolvedAlias(Alias(child22, col + "-updatedColumn")())
case UnresolvedAttribute(param) =>
UnresolvedAlias(Alias(attr, col + "-updatedColumn")())
// UnresolvedAttribute(col + "-updatedColumn")
// UnresolvedAlias(Alias(child, col + "-updatedColumn")())
case _ => attr
}
}
val list = Seq(
UnresolvedAlias(UnresolvedStar(Option(table.alias.toSeq)))) ++ renamedProjectList
Project(list, child)
case Filter(cond, child) if !includedDestRelation =>
includedDestRelation = true
Filter(cond, Join(child, targetTable, Inner, None))
case r @ UnresolvedRelation(t, a) if !includedDestRelation && t != table.tableIdentifier =>
includedDestRelation = true
Join(r, targetTable, Inner, None)
}
val updatedSelectPlan : LogicalPlan = if (!includedDestRelation) {
// special case to handle self join queries
// Eg. update tableName SET (column1) = (column1+1)
selectPlan transform {
case relation: UnresolvedRelation
if table.tableIdentifier == relation.tableIdentifier && !addedTupleId =>
addedTupleId = true
targetTable
}
} else {
selectPlan
}
val finalPlan = if (filter.length > 0) {
val alias = table.alias.getOrElse("")
var transformed: Boolean = false
// Create a dummy projection to include filter conditions
var newPlan: LogicalPlan = null
if (table.tableIdentifier.database.isDefined) {
newPlan = parser.parsePlan("select * from " +
table.tableIdentifier.database.getOrElse("") + "." +
table.tableIdentifier.table + " " + alias + " " + filter)
}
else {
newPlan = parser.parsePlan("select * from " +
table.tableIdentifier.table + " " + alias + " " + filter)
}
newPlan transform {
case UnresolvedRelation(t, Some(a))
if !transformed && t == table.tableIdentifier && a == alias =>
transformed = true
// Add the filter condition of update statement on destination table
SubqueryAlias(alias, updatedSelectPlan, Option(table.tableIdentifier))
}
} else {
updatedSelectPlan
}
val tid = CarbonTableIdentifierImplicit.toTableIdentifier(Seq(table.tableIdentifier.toString()))
val tidSeq = Seq(getDB.getDatabaseName(tid.database, sparkSession))
val destinationTable = UnresolvedRelation(table.tableIdentifier, table.alias)
ProjectForUpdate(destinationTable, columns, Seq(finalPlan))
}
def processDeleteRecordsQuery(selectStmt: String, table: UnresolvedRelation): LogicalPlan = {
val tidSeq = Seq(getDB.getDatabaseName(table.tableIdentifier.database, sparkSession),
table.tableIdentifier.table)
var addedTupleId = false
val parsePlan = parser.parsePlan(selectStmt)
val selectPlan = parsePlan transform {
case relation: UnresolvedRelation
if table.tableIdentifier == relation.tableIdentifier && !addedTupleId =>
addedTupleId = true
val tupleId = UnresolvedAlias(Alias(UnresolvedFunction("getTupleId",
Seq.empty, isDistinct = false), "tupleId")())
val alias = table.alias match {
case Some(alias) => Some(table.alias.toSeq)
case _ => None
}
val projList = Seq(
UnresolvedAlias(UnresolvedStar(alias)), tupleId)
        // include tuple id in subquery
Project(projList, relation)
}
ProjectForDeleteCommand(
selectPlan,
tidSeq,
System.currentTimeMillis().toString)
}
override def apply(logicalplan: LogicalPlan): LogicalPlan = {
logicalplan transform {
case UpdateTable(t, cols, sel, where) => processUpdateQuery(t, cols, sel, where)
case DeleteRecords(statement, table) => processDeleteRecordsQuery(statement, table)
}
}
}
| aniketadnaik/carbondataStreamIngest | integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonAnalysisRules.scala | Scala | apache-2.0 | 8,731 |
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.http.action.async.ws
import scala.collection.mutable
import io.gatling.commons.stats.{ KO, OK }
import io.gatling.commons.util.ClockSingleton.nowMillis
import io.gatling.commons.validation.Success
import io.gatling.core.stats.StatsEngine
import io.gatling.http.action.async._
import io.gatling.http.ahc.HttpEngine
import io.gatling.http.check.async._
import akka.actor.Props
import org.asynchttpclient.ws.WebSocket
object WsActor {
def props(wsName: String, statsEngine: StatsEngine, httpEngine: HttpEngine) =
Props(new WsActor(wsName, statsEngine, httpEngine))
}
class WsActor(wsName: String, statsEngine: StatsEngine, httpEngine: HttpEngine) extends AsyncProtocolActor(statsEngine) {
private def goToOpenState(webSocket: WebSocket): NextTxBasedBehaviour =
tx => openState(webSocket, tx)
def receive = initialState
val initialState: Receive = {
case OnOpen(tx, webSocket, time) =>
import tx._
logger.debug(s"Websocket '$wsName' open")
val newSession = session.set(wsName, self)
val newTx = tx.copy(session = newSession)
check match {
case None =>
logResponse(session, requestName, OK, start, time)
context.become(openState(webSocket, newTx))
next ! newSession
case Some(c) =>
// hack, reset check so that there's no pending one
setCheck(newTx.copy(check = None), requestName, c, next, newSession, goToOpenState(webSocket))
}
case OnFailedOpen(tx, message, end) =>
import tx._
logger.debug(s"Websocket '$wsName' failed to open: $message")
logResponse(session, requestName, KO, start, end, Some(message))
next ! session.markAsFailed
context.stop(self)
}
def openState(webSocket: WebSocket, tx: AsyncTx): Receive = {
def handleClose(status: Int, reason: String, time: Long): Unit = {
if (tx.protocol.wsPart.reconnect)
if (tx.protocol.wsPart.maxReconnects.exists(_ <= tx.reconnectCount))
handleCrash(s"Websocket '$wsName' was unexpectedly closed with status $status and message $reason and max reconnect was reached", time)
else
disconnectedState(status, reason, tx)
else
handleCrash(s"Websocket '$wsName' was unexpectedly closed with status $status and message $reason", time)
}
def handleCrash(message: String, time: Long): Unit = {
tx.check.foreach { check =>
logResponse(tx.session, tx.requestName, KO, tx.start, time, Some(message))
}
context.become(crashedState(tx, message))
}
{
case Send(requestName, message, check, next, session) =>
logger.debug(s"Sending message check on WebSocket '$wsName': $message")
val now = nowMillis
check match {
case Some(c) =>
// do this immediately instead of self sending a Listen message
          // so that other messages don't get a chance to be handled before it
setCheck(tx, requestName + " Check", c, next, session, goToOpenState(webSocket))
case _ => reconciliate(tx, next, session, goToOpenState(webSocket))
}
message match {
case TextMessage(text) => webSocket.sendTextFrame(text)
case BinaryMessage(bytes) => webSocket.sendBinaryFrame(bytes)
}
logResponse(session, requestName, OK, now, now)
case SetCheck(requestName, check, next, session) =>
logger.debug(s"Setting check on WebSocket '$wsName'")
setCheck(tx, requestName, check, next, session, goToOpenState(webSocket))
case CancelCheck(requestName, next, session) =>
logger.debug(s"Cancelling check on WebSocket '$wsName'")
val newTx = tx
.applyUpdates(session)
.copy(check = None, pendingCheckSuccesses = Nil)
context.become(openState(webSocket, newTx))
next ! newTx.session
case CheckTimeout(check) =>
logger.debug(s"Check on WebSocket '$wsName' timed out")
tx.check match {
case Some(`check`) =>
check.expectation match {
case ExpectedCount(count) if count == tx.pendingCheckSuccesses.size =>
succeedPendingCheck(tx, tx.pendingCheckSuccesses, goToOpenState(webSocket))
case ExpectedRange(range) if range.contains(tx.pendingCheckSuccesses.size) =>
succeedPendingCheck(tx, tx.pendingCheckSuccesses, goToOpenState(webSocket))
case _ =>
val newTx = failPendingCheck(tx, "Check failed: Timeout")
context.become(openState(webSocket, newTx))
if (check.blocking)
// release blocked session
newTx.next ! newTx.applyUpdates(newTx.session).session
}
case _ =>
// ignore outdated timeout
}
case OnTextMessage(message, time) =>
logger.debug(s"Received text message on websocket '$wsName':$message")
tx.check.foreach { check =>
implicit val cache = mutable.Map.empty[Any, Any]
check.check(message, tx.session) match {
case Success(result) =>
val results = result :: tx.pendingCheckSuccesses
check.expectation match {
case UntilCount(count) if count == results.length =>
succeedPendingCheck(tx, results, goToOpenState(webSocket))
case _ =>
// let's pile up
val newTx = tx.copy(pendingCheckSuccesses = results)
context.become(openState(webSocket, newTx))
}
case _ =>
}
}
case OnByteMessage(message, time) =>
logger.debug(s"Received byte message on websocket '$wsName':$message. Beware, byte message checks are currently not supported")
case Reconciliate(requestName, next, session) =>
logger.debug(s"Reconciliating websocket '$wsName'")
reconciliate(tx, next, session, goToOpenState(webSocket))
case Close(requestName, next, session) =>
logger.debug(s"Closing websocket '$wsName'")
webSocket.sendCloseFrame()
val newTx = failPendingCheck(tx, "Check didn't succeed by the time the websocket was asked to closed")
.applyUpdates(session)
.copy(requestName = requestName, start = nowMillis, next = next)
context.become(closingState(newTx))
case OnClose(status, reason, time) =>
logger.debug(s"Websocket '$wsName' closed by the server")
      // this close wasn't triggered by the client; otherwise we would have received a Close first and the state would be closing or stopped
// FIXME what about pending checks?
handleClose(status, reason, time)
case unexpected =>
logger.info(s"Discarding unknown message $unexpected while in open state")
}
}
def closingState(tx: AsyncTx): Receive = {
case m: OnClose =>
import tx._
logResponse(session, requestName, OK, start, nowMillis)
next ! session.remove(wsName)
context.stop(self)
case unexpected =>
logger.info(s"Discarding unknown message $unexpected while in closing state")
}
def disconnectedState(status: Int, reason: String, tx: AsyncTx): Receive = {
case action: WsUserAction =>
      // reconnect on the first client message attempt
val newTx = tx.copy(reconnectCount = tx.reconnectCount + 1)
WsTx.start(newTx, self, httpEngine, statsEngine)
context.become(reconnectingState(status, reason, action))
case unexpected =>
// FIXME we're losing check timeout!
logger.info(s"Discarding unknown message $unexpected while in disconnected state")
}
def reconnectingState(status: Int, reason: String, pendingAction: WsUserAction): Receive = {
case OnOpen(tx, webSocket, _) =>
context.become(openState(webSocket, tx))
self ! pendingAction
case OnFailedOpen(tx, message, _) =>
context.become(crashedState(tx, s"Websocket '$wsName' originally crashed with status $status and message $message and failed to reconnect: $message"))
self ! pendingAction
case unexpected =>
// FIXME we're losing check timeout!
logger.info(s"Discarding unknown message $unexpected while in reconnecting state")
}
def crashedState(tx: AsyncTx, error: String): Receive = {
case action: WsUserAction =>
import action._
val now = nowMillis
logResponse(session, requestName, KO, now, now, Some(error))
next ! session.update(tx.updates).markAsFailed.remove(wsName)
context.stop(self)
case unexpected =>
logger.info(s"Discarding unknown message $unexpected while in crashed state")
}
}
| MykolaB/gatling | gatling-http/src/main/scala/io/gatling/http/action/async/ws/WsActor.scala | Scala | apache-2.0 | 9,381 |
package fpinscala.monoids
import fpinscala.parallelism.Nonblocking._
import fpinscala.parallelism.Nonblocking.Par.toParOps // infix syntax for `Par.map`, `Par.flatMap`, etc
trait Monoid[A] {
def op(a1: A, a2: A): A
def identity: A
def zero: A = identity
}
object Monoid {
val stringMonoid = new Monoid[String] {
def op(a1: String, a2: String) = a1 + a2
val identity = ""
}
def listMonoid[A] = new Monoid[List[A]] {
def op(a1: List[A], a2: List[A]) = a1 ++ a2
val identity = Nil
}
val intAddition: Monoid[Int] = new Monoid[Int] {
def op(a1: Int, a2: Int): Int = a1 + a2
def identity: Int = 0
}
val intMultiplication: Monoid[Int] = new Monoid[Int] {
def op(a1: Int, a2: Int): Int = a1 * a2
def identity: Int = 1
}
val booleanOr: Monoid[Boolean] = new Monoid[Boolean] {
def op(a1: Boolean, a2: Boolean): Boolean = a1 || a2
def identity: Boolean = false
}
val booleanAnd: Monoid[Boolean] = new Monoid[Boolean] {
def op(a1: Boolean, a2: Boolean): Boolean = a1 && a2
def identity: Boolean = true
}
def optionMonoid[A]: Monoid[Option[A]] = new Monoid[Option[A]] {
def op(a1: Option[A], a2: Option[A]): Option[A] = a1 orElse a2
def identity: Option[A] = None
}
def dual[A](m: Monoid[A]): Monoid[A] = new Monoid[A] {
def identity: A = m.identity
def op(a1: A, a2: A): A = m.op(a2, a1)
}
def endoMonoid[A]: Monoid[A => A] = new Monoid[A => A] {
def op(a1: A => A, a2: A => A): A => A = a1 compose a2
def identity: A => A = a => a
}
import fpinscala.testing._
import Prop._
def monoidLaws[A](gen: Gen[A])(m: Monoid[A]): Prop =
forAll(gen ** gen ** gen) {
case ((a, b), c) => m.op(m.op(a, b), c) == m.op(a, m.op(b, c))
} && forAll(gen)(a => m.op(m.identity, a) == a)
def trimMonoid(s: String): Monoid[String] = sys.error("todo")
def concatenate[A](as: List[A], m: Monoid[A]): A =
as.foldLeft(m.identity)(m.op)
def foldMap[A, B](as: List[A], m: Monoid[B])(f: A => B): B =
concatenate(as map f, m)
def foldRight[A, B](as: List[A])(z: B)(f: (A, B) => B): B =
foldMap(as, endoMonoid[B])(f.curried)(z)
def foldLeft[A, B](as: List[A])(z: B)(f: (B, A) => B): B =
foldMap(as, dual(endoMonoid[B]))(a => b => f(b, a))(z)
def foldMapV[A, B](as: IndexedSeq[A], m: Monoid[B])(f: A => B): B =
if (as.isEmpty) {
m.identity
} else if (as.length == 1) {
f(as.head)
} else {
val (l, r) = as.splitAt(as.length / 2)
m.op(foldMapV(l, m)(f), foldMapV(r, m)(f))
}
def homoMorphismProp[A, B](g: Gen[A], ma: Monoid[A], mb: Monoid[B])(f: A => B) =
forAll(g ** g) {
case (a1, a2) => f(ma.op(a1, a2)) == mb.op(f(a1), f(a2))
}
def isoMorphismProp[A, B](ga: Gen[A], gb: Gen[B], ma: Monoid[A], mb: Monoid[B])(fab: A => B, fba: B =>A) =
homoMorphismProp(ga, ma, mb)(fab) && homoMorphismProp(gb, mb, ma)(fba)
  // Exercises like this one can scare people away from FP; there are more readable
  // ways to write it. A small case class, for example, reads better than an opaque
  // tuple3 (see the sketch after `ordered` below).
def ordered(ints: IndexedSeq[Int]): Boolean = {
// Our monoid tracks the minimum and maximum element seen so far
// as well as whether the elements are so far ordered.
val mon = new Monoid[Option[(Int, Int, Boolean)]] {
def op(o1: Option[(Int, Int, Boolean)], o2: Option[(Int, Int, Boolean)]) =
(o1, o2) match {
// The ranges should not overlap if the sequence is ordered.
case (Some((x1, y1, p)), Some((x2, y2, q))) =>
Some((x1 min x2, y1 max y2, p && q && y1 <= x2))
case (x, None) => x
case (None, x) => x
}
val identity = None
}
// The empty sequence is ordered, and each element by itself is ordered.
foldMapV(ints, mon)(i => Some((i, i, true))).map(_._3).getOrElse(true)
}
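  // Illustrative sketch of the case-class alternative mentioned above; `OrderedAcc`
  // and `orderedWithCaseClass` are additions for readability, not part of the
  // original exercise answers. The merge logic is unchanged, only with named fields.
  private case class OrderedAcc(min: Int, max: Int, sorted: Boolean)
  def orderedWithCaseClass(ints: IndexedSeq[Int]): Boolean = {
    val mon = new Monoid[Option[OrderedAcc]] {
      val identity = None
      def op(o1: Option[OrderedAcc], o2: Option[OrderedAcc]) = (o1, o2) match {
        case (Some(a), Some(b)) =>
          // the left range must end no later than the right range starts
          Some(OrderedAcc(a.min min b.min, a.max max b.max, a.sorted && b.sorted && a.max <= b.min))
        case (x, None) => x
        case (None, x) => x
      }
    }
    foldMapV(ints, mon)(i => Some(OrderedAcc(i, i, sorted = true))).forall(_.sorted)
  }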
sealed trait WC
case class Stub(chars: String) extends WC
case class Part(lStub: String, words: Int, rStub: String) extends WC
def par[A](m: Monoid[A]): Monoid[Par[A]] =
new Monoid[Par[A]] {
def identity: Par[A] = Par.unit(m.identity)
def op(a1: Par[A], a2: Par[A]): Par[A] = Par.map2(a1, a2)(m.op)
}
def parFoldMap[A,B](v: IndexedSeq[A], m: Monoid[B])(f: A => B): Par[B] =
Par.parMap(v)(f) flatMap (ps => foldMapV(ps, par(m))(Par lazyUnit _))
val wcMonoid: Monoid[WC] = new Monoid[WC] {
def identity: WC = Stub("")
def op(a1: WC, a2: WC): WC = a1 -> a2 match {
case (Stub(s), Stub(s1)) => Stub(s + s1)
case (Stub(s), Part(l, i, r)) => Part(s + l, i, r)
case (Part(l, i, r), Stub(s)) => Part(l, i, r + s)
case (Part(l, i, r), Part(l1, i1, r1)) =>
        Part(l, i + (if ((r + l1).isEmpty) 0 else 1) + i1, r1)
}
}
def count(s: String): Int = {
// A single character's count. Whitespace does not count,
// and non-whitespace starts a new Stub.
def wc(c: Char): WC =
if (c.isWhitespace)
Part("", 0, "")
else
Stub(c.toString)
// `unstub(s)` is 0 if `s` is empty, otherwise 1.
def unstub(s: String) = s.length min 1
foldMapV(s.toIndexedSeq, wcMonoid)(wc) match {
case Stub(s) => unstub(s)
case Part(l, w, r) => unstub(l) + w + unstub(r)
}
}
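  // A minimal sketch of the expected behaviour of `count` (illustrative only;
  // the sample sentence is arbitrary):
  def countExample: Boolean =
    count("") == 0 && count("lorem ipsum dolor sit amet") == 5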
def productMonoid[A,B](A: Monoid[A], B: Monoid[B]): Monoid[(A, B)] =
new Monoid[(A, B)] {
def identity: (A, B) = A.identity -> B.identity
def op(ab1: (A, B), ab2: (A, B)): (A, B) = (ab1, ab2) match {
case ((a, b), (a1, b1)) => A.op(a, a1) -> B.op(b, b1)
}
}
def functionMonoid[A,B](B: Monoid[B]): Monoid[A => B] =
new Monoid[A => B] {
def identity: A => B = a => B.identity
def op(a1: A => B, a2: A => B): A => B = a => B.op(a1(a), a2(a))
}
def mapMergeMonoid[K,V](V: Monoid[V]): Monoid[Map[K, V]] =
new Monoid[Map[K, V]] {
def identity = Map[K,V]()
def op(a: Map[K, V], b: Map[K, V]) =
(a.keySet ++ b.keySet).foldLeft(identity) { (acc,k) =>
acc.updated(k, V.op(a.getOrElse(k, V.identity),
b.getOrElse(k, V.identity)))
}
}
def bag[A](as: IndexedSeq[A]): Map[A, Int] = {
val m = mapMergeMonoid[A, Int](intAddition)
IndexedSeqFoldable.foldMap(as)(a => Map(a -> 1))(m)
}
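  // A minimal sketch of the expected behaviour of `bag` (illustrative only):
  def bagExample: Boolean =
    bag(Vector("a", "rose", "is", "a", "rose")) == Map("a" -> 2, "rose" -> 2, "is" -> 1)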
}
trait Foldable[F[_]] {
import Monoid._
def foldRight[A,B](as: F[A])(z: B)(f: (A, B) => B): B =
foldMap(as)(f.curried)(endoMonoid[B])(z)
def foldLeft[A,B](as: F[A])(z: B)(f: (B, A) => B): B =
foldMap(as)(a => (b: B) => f(b, a))(dual(endoMonoid[B]))(z)
def foldMap[A, B](as: F[A])(f: A => B)(mb: Monoid[B]): B =
foldRight(as)(mb.identity)((a, b) => mb.op(f(a), b))
def concatenate[A](as: F[A])(m: Monoid[A]): A =
foldLeft(as)(m.identity)(m.op)
def toList[A](as: F[A]): List[A] =
foldRight(as)(List.empty[A])(_ :: _)
}
object ListFoldable extends Foldable[List] {
override def foldRight[A, B](as: List[A])(z: B)(f: (A, B) => B) =
as.foldRight(z)(f)
override def foldLeft[A, B](as: List[A])(z: B)(f: (B, A) => B) =
as.foldLeft(z)(f)
override def foldMap[A, B](as: List[A])(f: A => B)(mb: Monoid[B]): B =
(as map f).foldRight(mb.identity)(mb.op)
override def toList[A](as: List[A]): List[A] = as
}
object IndexedSeqFoldable extends Foldable[IndexedSeq] {
import Monoid._
override def foldRight[A, B](as: IndexedSeq[A])(z: B)(f: (A, B) => B) =
as.foldRight(z)(f)
override def foldLeft[A, B](as: IndexedSeq[A])(z: B)(f: (B, A) => B) =
as.foldLeft(z)(f)
override def foldMap[A, B](as: IndexedSeq[A])(f: A => B)(mb: Monoid[B]): B =
foldMapV(as, mb)(f)
}
object StreamFoldable extends Foldable[Stream] {
override def foldRight[A, B](as: Stream[A])(z: B)(f: (A, B) => B) =
as.foldRight(z)(f)
override def foldLeft[A, B](as: Stream[A])(z: B)(f: (B, A) => B) =
as.foldLeft(z)(f)
}
sealed trait Tree[+A]
case class Leaf[A](value: A) extends Tree[A]
case class Branch[A](left: Tree[A], right: Tree[A]) extends Tree[A]
object TreeFoldable extends Foldable[Tree] {
override def foldMap[A, B](as: Tree[A])(f: A => B)(mb: Monoid[B]): B =
as match {
case Leaf(a) => f(a)
case Branch(l, r) => mb.op(foldMap(l)(f)(mb), foldMap(r)(f)(mb))
}
override def foldLeft[A, B](as: Tree[A])(z: B)(f: (B, A) => B) =
as match {
case Leaf(a) => f(z, a)
case Branch(l, r) => foldLeft(r)(foldLeft(l)(z)(f))(f)
}
override def foldRight[A, B](as: Tree[A])(z: B)(f: (A, B) => B) =
as match {
case Leaf(a) => f(a, z)
case Branch(l, r) => foldRight(l)(foldRight(r)(z)(f))(f)
}
}
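// A minimal usage sketch of the Foldable instances above (illustrative only;
// the sample data is arbitrary):
object FoldableExamples {
  def treeToList: Boolean =
    TreeFoldable.toList(Branch(Leaf(1), Branch(Leaf(2), Leaf(3)))) == List(1, 2, 3)
  def listWordLengths: Boolean =
    ListFoldable.foldMap(List("a", "bb"))(_.length)(Monoid.intAddition) == 3
}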
object OptionFoldable extends Foldable[Option] {
override def foldMap[A, B](as: Option[A])(f: A => B)(mb: Monoid[B]): B =
as match {
case None => mb.identity
case Some(a) => f(a)
}
override def foldLeft[A, B](as: Option[A])(z: B)(f: (B, A) => B) = as match {
case None => z
case Some(a) => f(z, a)
}
override def foldRight[A, B](as: Option[A])(z: B)(f: (A, B) => B) = as match {
case None => z
case Some(a) => f(a, z)
}
}
| ailveen/fpinscala | exercises/src/main/scala/fpinscala/monoids/Monoid.scala | Scala | mit | 9,047 |
package com.github.diegopacheco.sandbox.sacala.twoten.implicitclazz
object ImplicitClassFun extends App {
implicit class RichInteger(n:Int) extends Ordered[Int]{
def isMin(m:Int) : Int = if (n <= m) n else m
    def compare(m:Int): Int = if (n < m) -1 else if (n > m) 1 else 0
}
val i1 = 10
val i2 = 20
println( i1.isMin(i2) )
} | diegopacheco/scala-playground | scala-2.10-playground/src/com/github/diegopacheco/sandbox/sacala/twoten/implicitclazz/ImplicitClassFun.scala | Scala | unlicense | 356 |
package com.komanov.serialization.jmh.tests
import com.komanov.serialization.converters.Converters
import com.komanov.serialization.jmh.ConverterType
import org.specs2.mutable.SpecWithJUnit
class ConverterTypeTest extends SpecWithJUnit {
"ConverterType" should {
"contain all converters" >> {
ConverterType.values().map(_.converter).toSet mustEqual Converters.all.map(_._2).toSet
}
}
}
| dkomanov/stuff | src/com/komanov/serialization/jmh/tests/ConverterTypeTest.scala | Scala | mit | 408 |
package chandu0101.scalajs.react.components.demo.components.materialui
import chandu0101.scalajs.react.components.demo.components.CodeExample
import chandu0101.scalajs.react.components.materialui._
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.prefix_<^._
import scalacss.Defaults._
import scalacss.ScalaCssReact._
import scalacss.mutable.StyleSheet.Inline
object MuiSwitchesDemo {
val code =
"""
| MuiAppBar(title = "Title")()
|
""".stripMargin
val checkboxCode =
"""
| MuiCheckBox(name="checkboxName1",
| value="checkboxValue1",
| label="went for a run today"),
| MuiCheckBox(name="checkboxName2",
| value="checkboxValue2",
| label="feed the dog"),
| MuiCheckBox(name="checkboxName3",
| value="checkboxValue3",
| label="built a house on the moon",
| disabled = true
| )
|
""".stripMargin
val radioButtonCode =
"""
| MuiRadioButtonGroup(name = "shipspeed",
| defaultSelected = "not_light")(
| MuiRadioButton(value = "light" ,label = "prepare for light speed"),
| MuiRadioButton(value = "no_light" ,label = "light speed too slow"),
| MuiRadioButton(value = "ludicrous" ,label = "go to ludicrous speed",disabled = true)
| )
|
""".stripMargin
val toggleCode =
"""
|MuiToggle(name = "toggleName1" , value = "togglevalue1" ,label = "active thrusters"),
|MuiToggle(name = "toggleName2" , value = "togglevalue2" ,label = "auto-pilot",defaultToggled = true),
|MuiToggle(name = "toggleName3" , value = "togglevalue3" ,label = "initiate self-destruct sequence",disabled = true)
|
""".stripMargin
object Style extends Inline {
import dsl._
val container = style(maxWidth(1024 px))
val content = style(display.flex,
padding(30.px),
flexDirection.column,
alignItems.center)
}
val component = ReactComponentB[Unit]("MuiSwitchesDemo")
.render(P => {
<.div(Style.container,
<.h3("Switches"),
MuiTabs()(
MuiTab(label = "Checkbox")(
CodeExample(checkboxCode)(
<.div(Style.content,
MuiCheckBox(name="checkboxName1",
value="checkboxValue1",
label="went for a run today")(),
MuiCheckBox(name="checkboxName2",
value="checkboxValue2",
label="feed the dog")(),
MuiCheckBox(name="checkboxName3",
value="checkboxValue3",
label="built a house on the moon",
disabled = true
)()
)
)
),
MuiTab(label = "RadioButton")(
CodeExample(radioButtonCode)(
<.div(Style.content,
MuiRadioButtonGroup(name = "shipspeed",
defaultSelected = "not_light")(
MuiRadioButton(value = "light" ,label = "prepare for light speed")(),
MuiRadioButton(value = "no_light" ,label = "light speed too slow")(),
MuiRadioButton(value = "ludicrous" ,label = "go to ludicrous speed",disabled = true)()
)
)
)
),
MuiTab(label = "Toggle")(
CodeExample(toggleCode)(
<.div(Style.content,
MuiToggle(name = "toggleName1" , value = "togglevalue1" ,label = "active thrusters")(),
MuiToggle(name = "toggleName2" , value = "togglevalue2" ,label = "auto-pilot",defaultToggled = true)(),
MuiToggle(name = "toggleName3" , value = "togglevalue3" ,label = "initiate self-destruct sequence",disabled = true)()
)
)
)
)
)
}).buildU
def apply() = component()
}
| mproch/scalajs-react-components | demo/src/main/scala/chandu0101/scalajs/react/components/demo/components/materialui/MuiSwitchesDemo.scala | Scala | apache-2.0 | 3,950 |
package views
import org.specs2.mutable.Specification
import models.jbehave.JBehaveComposite
class JBehaveCompositesSpec extends Specification {
"jBehaveComposites#render" should {
val compositePackage = "com.technologyconversations.test"
val compositeClass = "TcBddComposites"
val steps = List("Given something", "When else", "Then OK")
val composite = JBehaveComposite("Given this is my composite", steps)
val stepsWithParams = List("""Given "my" <param1>""", "When <param2>", "Then <param3>")
val compositeWithParams = JBehaveComposite("""When this is "my" <param1>, <param2> and <param3>""", stepsWithParams)
val out = views.html.jBehaveComposites.render(
compositePackage,
compositeClass,
List(composite, compositeWithParams)
).toString().trim
"output package statement" in {
out must contain(s"package $compositePackage;")
}
"output import statements" in {
out must contain("import org.jbehave.core.annotations.*;")
out must contain("import com.technologyconversations.bdd.steps.util.BddVariable;")
}
"output class statement" in {
out must contain(s"public class $compositeClass {")
}
"output step annotation" in {
out must contain(s"""@Given("this is my composite")""")
}
"output step annotation with double quote escaped" in {
out must contain("""@When("this is \\"my\\" <param1>, <param2> and <param3>")""")
}
"output composite annotation" in {
out must contain(steps.mkString("""@Composite(steps = {"""", """", """", """"})"""))
}
"output composite annotation steps with double quotes escaped" in {
out must contain("""Given \\"my\\" <param1>""")
}
"output composite method" in {
out must contain("""public void compositeStep0() { }""")
}
"output composite methods using unique names" in {
out must contain("""public void compositeStep1(""")
}
"output composite methods with params" in {
out must contain("""(@Named("param1") BddVariable param1, @Named("param2") BddVariable param2""")
}
"output } at the end" in {
out must endWith("}")
}
}
}
| TechnologyConversations/TechnologyConversationsBdd | test/views/JBehaveCompositesSpec.scala | Scala | apache-2.0 | 2,180 |
// covariant linked list
abstract class M {
self =>
type T
final type selfType = M {type T <: self.T}
type actualSelfType >: self.type <: selfType
def next: selfType
// I don't understand why this doesn't compile, but that's a separate matter
// error: method all2 cannot be accessed in M.this.selfType
// because its instance type => Stream[M{type T <: M.this.selfType#T}]
// contains a malformed type: M.this.selfType#T
def all2: Stream[M {type T <: self.T}] = Stream.cons(self: actualSelfType, next.all2)
// compiles successfully
def all3: Stream[M {type T <: self.T}] = all3Impl(self: actualSelfType)
private def all3Impl(first: M {type T <: self.T}): Stream[M {type T <: self.T}] = Stream.cons(first, all3Impl(first.next))
def all4: Stream[M {type T <: self.T}] = Unrelated.all4Impl[T](self: actualSelfType)
}
// TODO!!! fix this bug for real, it compiles successfully, but weird types are inferred
object Unrelated {
// compiles successfully
def all4Impl[U](first: M {type T <: U}): Stream[M {type T <: U}] = Stream.cons(first, all4Impl[U](first.next))
// should compile successfully without the [U], but:
// def all4ImplFail[U](first: M {type T <: U}): Stream[M {type T <: U}] = Stream.cons(first, all4ImplFail(first.next))
//
// test/files/pos/t1279a.scala:31: error: type mismatch;
// found : first.selfType
// (which expands to) M{type T <: first.T}
// required: M{type T <: Nothing}
// def all4ImplFail[U](first: M {type T <: U}): Stream[M {type T <: U}] = Stream.cons(first, all4ImplFail(first.next))
// ^
// one error found
}
| loskutov/intellij-scala | testdata/scalacTests/failed/t1279a.scala | Scala | apache-2.0 | 1,720 |
package scala
package reflect
package internal
trait StdAttachments {
self: SymbolTable =>
/**
* Common code between reflect-internal Symbol and Tree related to Attachments.
*/
trait Attachable {
protected var rawatt: scala.reflect.macros.Attachments { type Pos = Position } = NoPosition
def attachments = rawatt
def setAttachments(attachments: scala.reflect.macros.Attachments { type Pos = Position }): this.type = { rawatt = attachments; this }
def updateAttachment[T: ClassTag](attachment: T): this.type = { rawatt = rawatt.update(attachment); this }
def removeAttachment[T: ClassTag]: this.type = { rawatt = rawatt.remove[T]; this }
def hasAttachment[T: ClassTag]: Boolean = rawatt.contains[T]
// cannot be final due to SynchronizedSymbols
def pos: Position = rawatt.pos
def pos_=(pos: Position): Unit = rawatt = (rawatt withPos pos)
def setPos(newpos: Position): this.type = { pos = newpos; this }
}
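  // A minimal usage sketch (illustrative only): attachments are keyed by their class,
  // so a typical call site looks roughly like
  //   tree.updateAttachment(SAMFunction(samTp, sam))
  //   tree.attachments.get[SAMFunction]
  // where SAMFunction is one of the attachment types declared below.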
/** Attachment that knows how to import itself into another universe. */
trait ImportableAttachment {
def importAttachment(importer: Importer): this.type
}
/** Attachment that doesn't contain any reflection artifacts and can be imported as-is. */
trait PlainAttachment extends ImportableAttachment {
def importAttachment(importer: Importer): this.type = this
}
/** Stores the trees that give rise to a refined type to be used in reification.
* Unfortunately typed `CompoundTypeTree` is lacking essential info, and the reifier cannot use `CompoundTypeTree.tpe`.
* Therefore we need this hack (see `Reshape.toPreTyperTypeTree` for a detailed explanation).
*/
case class CompoundTypeTreeOriginalAttachment(parents: List[Tree], stats: List[Tree])
/** Attached to a Function node during type checking when the expected type is a SAM type (and not a built-in FunctionN).
*
* Ideally, we'd move to Dotty's Closure AST, which tracks the environment,
* the lifted method that has the implementation, and the target type.
* For backwards compatibility, an attachment is the best we can do right now.
*
* @param samTp the expected type that triggered sam conversion (may be a subtype of the type corresponding to sam's owner)
* @param sam the single abstract method implemented by the Function we're attaching this to
*
* @since 2.12.0-M4
*/
case class SAMFunction(samTp: Type, sam: Symbol) extends PlainAttachment
case object DelambdafyTarget extends PlainAttachment
/** When present, indicates that the host `Ident` has been created from a backquoted identifier.
*/
case object BackquotedIdentifierAttachment extends PlainAttachment
/** Identifies trees are either result or intermediate value of for loop desugaring.
*/
case object ForAttachment extends PlainAttachment
case object CoforAttachment extends PlainAttachment
/** Identifies unit constants which were inserted by the compiler (e.g. gen.mkBlock)
*/
case object SyntheticUnitAttachment extends PlainAttachment
/** Untyped list of subpatterns attached to selector dummy. */
case class SubpatternsAttachment(patterns: List[Tree])
abstract class InlineAnnotatedAttachment
case object NoInlineCallsiteAttachment extends InlineAnnotatedAttachment
case object InlineCallsiteAttachment extends InlineAnnotatedAttachment
/** Attached to a local class that has its outer field elided. A `null` constant may be passed
   * in place of the outer parameter, which can help callers avoid capturing the outer instance.
*/
case object OuterArgCanBeElided extends PlainAttachment
case object UseInvokeSpecial extends PlainAttachment
/** An attachment carrying information between uncurry and erasure */
case class TypeParamVarargsAttachment(val typeParamRef: Type)
/** Attached to a class symbol to indicate that its children have been observed
* via knownDirectSubclasses. Children added subsequently will trigger an
* error to indicate that the earlier observation was incomplete.
*/
case object KnownDirectSubclassesCalled extends PlainAttachment
}
| shimib/scala | src/reflect/scala/reflect/internal/StdAttachments.scala | Scala | bsd-3-clause | 4,109 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2
import com.fasterxml.jackson.databind.ObjectMapper
import org.apache.hadoop.fs.Path
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.connector.catalog.TableProvider
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.sources.DataSourceRegister
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.util.Utils
/**
* A base interface for data source v2 implementations of the built-in file-based data sources.
*/
trait FileDataSourceV2 extends TableProvider with DataSourceRegister {
/**
* Returns a V1 [[FileFormat]] class of the same file data source.
* This is a solution for the following cases:
* 1. File datasource V2 implementations cause regression. Users can disable the problematic data
* source via SQL configuration and fall back to FileFormat.
* 2. Catalog support is required, which is still under development for data source V2.
*/
def fallbackFileFormat: Class[_ <: FileFormat]
lazy val sparkSession = SparkSession.active
protected def getPaths(map: CaseInsensitiveStringMap): Seq[String] = {
val objectMapper = new ObjectMapper()
val paths = Option(map.get("paths")).map { pathStr =>
objectMapper.readValue(pathStr, classOf[Array[String]]).toSeq
}.getOrElse(Seq.empty)
paths ++ Option(map.get("path")).toSeq
}
protected def getTableName(paths: Seq[String]): String = {
val name = shortName() + " " + paths.map(qualifiedPathName).mkString(",")
Utils.redact(sparkSession.sessionState.conf.stringRedactionPattern, name)
}
private def qualifiedPathName(path: String): String = {
val hdfsPath = new Path(path)
val fs = hdfsPath.getFileSystem(sparkSession.sessionState.newHadoopConf())
hdfsPath.makeQualified(fs.getUri, fs.getWorkingDirectory).toString
}
}
| caneGuy/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/FileDataSourceV2.scala | Scala | apache-2.0 | 2,687 |
/*
* Copyright 2017 Datamountaineer.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datamountaineer.streamreactor.connect.bloomberg
import org.apache.avro.Schema
import org.apache.avro.generic.{GenericDatumReader, GenericRecord}
import org.apache.avro.io.DecoderFactory
object AvroDeserializer {
def apply(data: Array[Byte], schema: Schema): GenericRecord = {
val reader = new GenericDatumReader[GenericRecord](schema)
val decoder = DecoderFactory.get().binaryDecoder(data, null)
reader.read(null, decoder)
}
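  // A minimal usage sketch (illustrative only; assumes `schemaJson` is the writer's schema
  // and `bytes` was produced by a matching Avro binary encoder):
  //   val schema = new org.apache.avro.Schema.Parser().parse(schemaJson)
  //   val record: GenericRecord = AvroDeserializer(bytes, schema)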
}
| datamountaineer/stream-reactor | kafka-connect-bloomberg/src/test/scala/com/datamountaineer/streamreactor/connect/bloomberg/AvroDeserializer.scala | Scala | apache-2.0 | 1,054 |
package scalan.demo
import java.io.File
import scalan.{JNIExtractorOpsExp, Scalan}
import scalan.compilation.{KernelStore, KernelType}
import scalan.examples.Helpers._
import scalan.monads.{MonadsDsl, MonadsDslStd, MonadsDslExp}
trait Example4 extends Scalan with MonadsDsl {
def sum[F[_]:Cont]
(F: Monad[F])(n: Rep[Int])
(f: Rep[F[Int]] => Rep[Int]): Rep[Int] = {
import F.toMonadic
f {
SArray
.rangeFrom0(n)
.map(F.unit(_))
.foldLeft[F[Int]](F.unit(0)) {
case Pair(facc, fi) =>
for { acc <- facc; i <- fi }
yield acc + i
}
}
}
lazy val sumWithId = fun {(n: Rep[Int]) =>
sum(Monad[Id])(n)(r => r)
}
type Env = (Int,String)
type Read[A] = Reader[Env, A]
val M = Monad[Read]
lazy val sumWithReader = fun {(in: Rep[(Env,Int)]) =>
val Pair(env, n) = in
sum(M)(n)(r => r.run(env))
}
}
class Example4Std extends MonadsDslStd with Example4
class Example4Exp extends MonadsDslExp with JNIExtractorOpsExp with Example4 {
var doInvoke = true
override def invokeAll = doInvoke
override def rewriteDef[T](d: Def[T]) = d match {
    //TODO this rule works only for these particular tests, but can be generalized
case ArrayFold(
Def(ArrayMap(_xs, Def(_h: Lambda[a,_]))),
Def(f: Lambda[e,Int]@unchecked),
Def(g: Lambda[_,_])) => {
val xs = _xs.asRep[Array[a]]
val h = _h.self.asRep[a => (e => Int)]
implicit val eA = xs.elem.eItem
implicit val eE = f.eA
fun { (env: Rep[e]) => {
val ys: Rep[Array[Int]] = xs.map(x => h(x)(env))
ys.foldLeft(f.self(env)){ case Pair(acc, y) =>
acc + y
}
}}
}
case _ => super.rewriteDef(d)
}
}
| scalan/scalan-starter | scalan-starter-core/src/test/scala/scalan/demo/Example4.scala | Scala | apache-2.0 | 1,771 |
package com.twitter.webbing.route
import com.twitter.finagle.Service
import com.twitter.finagle.http.{Status, MediaType, Response}
import com.twitter.logging.Logger
import com.twitter.util.{FuturePool, Future}
import java.io.{FileInputStream, File, InputStream}
import org.apache.commons.io.IOUtils
import org.jboss.netty.buffer.{ChannelBuffer, ChannelBuffers}
import org.jboss.netty.handler.codec.http.{HttpResponse, HttpMethod}
object NettyStaticRouter {
private val log = Logger("NettyStaticRouter")
case class Loaded(buffer: ChannelBuffer, length: Int)
type Loader = Service[String, Loaded]
case class NotFoundException(path: String)
extends Exception("Not found: %s".format(path))
private[this] def load(input: InputStream): Loaded = {
log.debug("loading...")
val bytes = IOUtils.toByteArray(input)
Loaded(ChannelBuffers.wrappedBuffer(bytes), bytes.length)
}
/** Serves files from a local file system under the given root. */
class DirectoryLoader(
root: File,
pool: FuturePool = FuturePool.immediatePool)
extends Loader {
def apply(path: String): Future[Loaded] = pool {
new File(root, path) match {
case f if f.isFile && f.canRead && !f.getPath.contains("../") =>
load(new FileInputStream(f))
case _ => throw NotFoundException(path)
}
}
}
/** Serves packaged resources */
class ResourcesLoader(
obj: Any = this,
root: String = "/",
pool: FuturePool = FuturePool.unboundedPool)
extends Loader {
private[this] val log = Logger("resources")
private[this] val cls = obj.getClass
private[this] val cleanRoot = root.stripSuffix("/") + "/"
private[this] def lookup(p: String) = cleanRoot + p.stripPrefix("/")
def apply(path: String): Future[Loaded] = pool {
val p = lookup(path)
log.debug("getting resource: %s", p)
Option(cls.getResourceAsStream(p)) match {
case Some(s) if s.available > 0 =>
load(s)
case _ =>
log.warning("not found: %s", p)
throw NotFoundException(path)
}
}
}
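  // A minimal wiring sketch (illustrative only): a concrete router mixes in
  // NettyStaticRouter and supplies one of the loaders above, e.g.
  //   val staticLoader: Loader = new ResourcesLoader(root = "/web")
  // or, for assets on disk:
  //   val staticLoader: Loader = new DirectoryLoader(new File("/var/www/static"))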
  private val Ext = """[^.]+\.(.+)""".r
}
/** A router for serving static assets */
trait NettyStaticRouter { self: NettyHttpRouter =>
import NettyStaticRouter._
val staticLoader: Loader
val contentTypes: Map[String, String] = Map(
"css" -> "text/css",
"gif" -> "image/gif",
"html" -> MediaType.Html,
"jpg" -> "image/jpeg",
"jpeg" -> "image/jpeg",
"js" -> MediaType.Javascript,
"json" -> MediaType.Json,
"min.css" -> "text/css",
"min.js" -> MediaType.Javascript,
"png" -> "image/png",
"txt" -> "text/plain")
/** A route consisting of all unconsumed path segments */
val pathSegments = (str *)
/** A route that loads the given path if it exists */
def staticFileRoute(path: String): Route[Loaded] =
mkRoute { in =>
staticLoader(path) map(Success(_, in)) handle {
case nfe: NotFoundException => Failure(Status.NotFound, in)
}
}
/** A route that serves a static file if it exists. */
def staticFile(path: String, contentType: Option[String]): Route[HttpResponse] = {
log.debug("%s [%s]", path, contentType getOrElse "")
staticFileRoute(path) map { loaded =>
log.debug("%s loaded %d bytes", path, loaded.length)
val rsp = Response()
rsp.content = loaded.buffer
rsp.contentLength = loaded.length
contentType foreach { ct =>
rsp.contentType = ct
}
rsp.httpResponse
}
}
val staticRoute: Route[HttpResponse] = when(HttpMethod.GET) ~> {
pathSegments >> { segments =>
val path = segments mkString "/"
val contentType = segments.lastOption match {
case Some(Ext(e)) => contentTypes.get(e)
case _ => None
}
staticFile(path, contentType)
}
}
}
| finagle/webbing | route-finagle-http/src/main/scala/com/twitter/webbing/route/NettyStaticRouter.scala | Scala | apache-2.0 | 3,885 |
package im.actor.server.sequence
import akka.actor.ActorSystem
import im.actor.server.model.push.GooglePushCredentials
private[sequence] final class GooglePushProvider(userId: Int, system: ActorSystem) extends PushProvider {
private val googlePushExt = GooglePushExtension(system)
def deliverInvisible(seq: Int, creds: GooglePushCredentials): Unit = {
val message = GooglePushMessage(
to = creds.regId,
collapse_key = Some(s"seq-invisible-${userId.toString}"),
data = Some(Map("seq" → seq.toString)),
time_to_live = None
)
googlePushExt.send(creds.projectId, message)
}
def deliverVisible(
seq: Int,
creds: GooglePushCredentials,
data: PushData,
isTextEnabled: Boolean,
isSoundEnabled: Boolean,
isVibrationEnabled: Boolean
): Unit = {
val message = GooglePushMessage(
to = creds.regId,
collapse_key = Some(s"seq-visible-${userId.toString}"),
data = Some(Map("seq" → seq.toString) ++ (
data.text match {
case text if text.nonEmpty && isTextEnabled ⇒
Map("message" → text)
case _ ⇒ Map.empty
}
)),
time_to_live = None
)
googlePushExt.send(creds.projectId, message)
}
} | ljshj/actor-platform | actor-server/actor-core/src/main/scala/im/actor/server/sequence/GooglePushProvider.scala | Scala | mit | 1,295 |
package com.sksamuel.elastic4s.searches.sort
import com.sksamuel.elastic4s.searches.QueryDefinition
import org.elasticsearch.common.geo.{GeoDistance, GeoPoint}
import org.elasticsearch.common.unit.DistanceUnit
import org.elasticsearch.index.query.GeoValidationMethod
import org.elasticsearch.search.sort.{GeoDistanceSortBuilder, SortBuilders, SortMode, SortOrder}
class GeoDistanceSortDefinition(field: String,
geohashes: Seq[String] = Nil,
points: Seq[GeoPoint] = Nil) extends SortDefinition[GeoDistanceSortBuilder] {
val builder = SortBuilders.geoDistanceSort(field, geohashes: _*).points(points: _*)
def nested(nestedPath: String): this.type = {
builder.setNestedPath(nestedPath)
this
}
@deprecated("use sortMode", "5.0.0")
def mode(mode: SortMode): this.type = sortMode(mode)
def sortMode(mode: SortMode): this.type = {
builder.sortMode(mode)
this
}
def order(order: SortOrder): this.type = {
builder.order(order)
this
}
def validation(validation: GeoValidationMethod): this.type = {
builder.validation(validation)
this
}
def unit(unit: DistanceUnit): this.type = {
builder.unit(unit)
this
}
def nestedPath(nestedPath: String): this.type = {
builder.setNestedPath(nestedPath)
this
}
def nestedFilter(filter: QueryDefinition): this.type = {
builder.setNestedFilter(filter.builder)
this
}
def geoDistance(geoDistance: GeoDistance): this.type = {
builder.geoDistance(geoDistance)
this
}
}
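// A minimal usage sketch (illustrative only; the field name and coordinates are made up):
//   new GeoDistanceSortDefinition("location", points = Seq(new GeoPoint(40.71, -74.0)))
//     .order(SortOrder.ASC)
//     .unit(DistanceUnit.KILOMETERS)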
| ulric260/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/searches/sort/GeoDistanceSortDefinition.scala | Scala | apache-2.0 | 1,568 |
package models
import scala.slick.driver.H2Driver.simple._
import com.github.tototoshi.slick.H2JodaSupport._
import org.joda.time.DateTime
object Tables extends Tables
trait Tables {
sealed abstract class HistoryType(val code: String)
object HistoryType{
case object TotalAmount extends HistoryType("Total")
case object Deposit extends HistoryType("Deposit")
case object InvestmentAmount extends HistoryType("InvestmentAmount")
case object Withdrawal extends HistoryType("Withdrawal")
case object CreditPayment extends HistoryType("CreditPayment")
}
case class HistoryRow(
id: Long,
detail: String,
historyDate: DateTime,
dataType: String,
dataGroup: Int,
account: String,
amount: BigDecimal)
class History(tag: Tag) extends Table[HistoryRow](tag, "History") {
def id = column[Long]("id", O.PrimaryKey, O.AutoInc)
def detail = column[String]("detail")
def historyDate = column[DateTime]("historyDate")
def dataType = column[String]("dataType")
def dataGroup = column[Int]("dataGroup")
def account = column[String]("account")
def amount = column[BigDecimal]("amount")
def * = (id, detail, historyDate, dataType, dataGroup, account, amount) <> (HistoryRow.tupled, HistoryRow.unapply _)
}
lazy val History = TableQuery[History]
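  // A minimal query sketch (illustrative only; assumes an implicit Slick session is in scope
  // and the account name is made up):
  //   History.filter(_.account === "main").sortBy(_.historyDate.desc).list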
case class RegularItemRow(
id: Long,
name: String,
itemType: String, // income or outgo
amount: BigDecimal)
class RegularItem(tag: Tag) extends Table[RegularItemRow](tag, "RegularItem") {
def id = column[Long]("id", O.PrimaryKey, O.AutoInc)
def name = column[String]("name")
def itemType = column[String]("itemType")
def amount = column[BigDecimal]("amount")
def * = (id, name, itemType, amount) <> (RegularItemRow.tupled, RegularItemRow.unapply _)
}
lazy val RegularItem = TableQuery[RegularItem]
case class IrregularItemRow(
id: Long,
occurDate: DateTime,
name: String,
itemType: String, // income or outgo
amount: BigDecimal)
class IrregularItem(tag: Tag) extends Table[IrregularItemRow](tag, "IrregularItem") {
def id = column[Long]("id", O.PrimaryKey, O.AutoInc)
def occurDate = column[DateTime]("occurDate")
def name = column[String]("name")
def itemType = column[String]("itemType")
def amount = column[BigDecimal]("amount")
def * = (id, occurDate, name, itemType, amount) <> (IrregularItemRow.tupled, IrregularItemRow.unapply _)
}
lazy val IrregularItem = TableQuery[IrregularItem]
} | withpop/money-backward | app/models/Tables.scala | Scala | mit | 2,611 |
import scala.concurrent.duration._
import play.api._
import play.api.libs.concurrent.Akka
import play.api.libs.concurrent.Execution.Implicits._
import com.thetestpeople.trt.utils.RichConfiguration._
import com.thetestpeople.trt.Config._
import com.thetestpeople.trt.utils.HasLogger
import scala.concurrent.Future
import play.api.mvc.WithFilters
import com.thetestpeople.trt.filters.LoggingFilter
import controllers.ControllerHelper
object Global extends WithFilters(LoggingFilter) with GlobalSettings with HasLogger {
private var factory: Factory = _
override def onStart(app: Application) {
logger.debug("onStart()")
factory = new Factory(Play.current.configuration)
factory.dbMigrator.migrate()
for (name ← app.configuration.getString("ui.applicationName"))
ControllerHelper.applicationName = name
initialiseCiImportWorker(app)
initialiseCiImportPoller(app)
initialiseAnalyseExecutionsPoller(app)
}
private def initialiseCiImportWorker(app: Application) {
Future {
factory.ciImportWorker.run()
}
}
private def getDuration(configuration: Configuration, key: String, default: FiniteDuration): FiniteDuration =
configuration.getMilliseconds(key).map(_.millis).getOrElse(default)
private def initialiseCiImportPoller(app: Application) {
val conf = app.configuration
val initialDelay = conf.getDuration(Ci.Poller.InitialDelay, default = 1.minute)
val interval = conf.getDuration(Ci.Poller.Interval, default = 1.minute)
if (conf.getBoolean(Ci.Poller.Enabled).getOrElse(true)) {
Akka.system(app).scheduler.schedule(initialDelay, interval) {
factory.service.syncAllCiImports()
}
logger.info("Initialised CI import poller")
}
}
private def initialiseAnalyseExecutionsPoller(app: Application) {
val conf = app.configuration
val initialDelay = conf.getDuration(CountsCalculator.Poller.InitialDelay, default = 5.seconds)
val interval = conf.getDuration(CountsCalculator.Poller.Interval, default = 2.minutes)
Akka.system(app).scheduler.scheduleOnce(Duration.Zero) {
factory.service.analyseAllExecutions()
}
Akka.system(app).scheduler.schedule(initialDelay, interval) {
factory.service.analyseAllExecutions()
}
logger.info("Scheduled analysis of all executions")
}
override def onStop(app: Application) {
logger.debug("onStop()")
factory.ciImportWorker.stop()
}
override def getControllerInstance[A](clazz: Class[A]): A = factory.getControllerInstance(clazz)
}
| thetestpeople/trt | app/Global.scala | Scala | mit | 2,541 |
package org.jetbrains.plugins.scala
package codeInsight.template.util
import com.intellij.psi._
import org.jetbrains.plugins.scala.lang.resolve.processor.BaseProcessor
import org.jetbrains.plugins.scala.lang.resolve.{ResolveTargets, ScalaResolveResult}
/**
* User: Alexander Podkhalyuzin
* Date: 30.01.2009
*/
class VariablesCompletionProcessor(override val kinds: Set[ResolveTargets.Value]) extends BaseProcessor(kinds) {
def execute(element: PsiElement, state: ResolveState): Boolean = {
val named = element.asInstanceOf[PsiNamedElement]
if (kindMatches(element)) {
candidatesSet += new ScalaResolveResult(named)
}
true
}
} | double-y/translation-idea-plugin | src/org/jetbrains/plugins/scala/codeInsight/template/util/VariablesCompletionProcessor.scala | Scala | apache-2.0 | 656 |
package com.agilogy.json
import org.scalatest.FlatSpec
import play.api.libs.json.{ Json, JsNumber }
class JsonComparatorWithEllipsisSpec extends FlatSpec {
import JsonComparator._
it should "compare Json strings with ellipsis when nothing replaces the ellipsis" in {
val actual = """{"a":1}"""
val expected = """{"a":1,...}"""
assert(diff(expected, actual) === Seq())
}
it should "compare Json strings with ellipsis when nothing replaces the ellipsis in nested objects" in {
val actual = """{"b":{"a":1}}"""
val expected = """{"b":{"a":1,...}}"""
assert(diff(expected, actual) === Seq())
}
it should "compare Json objects with ellipsis only" in {
val expected = """{...}"""
val actual = """{"a":1}"""
assert(diff(expected, actual) === Seq())
}
it should "compare Json objects with ellipsis in the value" in {
val expected = """{"a":..., "b":5, "c":...}"""
val actual = """{"a":1, "b":5, "c": 3}"""
assert(diff(expected, actual) === Seq())
}
it should "compare Json objects with ellipsis only when they are nested in an array" in {
val expected = """{"b": [{"a":1},...,{"a":3}]}"""
val actual = """{"b": [{"a":1},{"a":2},{"a":3}]}"""
assert(diff(expected, actual) === Seq())
}
it should "compare Json objects assuming {...} represents whatever object" in {
val expected = """{"b": [...,{...},...,{"a":3},...]}"""
val actual = """{"b": [{"a":3}]}"""
assert(diff(expected, actual) === Seq(Difference("/b[1]", Some(Json.obj("a" -> 3)), None)))
}
it should "fail comparing Json strings with ellipsis when something outside the ellipsis differs" in {
val actual = """{"a":1}"""
val expected = """{"a":2,...}"""
assert(diff(expected, actual) === Seq(Difference("/a", Some(JsNumber(2)), Some(JsNumber(1)))))
}
it should "fail comparing Json strings with ellipsis in nested objects when something outside the ellipsis differs" in {
val actual = """{"b":{"a":1}}"""
val expected = """{"b":{"a":2,...}}"""
assert(diff(expected, actual) === Seq(Difference("/b/a", Some(JsNumber(2)), Some(JsNumber(1)))))
}
it should "accept additional attributes when ellipsis is found in expected object" in {
val actual = """{"a":1,"b":2}"""
val expected = """{"a":1,...}"""
assert(diff(expected, actual) === Seq())
}
it should "accept additional attributes when ellipsis is found in nested expected object" in {
val actual = """{"c":{"a":1,"b":2}}"""
val expected = """{"c":{"a":1,...}}"""
assert(diff(expected, actual) === Seq())
}
it should "compare json arrays with ellipsis when the ellipsis represents zero elements" in {
assert(diff("""[1,...]""", """[1]""") === Seq())
assert(diff("""[...,1]""", """[1]""") === Seq())
assert(diff("""[...,1,...]""", """[1]""") == Seq())
}
it should "compare json arrays with nested ellipsis when the ellipsis represents zero elements" in {
assert(diff("""{"a":[1,...]}""", """{"a":[1]}""") === Seq())
assert(diff("""{"a":[...,1]}""", """{"a":[1]}""") == Seq())
assert(diff("""{"a":[...,1,...]}""", """{"a":[1]}""") == Seq())
}
it should "compare json arrays with ellipsis between elements" in {
assert(diff("""{"a":[1,...,2]}""", """{"a":[1,2]}""") === Seq())
assert(diff("""{"a":[1,...,2]}""", """{"a":[1,3,4,2]}""") === Seq())
assert(diff("""{"a":[1,...,2]}""", """{"a":[1,3,4]}""") === Seq(Difference("/a[3]", Some(JsNumber(2)), None)))
}
it should "fail to compare json arrays with ellipsis when something differs" in {
assert(diff("""[1,...]""", """[2]""") === Seq(Difference("[0]", Some(JsNumber(1)), Some(JsNumber(2)))))
}
it should "accept additional elements when an array has ellipsis" in {
assert(diff("""[1,...]""", """[1,2,3]""") === Seq())
}
it should "compare json arrays with ellipsis at the end" in {
assert(diff("""{"a":[1,...]}""", """{"a":[1]}""") === Seq())
assert(diff("""{"a":[1,...]}""", """{"a":[1,3,4,2]}""") === Seq())
assert(diff("""{"a":[...]}""", """{"a":[]}""") === Seq())
}
}
| agilogy/json-comparator | src/test/scala/com/agilogy/json/JsonComparatorWithEllipsisSpec.scala | Scala | apache-2.0 | 4,088 |
/** Copyright 2015 TappingStone, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.predictionio.controller
import org.apache.predictionio.workflow.SharedSparkContext
import grizzled.slf4j.Logger
import org.scalatest.Matchers._
import org.scalatest.FunSuite
import org.scalatest.Inside
object MetricDevSuite {
class QIntSumMetric extends SumMetric[EmptyParams, Int, Int, Int, Int] {
def calculate(q: Int, p: Int, a: Int): Int = q
}
class QDoubleSumMetric extends SumMetric[EmptyParams, Int, Int, Int, Double] {
def calculate(q: Int, p: Int, a: Int): Double = q.toDouble
}
class QAverageMetric extends AverageMetric[EmptyParams, Int, Int, Int] {
def calculate(q: Int, p: Int, a: Int): Double = q.toDouble
}
class QOptionAverageMetric extends OptionAverageMetric[EmptyParams, Int, Int, Int] {
def calculate(q: Int, p: Int, a: Int): Option[Double] = {
if (q < 0) { None } else { Some(q.toDouble) }
}
}
class QStdevMetric extends StdevMetric[EmptyParams, Int, Int, Int] {
def calculate(q: Int, p: Int, a: Int): Double = q.toDouble
}
class QOptionStdevMetric extends OptionStdevMetric[EmptyParams, Int, Int, Int] {
def calculate(q: Int, p: Int, a: Int): Option[Double] = {
if (q < 0) { None } else { Some(q.toDouble) }
}
}
}
class MetricDevSuite
extends FunSuite with Inside with SharedSparkContext {
@transient lazy val logger = Logger[this.type]
test("Average Metric") {
val qpaSeq0 = Seq((1, 0, 0), (2, 0, 0), (3, 0, 0))
val qpaSeq1 = Seq((4, 0, 0), (5, 0, 0), (6, 0, 0))
val evalDataSet = Seq(
(EmptyParams(), sc.parallelize(qpaSeq0)),
(EmptyParams(), sc.parallelize(qpaSeq1)))
val m = new MetricDevSuite.QAverageMetric()
val result = m.calculate(sc, evalDataSet)
result shouldBe (21.0 / 6)
}
test("Option Average Metric") {
val qpaSeq0 = Seq((1, 0, 0), (2, 0, 0), (3, 0, 0))
val qpaSeq1 = Seq((-4, 0, 0), (-5, 0, 0), (6, 0, 0))
val evalDataSet = Seq(
(EmptyParams(), sc.parallelize(qpaSeq0)),
(EmptyParams(), sc.parallelize(qpaSeq1)))
val m = new MetricDevSuite.QOptionAverageMetric()
val result = m.calculate(sc, evalDataSet)
result shouldBe (12.0 / 4)
}
test("Stdev Metric") {
val qpaSeq0 = Seq((1, 0, 0), (1, 0, 0), (1, 0, 0), (1, 0, 0))
val qpaSeq1 = Seq((5, 0, 0), (5, 0, 0), (5, 0, 0), (5, 0, 0))
val evalDataSet = Seq(
(EmptyParams(), sc.parallelize(qpaSeq0)),
(EmptyParams(), sc.parallelize(qpaSeq1)))
val m = new MetricDevSuite.QStdevMetric()
val result = m.calculate(sc, evalDataSet)
result shouldBe 2.0
}
test("Option Stdev Metric") {
val qpaSeq0 = Seq((1, 0, 0), (1, 0, 0), (1, 0, 0), (1, 0, 0))
val qpaSeq1 = Seq((5, 0, 0), (5, 0, 0), (5, 0, 0), (5, 0, 0), (-5, 0, 0))
val evalDataSet = Seq(
(EmptyParams(), sc.parallelize(qpaSeq0)),
(EmptyParams(), sc.parallelize(qpaSeq1)))
val m = new MetricDevSuite.QOptionStdevMetric()
val result = m.calculate(sc, evalDataSet)
result shouldBe 2.0
}
test("Sum Metric [Int]") {
val qpaSeq0 = Seq((1, 0, 0), (2, 0, 0), (3, 0, 0))
val qpaSeq1 = Seq((4, 0, 0), (5, 0, 0), (6, 0, 0))
val evalDataSet = Seq(
(EmptyParams(), sc.parallelize(qpaSeq0)),
(EmptyParams(), sc.parallelize(qpaSeq1)))
val m = new MetricDevSuite.QIntSumMetric()
val result = m.calculate(sc, evalDataSet)
result shouldBe 21
}
test("Sum Metric [Double]") {
val qpaSeq0 = Seq((1, 0, 0), (2, 0, 0), (3, 0, 0))
val qpaSeq1 = Seq((4, 0, 0), (5, 0, 0), (6, 0, 0))
val evalDataSet = Seq(
(EmptyParams(), sc.parallelize(qpaSeq0)),
(EmptyParams(), sc.parallelize(qpaSeq1)))
val m = new MetricDevSuite.QDoubleSumMetric()
val result = m.calculate(sc, evalDataSet)
result shouldBe 21.0
}
}
| alex9311/PredictionIO | core/src/test/scala/org/apache/predictionio/controller/MetricTest.scala | Scala | apache-2.0 | 4,445 |
package io.ubiqesh.central.mqtt
import org.vertx.scala.core.Vertx
import org.vertx.java.core.{Handler => JHandler}
import org.vertx.java.core.buffer._
import org.vertx.scala.core.net.NetSocket
import io.netty.buffer.{Unpooled, ByteBuf}
import java.io.{ByteArrayInputStream, DataInputStream, DataInput}
import spray.json.JsObject
import scala.util.parsing.json.JSONObject
import io.ubiqesh.central.mqtt.decoder.{MqttDecoder, Decoder}
import io.ubiqesh.central.mqtt.encoder.Encoder
import io.ubiqesh.central.mqtt.commands._
import scala.collection.mutable
/**
* Created by Christoph Grotz on 30.12.13.
*/
class MqttServer(vertx: Vertx) {
val sockets = new mutable.HashSet[MqttSocket]()
def registerClient = (client: NetSocket) => {
val socket = new MqttSocket(client, this)
sockets.add(socket)
client.closeHandler({
sockets.remove(socket)
})
}
def publishMessage = (messageId: Option[Int], topic:String, payload: Array[Byte]) => {
sockets.foreach( socket => socket.publish(messageId, topic, payload))
}
}
| ubiqesh/ubiqesh | central/src/main/scala/io/ubiqesh/central/mqtt/MqttServer.scala | Scala | apache-2.0 | 1,046 |
package gitbucket.core.service
import gitbucket.core.model.{WebHook, RepositoryWebHook}
import org.scalatest.FunSuite
import gitbucket.core.model.WebHookContentType
class WebHookServiceSpec extends FunSuite with ServiceSpecBase {
lazy val service = new WebHookPullRequestService with AccountService with ActivityService with RepositoryService
with MergeService with PullRequestService with IssuesService with CommitsService with LabelsService
with MilestonesService with PrioritiesService with WebHookPullRequestReviewCommentService
test("WebHookPullRequestService.getPullRequestsByRequestForWebhook") {
withTestDB { implicit session =>
val user1 = generateNewUserWithDBRepository("user1", "repo1")
val user2 = generateNewUserWithDBRepository("user2", "repo2")
val user3 = generateNewUserWithDBRepository("user3", "repo3")
val issueUser = user("root")
val (issue1, pullreq1) = generateNewPullRequest("user1/repo1/master1", "user2/repo2/master2", loginUser = "root")
val (issue3, pullreq3) = generateNewPullRequest("user3/repo3/master3", "user2/repo2/master2", loginUser = "root")
val (issue32, pullreq32) =
generateNewPullRequest("user3/repo3/master32", "user2/repo2/master2", loginUser = "root")
generateNewPullRequest("user2/repo2/master2", "user1/repo1/master2", loginUser = "root")
service.addWebHook("user1", "repo1", "webhook1-1", Set(WebHook.PullRequest), WebHookContentType.FORM, Some("key"))
service.addWebHook("user1", "repo1", "webhook1-2", Set(WebHook.PullRequest), WebHookContentType.FORM, Some("key"))
service.addWebHook("user2", "repo2", "webhook2-1", Set(WebHook.PullRequest), WebHookContentType.FORM, Some("key"))
service.addWebHook("user2", "repo2", "webhook2-2", Set(WebHook.PullRequest), WebHookContentType.FORM, Some("key"))
service.addWebHook("user3", "repo3", "webhook3-1", Set(WebHook.PullRequest), WebHookContentType.FORM, Some("key"))
service.addWebHook("user3", "repo3", "webhook3-2", Set(WebHook.PullRequest), WebHookContentType.FORM, Some("key"))
assert(service.getPullRequestsByRequestForWebhook("user1", "repo1", "master1") == Map.empty)
val r = service.getPullRequestsByRequestForWebhook("user2", "repo2", "master2").mapValues(_.map(_.url).toSet)
assert(r.size == 3)
assert(r((issue1, issueUser, pullreq1, user1, user2)) == Set("webhook1-1", "webhook1-2"))
assert(r((issue3, issueUser, pullreq3, user3, user2)) == Set("webhook3-1", "webhook3-2"))
assert(r((issue32, issueUser, pullreq32, user3, user2)) == Set("webhook3-1", "webhook3-2"))
      // when closed, it is not found.
service.updateClosed("user1", "repo1", issue1.issueId, true)
val r2 = service.getPullRequestsByRequestForWebhook("user2", "repo2", "master2").mapValues(_.map(_.url).toSet)
assert(r2.size == 2)
assert(r2((issue3, issueUser, pullreq3, user3, user2)) == Set("webhook3-1", "webhook3-2"))
assert(r2((issue32, issueUser, pullreq32, user3, user2)) == Set("webhook3-1", "webhook3-2"))
}
}
test("add and get and update and delete") {
withTestDB { implicit session =>
val user1 = generateNewUserWithDBRepository("user1", "repo1")
val formType = WebHookContentType.FORM
val jsonType = WebHookContentType.JSON
service.addWebHook("user1", "repo1", "http://example.com", Set(WebHook.PullRequest), formType, Some("key"))
assert(
service.getWebHooks("user1", "repo1") == List(
(RepositoryWebHook("user1", "repo1", "http://example.com", formType, Some("key")), Set(WebHook.PullRequest))
)
)
assert(
service.getWebHook("user1", "repo1", "http://example.com") == Some(
(RepositoryWebHook("user1", "repo1", "http://example.com", formType, Some("key")), Set(WebHook.PullRequest))
)
)
assert(
service.getWebHooksByEvent("user1", "repo1", WebHook.PullRequest) == List(
(RepositoryWebHook("user1", "repo1", "http://example.com", formType, Some("key")))
)
)
assert(service.getWebHooksByEvent("user1", "repo1", WebHook.Push) == Nil)
assert(service.getWebHook("user1", "repo1", "http://example.com2") == None)
assert(service.getWebHook("user2", "repo1", "http://example.com") == None)
assert(service.getWebHook("user1", "repo2", "http://example.com") == None)
service.updateWebHook(
"user1",
"repo1",
"http://example.com",
Set(WebHook.Push, WebHook.Issues),
jsonType,
Some("key")
)
assert(
service.getWebHook("user1", "repo1", "http://example.com") == Some(
(
RepositoryWebHook("user1", "repo1", "http://example.com", jsonType, Some("key")),
Set(WebHook.Push, WebHook.Issues)
)
)
)
assert(service.getWebHooksByEvent("user1", "repo1", WebHook.PullRequest) == Nil)
assert(
service.getWebHooksByEvent("user1", "repo1", WebHook.Push) == List(
(RepositoryWebHook("user1", "repo1", "http://example.com", jsonType, Some("key")))
)
)
service.deleteWebHook("user1", "repo1", "http://example.com")
assert(service.getWebHook("user1", "repo1", "http://example.com") == None)
}
}
test("getWebHooks, getWebHooksByEvent") {
withTestDB { implicit session =>
val user1 = generateNewUserWithDBRepository("user1", "repo1")
val ctype = WebHookContentType.FORM
service.addWebHook("user1", "repo1", "http://example.com/1", Set(WebHook.PullRequest), ctype, Some("key"))
service.addWebHook("user1", "repo1", "http://example.com/2", Set(WebHook.Push), ctype, Some("key"))
service.addWebHook(
"user1",
"repo1",
"http://example.com/3",
Set(WebHook.PullRequest, WebHook.Push),
ctype,
Some("key")
)
assert(
service.getWebHooks("user1", "repo1") == List(
RepositoryWebHook("user1", "repo1", "http://example.com/1", ctype, Some("key")) -> Set(WebHook.PullRequest),
RepositoryWebHook("user1", "repo1", "http://example.com/2", ctype, Some("key")) -> Set(WebHook.Push),
RepositoryWebHook("user1", "repo1", "http://example.com/3", ctype, Some("key")) -> Set(
WebHook.PullRequest,
WebHook.Push
)
)
)
assert(
service.getWebHooksByEvent("user1", "repo1", WebHook.PullRequest) == List(
RepositoryWebHook("user1", "repo1", "http://example.com/1", ctype, Some("key")),
RepositoryWebHook("user1", "repo1", "http://example.com/3", ctype, Some("key"))
)
)
}
}
}
| McFoggy/gitbucket | src/test/scala/gitbucket/core/service/WebHookServiceSpec.scala | Scala | apache-2.0 | 6,683 |
package object junto {
protected[junto] def differenceNorm2Squared(length: Int)(
dist1: Seq[Double], dist2: Seq[Double]
) = {
var index = 0
var squaredDiffsSum = 0.0
while (index < length) {
val diff = dist1(index) - dist2(index)
squaredDiffsSum += diff * diff
index += 1
}
math.sqrt(squaredDiffsSum)
}
protected[junto] lazy val DUMMY_LABEL = "__DUMMY__"
}
| scalanlp/junto | src/main/scala/junto/package.scala | Scala | apache-2.0 | 410 |
/*
* @author Philip Stutz
*
* Copyright 2011 University of Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.signalcollect.features
import org.specs2.mutable._
import org.junit.runner.RunWith
import org.specs2.runner.JUnitRunner
import org.specs2.matcher.Matcher
import org.specs2.mock.Mockito
import com.signalcollect.interfaces._
import java.util.Map.Entry
import com.signalcollect._
import com.signalcollect.examples.PageRankVertex
import com.signalcollect.examples.PageRankEdge
import com.signalcollect.examples.SudokuCell
@RunWith(classOf[JUnitRunner])
class AggregationOperationsSpec extends SpecificationWithJUnit with Mockito {
"SumOfStates" should {
val graph = GraphBuilder.build
graph.addVertex(new PageRankVertex(1))
graph.addVertex(new PageRankVertex(2))
graph.addVertex(new SudokuCell(1, None))
graph.addEdge(1, new PageRankEdge(2))
graph.addEdge(2, new PageRankEdge(1))
graph.execute(ExecutionConfiguration.withSignalThreshold(0))
"sum all states correctly" in {
val sumOfStates = graph.aggregate(new SumOfStates[Double]).getOrElse(0.0)
(sumOfStates - 2.0) <= 0.0001
}
}
"ProductOfStates" should {
val graph = GraphBuilder.build
graph.addVertex(new PageRankVertex(1))
graph.addVertex(new PageRankVertex(2))
graph.addVertex(new SudokuCell(1, None))
graph.addEdge(1, new PageRankEdge(2))
graph.addEdge(2, new PageRankEdge(1))
graph.execute(ExecutionConfiguration.withSignalThreshold(0))
"multiply all states correctly" in {
val productOfStates = graph.aggregate(new ProductOfStates[Double]).getOrElse(0.0)
(productOfStates - 1.0) <= 0.0001
}
}
"CountVertices" should {
val graph = GraphBuilder.build
graph.addVertex(new PageRankVertex(1))
graph.addVertex(new PageRankVertex(2))
graph.addVertex(new PageRankVertex(3))
graph.addVertex(new SudokuCell(1, None))
graph.removeVertex(1)
"count the number of PageRank vertices correctly" in {
val numberOfPRVertices = graph.aggregate(new CountVertices[PageRankVertex])
(numberOfPRVertices - 2.0) <= 0.0001
}
"count the number of SudokuCell vertices correctly" in {
val numberOfSCVertices = graph.aggregate(new CountVertices[SudokuCell])
(numberOfSCVertices - 1.0) <= 0.0001
}
}
"SampleVertexIds" should {
val graph = GraphBuilder.build
val idSet = (1 to 1000).toSet
for (id <- idSet) {
graph.addVertex(new PageRankVertex(id))
}
"sample 0 vertex ids correctly" in {
val vertexSample = graph.aggregate(new SampleVertexIds(0))
vertexSample.size == 0
}
"sample 50 vertex ids correclty" in {
val vertexSample = graph.aggregate(new SampleVertexIds(50))
vertexSample.size == 50 && vertexSample.forall(id => idSet.contains(id.asInstanceOf[Int]))
}
"sample 50 vertex ids correclty" in {
val vertexSample = graph.aggregate(new SampleVertexIds(1000))
vertexSample.size == 1000 && vertexSample.forall(id => idSet.contains(id.asInstanceOf[Int]))
}
}
} | Tjoene/thesis | Case_Programs/signal-collect/src/test/scala/com/signalcollect/features/AggregationOperationsSpec.scala | Scala | gpl-2.0 | 3,889 |
// @SOURCE:/Users/TarcioMac/Development/PlayProjects/uk.aber.spam/conf/routes
// @HASH:e03a3112c60574d71b1dd4dda8f4a7a06eb4c740
// @DATE:Thu Aug 07 15:09:32 BST 2014
import Routes.{prefix => _prefix, defaultPrefix => _defaultPrefix}
import play.core._
import play.core.Router._
import play.core.Router.HandlerInvokerFactory._
import play.core.j._
import play.api.mvc._
import _root_.controllers.Assets.Asset
import _root_.play.libs.F
import Router.queryString
// @LINE:20
// @LINE:17
// @LINE:16
// @LINE:15
// @LINE:14
// @LINE:13
// @LINE:12
// @LINE:11
// @LINE:10
// @LINE:9
// @LINE:8
// @LINE:7
// @LINE:6
package controllers {
// @LINE:20
class ReverseAssets {
// @LINE:20
def at(file:String): Call = {
implicit val _rrc = new ReverseRouteContext(Map(("path", "/public")))
Call("GET", _prefix + { _defaultPrefix } + "assets/" + implicitly[PathBindable[String]].unbind("file", file))
}
}
// @LINE:9
// @LINE:7
class ReverseAdminController {
// @LINE:7
def arrangeMeeting(): Call = {
import ReverseRouteContext.empty
Call("POST", _prefix + { _defaultPrefix } + "studentReport")
}
// @LINE:9
def getJSON(): Call = {
import ReverseRouteContext.empty
Call("GET", _prefix + { _defaultPrefix } + "json/admin")
}
}
// @LINE:13
// @LINE:10
class ReverseYearTutorController {
// @LINE:10
def getJSON(): Call = {
import ReverseRouteContext.empty
Call("GET", _prefix + { _defaultPrefix } + "json/yt")
}
// @LINE:13
def updateOutcome(): Call = {
import ReverseRouteContext.empty
Call("POST", _prefix + { _defaultPrefix } + "ytList")
}
}
// @LINE:15
// @LINE:11
class ReverseDotController {
// @LINE:11
def getJSON(): Call = {
import ReverseRouteContext.empty
Call("GET", _prefix + { _defaultPrefix } + "json/dot")
}
// @LINE:15
def updateOutcome(): Call = {
import ReverseRouteContext.empty
Call("POST", _prefix + { _defaultPrefix } + "dotList")
}
}
// @LINE:17
// @LINE:16
// @LINE:14
// @LINE:12
// @LINE:8
// @LINE:6
class ReverseApplication {
// @LINE:16
def dotList(): Call = {
import ReverseRouteContext.empty
Call("GET", _prefix + { _defaultPrefix } + "dotList")
}
// @LINE:8
def studentReport(): Call = {
import ReverseRouteContext.empty
Call("GET", _prefix + { _defaultPrefix } + "studentReport")
}
// @LINE:17
def summary(): Call = {
import ReverseRouteContext.empty
Call("GET", _prefix + { _defaultPrefix } + "summary")
}
// @LINE:14
def ytList(): Call = {
import ReverseRouteContext.empty
Call("GET", _prefix + { _defaultPrefix } + "ytList")
}
// @LINE:12
def jsonSummary(stud_uid:String, academic_year:String): Call = {
import ReverseRouteContext.empty
Call("GET", _prefix + { _defaultPrefix } + "json/summary" + queryString(List(Some(implicitly[QueryStringBindable[String]].unbind("stud_uid", stud_uid)), Some(implicitly[QueryStringBindable[String]].unbind("academic_year", academic_year)))))
}
// @LINE:6
def index(): Call = {
import ReverseRouteContext.empty
Call("GET", _prefix)
}
}
}
// @LINE:20
// @LINE:17
// @LINE:16
// @LINE:15
// @LINE:14
// @LINE:13
// @LINE:12
// @LINE:11
// @LINE:10
// @LINE:9
// @LINE:8
// @LINE:7
// @LINE:6
package controllers.javascript {
import ReverseRouteContext.empty
// @LINE:20
class ReverseAssets {
// @LINE:20
def at : JavascriptReverseRoute = JavascriptReverseRoute(
"controllers.Assets.at",
"""
function(file) {
return _wA({method:"GET", url:"""" + _prefix + { _defaultPrefix } + """" + "assets/" + (""" + implicitly[PathBindable[String]].javascriptUnbind + """)("file", file)})
}
"""
)
}
// @LINE:9
// @LINE:7
class ReverseAdminController {
// @LINE:7
def arrangeMeeting : JavascriptReverseRoute = JavascriptReverseRoute(
"controllers.AdminController.arrangeMeeting",
"""
function() {
return _wA({method:"POST", url:"""" + _prefix + { _defaultPrefix } + """" + "studentReport"})
}
"""
)
// @LINE:9
def getJSON : JavascriptReverseRoute = JavascriptReverseRoute(
"controllers.AdminController.getJSON",
"""
function() {
return _wA({method:"GET", url:"""" + _prefix + { _defaultPrefix } + """" + "json/admin"})
}
"""
)
}
// @LINE:13
// @LINE:10
class ReverseYearTutorController {
// @LINE:10
def getJSON : JavascriptReverseRoute = JavascriptReverseRoute(
"controllers.YearTutorController.getJSON",
"""
function() {
return _wA({method:"GET", url:"""" + _prefix + { _defaultPrefix } + """" + "json/yt"})
}
"""
)
// @LINE:13
def updateOutcome : JavascriptReverseRoute = JavascriptReverseRoute(
"controllers.YearTutorController.updateOutcome",
"""
function() {
return _wA({method:"POST", url:"""" + _prefix + { _defaultPrefix } + """" + "ytList"})
}
"""
)
}
// @LINE:15
// @LINE:11
class ReverseDotController {
// @LINE:11
def getJSON : JavascriptReverseRoute = JavascriptReverseRoute(
"controllers.DotController.getJSON",
"""
function() {
return _wA({method:"GET", url:"""" + _prefix + { _defaultPrefix } + """" + "json/dot"})
}
"""
)
// @LINE:15
def updateOutcome : JavascriptReverseRoute = JavascriptReverseRoute(
"controllers.DotController.updateOutcome",
"""
function() {
return _wA({method:"POST", url:"""" + _prefix + { _defaultPrefix } + """" + "dotList"})
}
"""
)
}
// @LINE:17
// @LINE:16
// @LINE:14
// @LINE:12
// @LINE:8
// @LINE:6
class ReverseApplication {
// @LINE:16
def dotList : JavascriptReverseRoute = JavascriptReverseRoute(
"controllers.Application.dotList",
"""
function() {
return _wA({method:"GET", url:"""" + _prefix + { _defaultPrefix } + """" + "dotList"})
}
"""
)
// @LINE:8
def studentReport : JavascriptReverseRoute = JavascriptReverseRoute(
"controllers.Application.studentReport",
"""
function() {
return _wA({method:"GET", url:"""" + _prefix + { _defaultPrefix } + """" + "studentReport"})
}
"""
)
// @LINE:17
def summary : JavascriptReverseRoute = JavascriptReverseRoute(
"controllers.Application.summary",
"""
function() {
return _wA({method:"GET", url:"""" + _prefix + { _defaultPrefix } + """" + "summary"})
}
"""
)
// @LINE:14
def ytList : JavascriptReverseRoute = JavascriptReverseRoute(
"controllers.Application.ytList",
"""
function() {
return _wA({method:"GET", url:"""" + _prefix + { _defaultPrefix } + """" + "ytList"})
}
"""
)
// @LINE:12
def jsonSummary : JavascriptReverseRoute = JavascriptReverseRoute(
"controllers.Application.jsonSummary",
"""
function(stud_uid,academic_year) {
return _wA({method:"GET", url:"""" + _prefix + { _defaultPrefix } + """" + "json/summary" + _qS([(""" + implicitly[QueryStringBindable[String]].javascriptUnbind + """)("stud_uid", stud_uid), (""" + implicitly[QueryStringBindable[String]].javascriptUnbind + """)("academic_year", academic_year)])})
}
"""
)
// @LINE:6
def index : JavascriptReverseRoute = JavascriptReverseRoute(
"controllers.Application.index",
"""
function() {
return _wA({method:"GET", url:"""" + _prefix + """"})
}
"""
)
}
}
// @LINE:20
// @LINE:17
// @LINE:16
// @LINE:15
// @LINE:14
// @LINE:13
// @LINE:12
// @LINE:11
// @LINE:10
// @LINE:9
// @LINE:8
// @LINE:7
// @LINE:6
package controllers.ref {
// @LINE:20
class ReverseAssets {
// @LINE:20
def at(path:String, file:String): play.api.mvc.HandlerRef[_] = new play.api.mvc.HandlerRef(
controllers.Assets.at(path, file), HandlerDef(this.getClass.getClassLoader, "", "controllers.Assets", "at", Seq(classOf[String], classOf[String]), "GET", """ Map static resources from the /public folder to the /assets URL path""", _prefix + """assets/$file<.+>""")
)
}
// @LINE:9
// @LINE:7
class ReverseAdminController {
// @LINE:7
def arrangeMeeting(): play.api.mvc.HandlerRef[_] = new play.api.mvc.HandlerRef(
controllers.AdminController.arrangeMeeting(), HandlerDef(this.getClass.getClassLoader, "", "controllers.AdminController", "arrangeMeeting", Seq(), "POST", """""", _prefix + """studentReport""")
)
// @LINE:9
def getJSON(): play.api.mvc.HandlerRef[_] = new play.api.mvc.HandlerRef(
controllers.AdminController.getJSON(), HandlerDef(this.getClass.getClassLoader, "", "controllers.AdminController", "getJSON", Seq(), "GET", """""", _prefix + """json/admin""")
)
}
// @LINE:13
// @LINE:10
class ReverseYearTutorController {
// @LINE:10
def getJSON(): play.api.mvc.HandlerRef[_] = new play.api.mvc.HandlerRef(
controllers.YearTutorController.getJSON(), HandlerDef(this.getClass.getClassLoader, "", "controllers.YearTutorController", "getJSON", Seq(), "GET", """""", _prefix + """json/yt""")
)
// @LINE:13
def updateOutcome(): play.api.mvc.HandlerRef[_] = new play.api.mvc.HandlerRef(
controllers.YearTutorController.updateOutcome(), HandlerDef(this.getClass.getClassLoader, "", "controllers.YearTutorController", "updateOutcome", Seq(), "POST", """""", _prefix + """ytList""")
)
}
// @LINE:15
// @LINE:11
class ReverseDotController {
// @LINE:11
def getJSON(): play.api.mvc.HandlerRef[_] = new play.api.mvc.HandlerRef(
controllers.DotController.getJSON(), HandlerDef(this.getClass.getClassLoader, "", "controllers.DotController", "getJSON", Seq(), "GET", """""", _prefix + """json/dot""")
)
// @LINE:15
def updateOutcome(): play.api.mvc.HandlerRef[_] = new play.api.mvc.HandlerRef(
controllers.DotController.updateOutcome(), HandlerDef(this.getClass.getClassLoader, "", "controllers.DotController", "updateOutcome", Seq(), "POST", """""", _prefix + """dotList""")
)
}
// @LINE:17
// @LINE:16
// @LINE:14
// @LINE:12
// @LINE:8
// @LINE:6
class ReverseApplication {
// @LINE:16
def dotList(): play.api.mvc.HandlerRef[_] = new play.api.mvc.HandlerRef(
controllers.Application.dotList(), HandlerDef(this.getClass.getClassLoader, "", "controllers.Application", "dotList", Seq(), "GET", """""", _prefix + """dotList""")
)
// @LINE:8
def studentReport(): play.api.mvc.HandlerRef[_] = new play.api.mvc.HandlerRef(
controllers.Application.studentReport(), HandlerDef(this.getClass.getClassLoader, "", "controllers.Application", "studentReport", Seq(), "GET", """""", _prefix + """studentReport""")
)
// @LINE:17
def summary(): play.api.mvc.HandlerRef[_] = new play.api.mvc.HandlerRef(
controllers.Application.summary(), HandlerDef(this.getClass.getClassLoader, "", "controllers.Application", "summary", Seq(), "GET", """""", _prefix + """summary""")
)
// @LINE:14
def ytList(): play.api.mvc.HandlerRef[_] = new play.api.mvc.HandlerRef(
controllers.Application.ytList(), HandlerDef(this.getClass.getClassLoader, "", "controllers.Application", "ytList", Seq(), "GET", """""", _prefix + """ytList""")
)
// @LINE:12
def jsonSummary(stud_uid:String, academic_year:String): play.api.mvc.HandlerRef[_] = new play.api.mvc.HandlerRef(
controllers.Application.jsonSummary(stud_uid, academic_year), HandlerDef(this.getClass.getClassLoader, "", "controllers.Application", "jsonSummary", Seq(classOf[String], classOf[String]), "GET", """""", _prefix + """json/summary""")
)
// @LINE:6
def index(): play.api.mvc.HandlerRef[_] = new play.api.mvc.HandlerRef(
controllers.Application.index(), HandlerDef(this.getClass.getClassLoader, "", "controllers.Application", "index", Seq(), "GET", """ Home page""", _prefix + """""")
)
}
}
| tmmachado/uk.aber.spam | target/scala-2.11/src_managed/main/routes_reverseRouting.scala | Scala | gpl-2.0 | 12,815 |
package provingground.translation
import provingground._
import translation._
import NlpProse._
//import provingground.ParseProse._
import java.io._
// import java.util.*;
import scala.jdk.CollectionConverters._
import edu.stanford.nlp.io._
import edu.stanford.nlp.ling._
import edu.stanford.nlp.pipeline._
import edu.stanford.nlp.trees._
import edu.stanford.nlp.util._
import edu.stanford.nlp.ling.CoreAnnotations._
import edu.stanford.nlp.semgraph._
import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations._
/**
 * Direct interface to the Stanford parser; the [[StanfordParser]] object, which handles LaTeX, is used instead.
 */
object CoreNLP {
def gov(e: SemanticGraphEdge) =
Token(e.getGovernor().word, e.getGovernor().index)
def dep(e: SemanticGraphEdge) =
Token(e.getDependent().word, e.getDependent().index)
def depWord(short: String, specific: String) =
if (short == "prep") short + "_" + specific else short
def depType(e: SemanticGraphEdge) =
depWord(e.getRelation().getShortName(), e.getRelation.getSpecific())
def newPipe = {
val props = new java.util.Properties()
props.put("annotators", "tokenize, ssplit, pos, lemma, ner, parse, dcoref")
new StanfordCoreNLP(props)
}
implicit lazy val pipe = newPipe
def annotatedDoc(text: String, pipe: StanfordCoreNLP) = {
val document = new Annotation(text)
pipe.annotate(document)
document
}
def sentences(document: Annotation): List[CoreMap] = {
(document.get(classOf[SentencesAnnotation])).asScala.toList
}
def depRelIterable(sentence: CoreMap) = {
val dependencies =
sentence.get(classOf[CollapsedCCProcessedDependenciesAnnotation]);
val dependencyIterable = dependencies.edgeIterable().asScala
dependencyIterable map
((e: SemanticGraphEdge) => DepRel(gov(e), dep(e), depType(e)))
}
def proseTrees(text: String)(implicit pipe: StanfordCoreNLP) = {
for (sentence <- sentences(annotatedDoc(text, pipe))) yield {
ProseTree(depRelIterable(sentence).toList)
}
}
def coreLabelList(sentence: CoreMap) = {
(sentence.get(classOf[TokensAnnotation])).asScala.toList
}
def word(token: CoreLabel) = token.get(classOf[TextAnnotation])
def pos(token: CoreLabel) = token.get(classOf[PartOfSpeechAnnotation])
def namedentity(token: CoreLabel) =
token.get(classOf[NamedEntityTagAnnotation])
}
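// Usage sketch: with the implicit `pipe` defined above in scope, `proseTrees`
// yields one ProseTree of dependency relations per sentence. For example:
//   import provingground.translation.CoreNLP._
//   val trees = proseTrees("Every prime greater than two is odd.")
//   // trees: List[ProseTree], one entry per sentence in the input text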
// The part below is for testing.
object CoreNLPTest {
import provingground.translation.CoreNLP._
val props = new java.util.Properties()
props.put("annotators", "tokenize, ssplit, pos, lemma, ner, parse, dcoref")
val pipeline = new StanfordCoreNLP(props)
// read some text in the text variable
  val text = "Quick brown fox jumps over the lazy dog"
// create an empty Annotation just with the given text
val document = new Annotation(text)
// run all Annotators on this text
pipeline.annotate(document)
// these are all the sentences in this document
// a CoreMap is essentially a Map that uses class objects as keys and has values with custom types
val sentencesJava =
(document.get(classOf[CoreAnnotations.SentencesAnnotation]))
import scala.jdk.CollectionConverters._
val sentences: List[CoreMap] = sentencesJava.asScala.toList
val depTrees = for (sentence <- sentences) yield {
// traversing the words in the current sentence
// a CoreLabel is a CoreMap with additional token-specific methods
// for (CoreLabel token: sentence.get(TokensAnnotation.class)) {
// this is the text of the token
// String word = token.get(TextAnnotation.class);
// this is the POS tag of the token
// String pos = token.get(PartOfSpeechAnnotation.class);
// this is the NER label of the token
// String ne = token.get(NamedEntityTagAnnotation.class);
// }
// this is the parse tree of the current sentence
// Tree tree = sentence.get(TreeAnnotation.class);
// this is the Stanford dependency graph of the current sentence
val dependencies =
sentence.get(classOf[CollapsedCCProcessedDependenciesAnnotation]);
val dependencyIterable = dependencies.edgeIterable().asScala
dependencyIterable map
((e: SemanticGraphEdge) => DepRel(gov(e), dep(e), depType(e)))
}
for (t <- depTrees; e <- t) println(e)
// This is the coreference link graph
// Each chain stores a set of mentions that link to each other,
// along with a method for getting the most representative mention
// Both sentence and token offsets start at 1!
// Map<Integer, CorefChain> graph =
// document.get(CorefChainAnnotation.class);
val tree = proseTrees(
"if a prime number p divides mn, p divides one of m and n").head
// println(toFormula(tree, Global))
}
| siddhartha-gadgil/ProvingGround | nlp/src/main/scala/provingground/nlp/CoreNLP.scala | Scala | mit | 4,777 |
package de.frosner.broccoli.models
import play.api.libs.json.{JsValue, Json}
case class InstanceCreation(templateId: String, parameters: Map[String, JsValue])
object InstanceCreation {
implicit val instanceCreationWrites = Json.writes[InstanceCreation]
implicit val instanceCreationReads = Json.reads[InstanceCreation]
}
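// Illustrative example (template id and parameters are made up): the derived
// formats round-trip an InstanceCreation through Play JSON.
//   import play.api.libs.json.{JsString, Json}
//   val creation = InstanceCreation("jupyter", Map("cpu" -> JsString("2")))
//   val json = Json.toJson(creation)
//   json.as[InstanceCreation] == creation // expected to hold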
| FRosner/cluster-broccoli | server/src/main/scala/de/frosner/broccoli/models/InstanceCreation.scala | Scala | apache-2.0 | 331 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime
import org.apache.flink.api.common.functions.{FlatJoinFunction, RichFlatJoinFunction}
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.typeutils.ResultTypeQueryable
import org.apache.flink.table.codegen.Compiler
import org.apache.flink.configuration.Configuration
import org.apache.flink.util.Collector
import org.slf4j.LoggerFactory
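/**
  * Runner for a code-generated [[FlatJoinFunction]]: the generated source in `code` is
  * compiled and instantiated in [[open]], and each [[join]] call is delegated to that
  * compiled instance.
  */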
class FlatJoinRunner[IN1, IN2, OUT](
name: String,
code: String,
@transient var returnType: TypeInformation[OUT])
extends RichFlatJoinFunction[IN1, IN2, OUT]
with ResultTypeQueryable[OUT]
with Compiler[FlatJoinFunction[IN1, IN2, OUT]] {
val LOG = LoggerFactory.getLogger(this.getClass)
private var function: FlatJoinFunction[IN1, IN2, OUT] = null
override def open(parameters: Configuration): Unit = {
    LOG.debug(s"Compiling FlatJoinFunction: $name \n\n Code:\n$code")
val clazz = compile(getRuntimeContext.getUserCodeClassLoader, name, code)
LOG.debug("Instantiating FlatJoinFunction.")
function = clazz.newInstance()
}
override def join(first: IN1, second: IN2, out: Collector[OUT]): Unit =
function.join(first, second, out)
override def getProducedType: TypeInformation[OUT] = returnType
}
| WangTaoTheTonic/flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/FlatJoinRunner.scala | Scala | apache-2.0 | 2,073 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.registry.impl
import java.net.URI
import java.util.{ Collections, Map => JMap }
import com.lightbend.lagom.internal.javadsl.registry.ServiceRegistryService
import com.lightbend.lagom.javadsl.api.ServiceAcl
import javax.inject.Inject
import scala.collection.JavaConverters._
case class UnmanagedServices @Inject() (services: Map[String, ServiceRegistryService])
object UnmanagedServices {
def apply(services: JMap[String, String]): UnmanagedServices = {
val convertedServices = for ((name, url) <- services.asScala.toMap) yield {
name -> new ServiceRegistryService(new URI(url))
}
UnmanagedServices(convertedServices)
}
}
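// Usage sketch (service name and URL are made up): each entry of the Java map becomes
// a ServiceRegistryService keyed by service name.
//   import scala.collection.JavaConverters._
//   val unmanaged = UnmanagedServices(Map("web" -> "http://localhost:9000").asJava)
//   // unmanaged.services("web") wraps URI("http://localhost:9000")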
| rstento/lagom | dev/service-registry/service-locator/src/main/scala/com/lightbend/lagom/registry/impl/UnmanagedServices.scala | Scala | apache-2.0 | 754 |
import scala.annotation._
object Sublist {
  @tailrec
  private def checkForSuperOrSubLists[T](list1: List[T],
                                         list2: List[T],
                                         subOrSuperList: Int): Int = {
    if (list2.isEmpty || list2.length < list1.length) Unequal
    else if (list1 == list2.take(list1.length)) subOrSuperList
    else checkForSuperOrSubLists(list1, list2.tail, subOrSuperList)
  }
  def sublist[T](list1: List[T], list2: List[T]): Int =
    list1.length compare list2.length match {
      case 0 => if (list1 == list2) Equal else Unequal
      case x if x < 0 => checkForSuperOrSubLists(list1, list2, Sublist)
      case _ => checkForSuperOrSubLists(list2, list1, Superlist)
    }
final val Equal = 0
final val Unequal = 1
final val Superlist = 2
final val Sublist = 3
}
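// Examples of the classification (these follow directly from the definitions above):
//   Sublist.sublist(List(1, 2, 3), List(1, 2, 3)) == Sublist.Equal
//   Sublist.sublist(List(1, 2), List(0, 1, 2, 3)) == Sublist.Sublist
//   Sublist.sublist(List(0, 1, 2, 3), List(1, 2)) == Sublist.Superlist
//   Sublist.sublist(List(1, 3), List(1, 2, 3)) == Sublist.Unequal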
| stanciua/exercism | scala/sublist/src/main/scala/Sublist.scala | Scala | mit | 972 |
package services
import java.util.{Date, UUID}
import javax.inject.Inject
import dao.Schema
import geo.GeoDataLoader
import kidstravel.shared.Api
import kidstravel.shared.geo.{City, CityLabel, Country}
import kidstravel.shared.poi.Poi
import play.api.db.slick.DatabaseConfigProvider
import slick.driver.JdbcProfile
import slick.lifted.TableQuery
import scala.concurrent.Future
class ApiService @Inject()(
protected val dbConfigProvider: DatabaseConfigProvider,
protected val geoDataLoader: GeoDataLoader) extends Api {
import scala.concurrent.ExecutionContext.Implicits.global
val dbConfig = dbConfigProvider.get[JdbcProfile]
val db = dbConfig.db
import dbConfig.driver.api._
import Schema._
override def getCountries(): Seq[Country] = {
Nil
}
override def getPoi(id: Long): Future[Poi] =
db.run(pois.filter(_.id === id).result.head)
override def updatePoi(poi: Poi): Seq[Poi] = Nil
override def deletePoi(id: Long): Seq[Poi] = Nil
override def getCitiesByName(fragment: String): Future[Seq[CityLabel]] = {
val query = for {
city <- cities.filter(_.name.toLowerCase.startsWith(fragment.toLowerCase))
country <- countries if city.countryCode === country.code
} yield (city.id, city.name, country.name, city.subdivisionId)
db.run(query.take(10).result).map(_.map {
case (id, name, country, subdivisionId) => CityLabel(id, name, country, subdivisionId.map(_.toString))
})
}
override def getTopCities(): Future[Seq[City]] = {
val query = cities.sortBy(_.population.desc).take(10)
db.run(query.result)
}
override def getCity(cityId: Long): Future[City] =
db.run(cities.filter(_.id === cityId).result.head)
}
| devkat/kidstravel | server/src/main/scala/services/ApiService.scala | Scala | apache-2.0 | 1,710 |
package sttp.client3.opentracing
import io.opentracing.tag.Tags
import io.opentracing.{Span, SpanContext, Tracer}
import io.opentracing.propagation.Format
import io.opentracing.Tracer.SpanBuilder
import sttp.capabilities.Effect
import sttp.monad.MonadError
import sttp.monad.syntax._
import sttp.client3.{FollowRedirectsBackend, Request, RequestT, Response, SttpBackend}
import sttp.client3.opentracing.OpenTracingBackend._
import scala.collection.JavaConverters._
class OpenTracingBackend[F[_], P] private (delegate: SttpBackend[F, P], tracer: Tracer) extends SttpBackend[F, P] {
private implicit val _monad: MonadError[F] = responseMonad
type PE = P with Effect[F]
override def send[T, R >: PE](request: Request[T, R]): F[Response[T]] =
responseMonad
.eval {
val spanBuilderTransformer: SpanBuilderTransformer =
request
.tag(OpenTracingBackend.SpanBuilderTransformerRequestTag)
.collectFirst { case f: SpanBuilderTransformer =>
f
}
.getOrElse(identity)
val span = spanBuilderTransformer(
tracer
.buildSpan(
request
.tag(OpenTracingBackend.OperationIdRequestTag)
.getOrElse("default-operation-id")
.toString
)
).withTag(Tags.SPAN_KIND, Tags.SPAN_KIND_CLIENT)
.withTag(Tags.HTTP_METHOD, request.method.method)
.withTag(Tags.HTTP_URL, request.uri.toString)
.withTag(Tags.COMPONENT, "sttp3-client")
.start()
request
.tag(OpenTracingBackend.SpanTransformerRequestTag)
          .collectFirst { case spanTransformer: SpanTransformer => spanTransformer(span) }
.getOrElse(span)
}
.flatMap { span =>
val requestBuilderAdapter = new RequestBuilderAdapter(request)
tracer.inject(span.context(), Format.Builtin.HTTP_HEADERS, new RequestBuilderCarrier(requestBuilderAdapter))
responseMonad.handleError(
delegate.send(requestBuilderAdapter.request).map { response =>
span
.setTag(Tags.HTTP_STATUS, Integer.valueOf(response.code.code))
.finish()
response
}
) { case e =>
span
.setTag(Tags.ERROR, java.lang.Boolean.TRUE)
.log(Map("event" -> Tags.ERROR.getKey, "error.object" -> e).asJava)
.finish()
responseMonad.error(e)
}
}
override def close(): F[Unit] = delegate.close()
override def responseMonad: MonadError[F] = delegate.responseMonad
}
object OpenTracingBackend {
private val OperationIdRequestTag = "io.opentracing.tag.sttp.operationId"
private val SpanBuilderTransformerRequestTag = "io.opentracing.tag.sttp.span.builder.transformer"
private val SpanTransformerRequestTag = "io.opentracing.tag.sttp.span.transformer"
type SpanBuilderTransformer = SpanBuilder => SpanBuilder
type SpanTransformer = Span => Span
implicit class RichRequest[U[_], T, R](request: RequestT[U, T, R]) {
def tagWithOperationId(operationId: String): RequestT[U, T, R] =
request.tag(OperationIdRequestTag, operationId)
def tagWithTransformSpan(transformSpan: SpanTransformer): RequestT[U, T, R] =
request.tag(SpanTransformerRequestTag, transformSpan)
    /** Sets the transformation of the SpanBuilder used by the OpenTracing backend to create the Span for this request execution. */
def tagWithTransformSpanBuilder(transformSpan: SpanBuilderTransformer): RequestT[U, T, R] =
request.tag(SpanBuilderTransformerRequestTag, transformSpan)
/** Sets parent Span for OpenTracing Span of this request execution. */
def setOpenTracingParentSpan(parent: Span): RequestT[U, T, R] =
tagWithTransformSpanBuilder(_.asChildOf(parent))
/** Sets parent SpanContext for OpenTracing Span of this request execution. */
def setOpenTracingParentSpanContext(parentSpanContext: SpanContext): RequestT[U, T, R] =
tagWithTransformSpanBuilder(_.asChildOf(parentSpanContext))
}
def apply[F[_], P](delegate: SttpBackend[F, P], tracer: Tracer): SttpBackend[F, P] = {
new FollowRedirectsBackend[F, P](new OpenTracingBackend(delegate, tracer))
}
}
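// Usage sketch (the `tracer` value and the chosen underlying backend are assumptions,
// not part of this file): wrap any SttpBackend and tag individual requests.
//   import sttp.client3._
//   import sttp.client3.opentracing.OpenTracingBackend._
//   val traced = OpenTracingBackend(HttpURLConnectionBackend(), tracer)
//   basicRequest.get(uri"https://example.org").tagWithOperationId("get-example").send(traced)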
| softwaremill/sttp | metrics/open-tracing-backend/src/main/scala/sttp/client3/opentracing/OpenTracingBackend.scala | Scala | apache-2.0 | 4,227 |
object SubD
| typesafehub/sbteclipse | src/sbt-test/sbteclipse/04-dependency-options/subd/src/main/scala/SubD.scala | Scala | apache-2.0 | 12 |
package js.hw4
import scala.util.parsing.input.Positional
import js.util._
object ast {
sealed abstract class Expr extends Positional {
// pretty print as AST
override def toString(): String = print.prettyAST(this)
// pretty print as JS expression
def prettyJS(): String = print.prettyJS(this)
// pretty print as value
def prettyVal(): String = print.prettyVal(this)
}
/* Variables */
case class Var(x: String) extends Expr
/* Declarations */
case class ConstDecl(x: String, e1: Expr, e2: Expr) extends Expr
/* Literals and Values*/
case class Num(n: Double) extends Expr
case class Bool(b: Boolean) extends Expr
case class Str(s: String) extends Expr
case object Undefined extends Expr
/* Unary and Binary Operators */
case class UnOp(uop: Uop, e1: Expr) extends Expr
case class BinOp(bop: Bop, e1: Expr, e2: Expr) extends Expr
sealed abstract class Uop
case object UMinus extends Uop /* - */
case object Not extends Uop /* ! */
sealed abstract class Bop
case object Plus extends Bop /* + */
case object Minus extends Bop /* - */
case object Times extends Bop /* * */
case object Div extends Bop /* / */
case object Eq extends Bop /* === */
case object Ne extends Bop /* !== */
case object Lt extends Bop /* < */
case object Le extends Bop /* <= */
case object Gt extends Bop /* > */
case object Ge extends Bop /* >= */
case object And extends Bop /* && */
case object Or extends Bop /* || */
case object Seq extends Bop /* , */
/* Control constructs */
case class If(e1: Expr, e2: Expr, e3: Expr) extends Expr
/* I/O */
case class Print(e1: Expr) extends Expr
/* Functions */
case class Function(p: Option[String], xs: List[(String, Typ)], tann: Option[Typ], e1: Expr) extends Expr
case class Call(e1: Expr, es: List[Expr]) extends Expr
/* Objects */
case class Obj(fs: Map[String, Expr]) extends Expr
case class GetField(e1: Expr, f: String) extends Expr
/* Types */
sealed abstract class Typ {
// pretty print as AST
override def toString(): String = print.prettyAST(this)
// pretty print as JS expression
def pretty(): String = print.prettyTyp(this)
}
case object TNumber extends Typ
case object TBool extends Typ
case object TString extends Typ
case object TUndefined extends Typ
case class TFunction(txs: List[(String,Typ)], tret: Typ) extends Typ
case class TObj(tfs: Map[String, Typ]) extends Typ
/* Convenience function for making Seq constructors */
def mkSeq(e1: Expr, e2: Expr): Expr =
(e1, e2) match {
case (Undefined, _) => e2
case (_, Undefined) => e1
case _ => BinOp(Seq, e1, e2)
}
/* Define values. */
def isValue(e: Expr): Boolean = e match {
case Num(_) | Bool(_) | Str(_) | Function(_, _, _, _) | Undefined => true
case Obj(fs) => fs forall { case (_, ei) => isValue(ei) }
case _ => false
}
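  /* Examples (following the cases above):
   *   isValue(Num(1.0)) == true
   *   isValue(Obj(Map("f" -> Num(1.0)))) == true
   *   isValue(BinOp(Plus, Num(1.0), Num(2.0))) == false
   */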
/* Define statements (used for pretty printing). */
def isStmt(e: Expr): Boolean = e match {
case ConstDecl(_, _, _) | Print(_) => true
case BinOp(Seq, _, e2) => isStmt(e2)
case _ => false
}
/* Get the free variables of e. */
def fv(e: Expr): Set[String] = e match {
case Var(x) => Set(x)
case ConstDecl(x, e1, e2) => fv(e1) | (fv(e2) - x)
case Function(p, xs, _, e1) => fv(e1) -- (xs map (_._1)) -- p
case Num(_) | Bool(_) | Undefined | Str(_) => Set.empty
case UnOp(_, e1) => fv(e1)
case BinOp(_, e1, e2) => fv(e1) | fv(e2)
case If (e1, e2, e3) => fv(e1) | fv(e2) | fv(e3)
case Call(e1, es) => fv(e1) | (es.toSet flatMap fv)
case Print(e1) => fv(e1)
case Obj(fs) =>
fs.foldLeft(Set.empty: Set[String]){ (acc: Set[String], p: (String, Expr)) => acc | fv(p._2) }
case GetField(e1, _) => fv(e1)
}
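  /* Example: variables bound by ConstDecl or Function are not free, e.g.
   *   fv(ConstDecl("x", Num(1.0), BinOp(Plus, Var("x"), Var("y")))) == Set("y")
   */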
/* Check whether the given expression is closed. */
def closed(e: Expr): Boolean = fv(e).isEmpty
/*
* Dynamic Type Error exception. Throw this exception to signal a dynamic type error.
*
* throw DynamicTypeError(e)
*
*/
case class DynamicTypeError(e: Expr) extends JsException("Type Error", e.pos)
/*
* Static Type Error exception. Throw this exception to signal a static
* type error.
*
* throw StaticTypeError(tbad, esub, e)
*
*/
case class StaticTypeError(tbad: Typ, e: Expr) extends
JsException("Type Error: " + "invalid type " + tbad.pretty(), e.pos)
/*
* Stuck Type Error exception. Throw this exception to signal getting
* stuck in evaluation. This exception should not get raised if
* evaluating a well-typed expression.
*
* throw StuckError(e)
*
*/
case class StuckError(e: Expr) extends JsException("stuck while evaluating expression", e.pos)
} | mpgarate/ProgLang-Assignments | HW4/HW4/src/main/scala/js/hw4/ast.scala | Scala | mit | 4,788 |
import sbt._
class ThereminProject(info: ProjectInfo) extends DefaultProject(info) {
// repositories to use
val scalaToolsReleases = ScalaToolsReleases
// dependencies
val scalatest = "org.scalatest" % "scalatest" % "1.2" % "test"
// test listeners
def junitXmlListener: TestReportListener = new eu.henkelmann.sbt.JUnitXmlTestsListener(outputPath.toString)
override def testListeners: Seq[TestReportListener] = super.testListeners ++ Seq(junitXmlListener)
}
| mmakowski/theremin | project/build/ThereminProject.scala | Scala | bsd-2-clause | 492 |
/**
* Copyright 2010-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package test
import org.scalatest._
import play.api.test._
import play.api.test.Helpers._
class ExampleUnitSpec extends UnitSpec {
"Application" should {
"send 404 on a bad request" in running(app) {
status(route(app, FakeRequest(GET, "/boum")).get) shouldBe NOT_FOUND
}
"render the index page" in running(app) {
val home = route(app, FakeRequest(GET, "/")).get
status(home) shouldBe OK
contentType(home).value shouldBe "text/html"
contentAsString(home) should include ("Scalactic")
}
}
}
| scalatest/scalactic-website | test/ExampleUnitSpec.scala | Scala | apache-2.0 | 1,169 |
package physical.habitat
import org.locationtech.jts.geom.{Geometry, MultiPolygon}
import locals.{Constants, HabitatType, Reef, Other, RockyIntertidal, Ocean}
import org.opengis.feature.simple.SimpleFeature
object SimpleFeatureAdaptor {
def getGeometry(sf: SimpleFeature) : Geometry = sf.getAttribute(Constants.ShapeAttribute.Geometry._1).asInstanceOf[Geometry]
def getMultiPolygon(sf: SimpleFeature): MultiPolygon = sf.getAttribute(Constants.ShapeAttribute.Geometry._1).asInstanceOf[MultiPolygon]
def getId(sf: SimpleFeature): Int = sf.getAttribute(Constants.ShapeAttribute.Patch._2).asInstanceOf[Int]
def getHabitatType(sf: SimpleFeature): HabitatType = {
sf.getAttribute(Constants.ShapeAttribute.Habitat._2).toString match {
case "Reef" => Reef
case "Other" => Other
case "Rocky intertidal" => RockyIntertidal
case _ => Ocean
}
}
}
| shawes/zissou | src/main/scala/physical/habitat/SimpleFeatureAdaptor.scala | Scala | mit | 883 |
package org.cloudfun.framework.storage.mongodb
import com.osinka.mongodb.Serializer
import org.cloudfun.framework.storage.Storable
import com.mongodb.{BasicDBList, BasicDBObject, DBObject}
import org.cloudfun.framework.data.{MutableData, Data}
import scala.collection.JavaConversions._
import org.bson.types.ObjectId
import org.scalaprops.Property
import org.cloudfun.framework.component.{ComponentService, ComponentType}
/**
* Serializer for converting Storable objects to MongoDB format and back.
*/
class MongoSerializer(componentService: ComponentService) extends Serializer[Storable] {
private val CF_TYPE = "_cf_type"
def in(obj: Storable): DBObject = {
val doc = dataToObj(obj)
val componentType: Symbol = componentService.getComponentType(obj) match {
case None => throw new IllegalStateException("Can not store an object of type " + obj.getClass.getName())
case Some(ct: ComponentType[_]) => ct.name
}
doc.put(CF_TYPE, componentType.name)
doc
}
def out(dbo: DBObject): scala.Option[Storable] = {
if (dbo == null) None
else {
      if (!dbo.containsField(CF_TYPE)) throw new IllegalStateException("Cannot deserialize object: it has no type field ("+CF_TYPE+").")
// Get type
val kind = Symbol(dbo.get(CF_TYPE).toString)
// Get data
val properties= objToMap(dbo)
// Build component
componentService.createComponent(kind, properties, true)
}
}
private def dataToObj(data: Data): DBObject = {
val doc = new BasicDBObject()
data.properties.elements foreach ( (e: (Symbol, Property[_])) => {
var value = e._2.get
if (value.isInstanceOf[MongoRef[_]]) value = value.asInstanceOf[MongoRef[_]].id
if (value.isInstanceOf[Data]) value = dataToObj(value.asInstanceOf[Data])
if (value.isInstanceOf[List[_]]) value = listToObj(value.asInstanceOf[List[_]])
if (value.isInstanceOf[Set[_]]) value = setToObj(value.asInstanceOf[Set[_]])
doc.put(e._1.name, value)
})
doc
}
private def listToObj(list: List[_]): BasicDBList = {
val doc = new BasicDBList()
list foreach (e => doc.add(e.asInstanceOf[Object]))
doc
}
private def setToObj(set: Set[_]): BasicDBList = {
val doc = new BasicDBList()
set foreach (e => doc.add(e.asInstanceOf[Object]))
doc
}
private def objToData(obj: DBObject): Data= {
val data = new MutableData()
obj.toMap.elements foreach ( (e: (_, _)) => {
var value = e._2
if (value.isInstanceOf[BasicDBList]) value = objToList(value.asInstanceOf[BasicDBList])
if (value.isInstanceOf[DBObject]) value = objToData(value.asInstanceOf[DBObject])
if (value.isInstanceOf[ObjectId]) value = MongoRef[Storable](value.asInstanceOf[ObjectId])
data.put(Symbol(e._1.toString), value.asInstanceOf[Object])
})
data
}
private def objToMap(obj: DBObject): Map[Symbol, AnyRef] = {
var map = Map[Symbol, AnyRef]()
obj.toMap.elements foreach ( (e: (_, _)) => {
val key: Symbol = Symbol(e._1.toString)
var value: AnyRef = e._2.asInstanceOf[AnyRef]
if (value.isInstanceOf[BasicDBList]) value = objToList(value.asInstanceOf[BasicDBList])
if (value.isInstanceOf[DBObject]) value = objToData(value.asInstanceOf[DBObject])
if (value.isInstanceOf[ObjectId]) value = MongoRef[Storable](value.asInstanceOf[ObjectId])
map += (key -> value)
})
map
}
private def objToList(list: BasicDBList): List[_] = {
list.elements.toList
}
}
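// Usage sketch (the `componentService` and `storable` values are assumptions): `in` turns
// a Storable into a DBObject tagged with its component type under "_cf_type", and `out`
// rebuilds the component via the ComponentService.
//   val serializer = new MongoSerializer(componentService)
//   val dbObject = serializer.in(storable)
//   val restored: Option[Storable] = serializer.out(dbObject)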
| zzorn/cloudfun | src/main/scala/org/cloudfun/framework/storage/mongodb/MongoSerializer.scala | Scala | lgpl-3.0 | 3,517 |
package io.circe
import cats.data.{ NonEmptyList, Validated, Xor }
import cats.laws.discipline.arbitrary._
import cats.laws.discipline.eq._
import io.circe.test.{ CodecTests, CirceSuite }
import org.scalacheck.Prop.forAll
class AnyValCodecTests extends CirceSuite {
checkAll("Codec[Unit]", CodecTests[Unit].codec)
checkAll("Codec[Boolean]", CodecTests[Boolean].codec)
checkAll("Codec[Char]", CodecTests[Char].codec)
checkAll("Codec[Float]", CodecTests[Float].codec)
checkAll("Codec[Double]", CodecTests[Double].codec)
checkAll("Codec[Byte]", CodecTests[Byte].codec)
checkAll("Codec[Short]", CodecTests[Short].codec)
checkAll("Codec[Int]", CodecTests[Int].codec)
checkAll("Codec[Long]", CodecTests[Long].codec)
}
class StdLibCodecTests extends CirceSuite {
checkAll("Codec[String]", CodecTests[String].codec)
checkAll("Codec[BigInt]", CodecTests[BigInt].codec)
checkAll("Codec[BigDecimal]", CodecTests[BigDecimal].codec)
checkAll("Codec[Option[Int]]", CodecTests[Option[Int]].codec)
checkAll("Codec[List[Int]]", CodecTests[List[Int]].codec)
checkAll("Codec[Map[String, Int]]", CodecTests[Map[String, Int]].codec)
checkAll("Codec[Set[Int]]", CodecTests[Set[Int]].codec)
checkAll("Codec[Tuple1[Int]]", CodecTests[Tuple1[Int]].codec)
checkAll("Codec[(Int, String)]", CodecTests[(Int, String)].codec)
checkAll("Codec[(Int, Int, String)]", CodecTests[(Int, Int, String)].codec)
test("Tuples should be encoded as JSON arrays") {
check {
forAll { (t: (Int, String, Char)) =>
val json = Encoder[(Int, String, Char)].apply(t)
val target = Json.array(Json.int(t._1), Json.string(t._2), Encoder[Char].apply(t._3))
json === target && json.as[(Int, String, Char)] === Xor.right(t)
}
}
}
test("Decoding a JSON array without enough elements into a tuple should fail") {
check {
forAll { (i: Int, s: String) =>
Json.array(Json.int(i), Json.string(s)).as[(Int, String, Double)].isLeft
}
}
}
test("Decoding a JSON array with too many elements into a tuple should fail") {
check {
forAll { (i: Int, s: String, d: Double) =>
Json.array(Json.int(i), Json.string(s), Json.numberOrNull(d)).as[(Int, String)].isLeft
}
}
}
}
class CatsCodecTests extends CirceSuite {
checkAll("Codec[NonEmptyList[Int]]", CodecTests[NonEmptyList[Int]].codec)
}
class CirceCodecTests extends CirceSuite {
checkAll("Codec[Json]", CodecTests[Json].codec)
}
class DisjunctionCodecTests extends CirceSuite {
import disjunctionCodecs._
checkAll("Codec[Xor[Int, String]]", CodecTests[Xor[Int, String]].codec)
checkAll("Codec[Either[Int, String]]", CodecTests[Either[Int, String]].codec)
checkAll("Codec[Validated[String, Int]]", CodecTests[Validated[String, Int]].codec)
}
class DecodingFailureTests extends CirceSuite {
val n = Json.int(10)
val b = Json.True
val s = Json.string("foo")
val l = Json.array(s)
val o = Json.obj("foo" -> n)
val nd = Decoder[Int]
val bd = Decoder[Boolean]
val sd = Decoder[String]
val ld = Decoder[List[String]]
val od = Decoder[Map[String, Int]]
test("Decoding a JSON number as anything else should fail") {
assert(List(bd, sd, ld, od).forall(d => d.decodeJson(n).isLeft))
}
test("Decoding a JSON boolean as anything else should fail") {
assert(List(nd, sd, ld, od).forall(d => d.decodeJson(b).isLeft))
}
test("Decoding a JSON string as anything else should fail") {
assert(List(nd, bd, ld, od).forall(d => d.decodeJson(s).isLeft))
}
test("Decoding a JSON array as anything else should fail") {
assert(List(nd, bd, sd, od).forall(d => d.decodeJson(l).isLeft))
}
test("Decoding a JSON object as anything else should fail") {
assert(List(nd, bd, sd, ld).forall(d => d.decodeJson(o).isLeft))
}
}
| groz/circe | core/shared/src/test/scala/io/circe/CodecTests.scala | Scala | apache-2.0 | 3,813 |
package mesosphere.marathon.integration
import mesosphere.marathon.integration.setup._
import org.scalatest.{ GivenWhenThen, Matchers }
import play.api.libs.json.Json
class InfoIntegrationTest extends IntegrationFunSuite with SingleMarathonIntegrationTest with GivenWhenThen with Matchers {
test("v2/info returns the right values") {
When("fetching the info")
val response = marathon.info
Then("the response should be successful")
response.code should be (200)
val info = response.entityJson
And("the http port should be correct")
    info \ "http_config" \ "http_port" should be (Json.toJson(config.marathonBasePort))
    And("the ZooKeeper info should be correct")
    info \ "zookeeper_config" \ "zk" should be (Json.toJson(config.zk))
    And("the mesos master information should be correct")
    info \ "marathon_config" \ "master" should be (Json.toJson(config.master))
    And("the request should always be answered by the leader")
    info \ "elected" should be (Json.toJson(true))
    And("the leader value in the JSON should match the one in the HTTP headers")
    val headerLeader =
      response.originalResponse.headers.find(_.name.equals("X-Marathon-Leader")).get.value.replace("http://", "")
    info \ "leader" should be (Json.toJson(headerLeader))
    And("the leader should match the value returned by /v2/leader")
    info \ "leader" should be (Json.toJson(marathon.leader.value.leader))
}
}
| EasonYi/marathon | src/test/scala/mesosphere/marathon/integration/InfoIntegrationTest.scala | Scala | apache-2.0 | 1,450 |
import scala.concurrent.{ Await, Future }
import scala.concurrent.duration.FiniteDuration
import reactivemongo.api.{
Cursor,
CursorFlattener,
CursorProducer,
WrappedCursor
}
import reactivemongo.api.TestCompat._
trait Cursor1Spec { spec: CursorSpec =>
def group1 = {
val nDocs = 16517
s"insert $nDocs records" in {
def insert(rem: Int, bulks: Seq[Future[Unit]]): Future[Unit] = {
if (rem == 0) {
Future.sequence(bulks).map(_ => {})
} else {
val len = if (rem < 256) rem else 256
def prepared = nDocs - rem
def bulk = coll.insert(false).many(
for (i <- 0 until len) yield {
val n = i + prepared
BSONDocument("i" -> n, "record" -> s"record$n")
}).map(_ => {})
insert(rem - len, bulk +: bulks)
}
}
insert(nDocs, Seq.empty).map { _ =>
info(s"inserted $nDocs records")
} aka "fixtures" must beEqualTo({}).await(1, timeout)
}
"request for cursor query" in {
import reactivemongo.core.protocol.{ Response, Reply }
import reactivemongo.api.tests.{ makeRequest => req, nextResponse }
def cursor(batchSize: Int = 0) =
coll.find(matchAll("makeReq1")).batchSize(batchSize).cursor()
req(cursor(nDocs + 1), nDocs + 1) must beLike[Response] {
case Response(_, Reply(_, id, from, ret), _, _) =>
id aka "cursor ID #1" must_== 0 and {
from must_== 0 and (ret aka "returned" must_== nDocs)
}
}.await(1, timeout) and {
req(cursor(nDocs), 1) must beLike[Response] {
case Response(_, Reply(_, id, from, ret), _, _) =>
id aka "cursor ID #2" must_== 0 and {
from must_== 0 and (ret must_== 1)
}
}.await(1, timeout)
} and {
req(cursor(128), Int.MaxValue) must beLike[Response] {
case Response(_, Reply(_, id, from, ret), _, _) =>
id aka "cursor ID #3" must not(beEqualTo(0)) and {
from must_== 0 and (ret must_== 128)
}
}.await(1, timeout)
} and {
req(cursor(), 10) must beLike[Response] {
case Response(_, Reply(_, id, from, ret), _, _) =>
id aka "cursor ID #4" must_== 0 and {
from must_== 0 and (ret must_== 10)
}
}.await(1, timeout)
} and {
req(cursor(), 101) must beLike[Response] {
case Response(_, Reply(_, id, from, ret), _, _) =>
id aka "cursor ID #5" must_== 0 and {
from must_== 0 and (ret must_== 101 /* default batch size */ )
}
}.await(1, timeout)
} and {
req(cursor(), Int.MaxValue /* unlimited */ ) must beLike[Response] {
case Response(_, Reply(_, id, from, ret), _, _) =>
id aka "cursor ID #6" must not(beEqualTo(0)) and {
from must_== 0 and (ret must_== 101 /* default batch size */ )
}
}.await(1, timeout)
} and {
val batchSize = 128
val max = (batchSize * 2) - 1
val cur = cursor(batchSize)
@volatile var r1: Response = null // Workaround to avoid nesting .await
req(cur, max) must beLike[Response] {
case r @ Response(_, Reply(_, id1, from1, ret1), _, _) =>
id1 aka "cursor ID #7a" must not(beEqualTo(0)) and {
from1 must_== 0 and (ret1 must_== batchSize)
} and {
r1 = r
r1 aka "r1" must not(beNull)
}
}.await(1, timeout) and {
nextResponse(cur, max)(ee.ec, r1) must beSome[Response].like {
case r2 @ Response(_, Reply(_, id2, from2, ret2), _, _) =>
id2 aka "cursor ID #7b" must_== 0 and {
from2 aka "from #7b" must_== 128
} and {
ret2 must_== (batchSize - 1)
} and {
nextResponse(cur, 1)(ee.ec, r2) must beNone.await(1, timeout)
}
}.await(1, timeout)
}
}
}
{ // headOption
def headOptionSpec(c: BSONCollection, timeout: FiniteDuration) = {
"find first document when matching" in {
c.find(matchAll("headOption1")).cursor().
headOption must beSome[BSONDocument].await(1, timeout)
}
"find first document when not matching" in {
c.find(BSONDocument("i" -> -1)).cursor().
headOption must beNone.await(1, timeout)
}
}
"with the default connection" >> {
headOptionSpec(coll, timeout)
}
"with the slow connection" >> {
headOptionSpec(slowColl, slowTimeout)
}
}
// head
"find first document when matching" in {
coll.find(matchAll("head1") ++ ("i" -> 0)).cursor[BSONDocument]().head.
map(_ -- "_id") must beEqualTo(BSONDocument(
"i" -> 0, "record" -> "record0")).await(1, timeout)
}
"find first document when not matching" in {
Await.result(
coll.find(BSONDocument("i" -> -1)).cursor().head,
timeout) must throwA[Cursor.NoSuchResultException.type]
}
"read one option document with success" in {
coll.find(matchAll("one1")).one[BSONDocument].
aka("findOne") must beSome[BSONDocument].await(0, timeout)
}
"read one document with success" in {
coll.find(matchAll("one2") ++ ("i" -> 1)).requireOne[BSONDocument].
map(_ -- "_id") must beEqualTo(BSONDocument(
"i" -> 1, "record" -> "record1")).await(0, timeout)
}
"collect with limited maxDocs" in {
val max = (nDocs / 8).toInt
coll.find(matchAll("collectLimit")).batchSize(997).cursor().
collect[List](max, Cursor.FailOnError[List[BSONDocument]]()).
aka("documents") must haveSize[List[BSONDocument]](max).
await(1, timeout)
}
def foldSpec1(c: BSONCollection, timeout: FiniteDuration) = {
"get 10 first docs" in {
c.find(matchAll("cursorspec1")).cursor().
collect[List](10, Cursor.FailOnError[List[BSONDocument]]()).
map(_.size) aka "result size" must beEqualTo(10).await(1, timeout)
}
{ // .fold
"fold all the documents" in {
c.find(matchAll("cursorspec2a")).batchSize(2096).cursor().fold(0)(
{ (st, _) => debug(s"fold: $st"); st + 1 }) aka "result size" must beEqualTo(16517).await(1, timeout) and {
c.find(matchAll("cursorspec2b")).
batchSize(2096).cursor().fold(0, -1)(
{ (st, _) => st + 1 }) aka "result size" must beEqualTo(16517).await(1, timeout)
}
}
"fold only 1024 documents" in {
c.find(matchAll("cursorspec3")).batchSize(256).cursor().
fold(0, 1024)((st, _) => st + 1).
aka("result size") must beEqualTo(1024).await(1, timeout)
}
}
{ // .foldWhile
"fold while all the documents" in {
c.find(matchAll("cursorspec4a")).
batchSize(2096).cursor().foldWhile(0)(
{ (st, _) => debug(s"foldWhile: $st"); Cursor.Cont(st + 1) }).
aka("result size") must beEqualTo(16517).await(1, timeout)
}
"fold while only 1024 documents" in {
c.find(matchAll("cursorspec5a")).batchSize(256).
cursor().foldWhile(0, 1024)(
(st, _) => Cursor.Cont(st + 1)).
aka("result size") must beEqualTo(1024).await(1, timeout)
}
"fold while successfully with async function" >> {
"all the documents" in {
coll.find(matchAll("cursorspec4b")).
batchSize(2096).cursor().foldWhileM(0)(
(st, _) => Future.successful(Cursor.Cont(st + 1))).
aka("result size") must beEqualTo(16517).await(1, timeout)
}
"only 1024 documents" in {
coll.find(matchAll("cursorspec5b")).
batchSize(256).cursor().foldWhileM(0, 1024)(
(st, _) => Future.successful(Cursor.Cont(st + 1))).
aka("result size") must beEqualTo(1024).await(1, timeout)
}
}
}
{ // .foldBulk
"fold the bulks for all the documents" in {
c.find(matchAll("cursorspec6a")).
batchSize(2096).cursor().foldBulks(0)({ (st, bulk) =>
debug(s"foldBulk: $st")
Cursor.Cont(st + bulk.size)
}) aka "result size" must beEqualTo(16517).await(1, timeout)
}
"fold the bulks for 1024 documents" in {
c.find(matchAll("cursorspec7a")).
batchSize(256).cursor().foldBulks(0, 1024)(
(st, bulk) => Cursor.Cont(st + bulk.size)).
aka("result size") must beEqualTo(1024).await(1, timeout)
}
"fold the bulks with async function" >> {
"for all the documents" in {
coll.find(matchAll("cursorspec6b")).
batchSize(2096).cursor().foldBulksM(0)(
(st, bulk) => Future.successful(Cursor.Cont(st + bulk.size))).
aka("result size") must beEqualTo(16517).await(1, timeout)
}
"for 1024 documents" in {
coll.find(matchAll("cursorspec7b")).
batchSize(256).cursor().foldBulksM(0, 1024)(
(st, bulk) => Future.successful(Cursor.Cont(st + bulk.size))).
aka("result size") must beEqualTo(1024).await(1, timeout)
}
}
}
{ // .foldResponse
"fold the responses for all the documents" in {
c.find(matchAll("cursorspec8a")).
batchSize(2096).cursor().foldResponses(0)({ (st, resp) =>
debug(s"foldResponses: $st")
Cursor.Cont(st + resp.reply.numberReturned)
}) aka "result size" must beEqualTo(16517).await(1, timeout)
}
"fold the responses for 1024 documents" in {
c.find(matchAll("cursorspec9a")).
batchSize(2056).cursor().foldResponses(0, 1024)(
(st, resp) => Cursor.Cont(st + resp.reply.numberReturned)).
aka("result size") must beEqualTo(1024).await(1, timeout)
}
"fold the responses with async function" >> {
"for all the documents" in {
coll.find(matchAll("cursorspec8b")).
batchSize(2096).cursor().foldResponsesM(0)((st, resp) =>
Future.successful(Cursor.Cont(st + resp.reply.numberReturned))).
aka("result size") must beEqualTo(16517).await(1, timeout)
}
"for 1024 documents" in {
coll.find(matchAll("cursorspec9b")).batchSize(256).cursor().
foldResponsesM(0, 1024)(
(st, resp) => Future.successful(
Cursor.Cont(st + resp.reply.numberReturned))).
aka("result size") must beEqualTo(1024).await(1, timeout)
}
}
}
}
"with the default connection" >> {
foldSpec1(coll, timeout)
}
"with the slow connection" >> {
foldSpec1(slowColl, slowTimeout * 2L)
}
"fold the responses with async function" >> {
"for all the documents" in {
coll.find(matchAll("cursorspec8")).
batchSize(2096).cursor().foldResponsesM(0)((st, resp) =>
Future.successful(Cursor.Cont(st + resp.reply.numberReturned))).
aka("result size") must beEqualTo(16517).await(1, timeout)
}
"for 1024 documents" in {
coll.find(matchAll("cursorspec9")).batchSize(256).cursor().
foldResponsesM(0, 1024)((st, resp) =>
Future.successful(Cursor.Cont(st + resp.reply.numberReturned))).
aka("result size") must beEqualTo(1024).await(1, timeout)
}
}
"produce a custom cursor for the results" in {
implicit def fooProducer[T] = new CursorProducer[T] {
type ProducedCursor = FooCursor[T]
def produce(base: Cursor.WithOps[T]): ProducedCursor =
new DefaultFooCursor(base)
}
implicit object fooFlattener extends CursorFlattener[FooCursor] {
type Flattened[T] = FooCursor[T]
def flatten[T](future: Future[FooCursor[T]]) =
new FlattenedFooCursor(future)
}
val cursor = coll.find(matchAll("cursorspec10")).cursor[BSONDocument]()
cursor.foo must_== "Bar" and {
Cursor.flatten(Future.successful(cursor)).foo must_=== "raB"
} and {
val extCursor: FooExtCursor[BSONDocument] = new DefaultFooCursor(cursor)
// Check resolution as super type (FooExtCursor <: FooCursor)
val flattened = Cursor.flatten[BSONDocument, FooCursor](
Future.successful[FooExtCursor[BSONDocument]](extCursor))
flattened must beAnInstanceOf[FooCursor[BSONDocument]] and {
flattened must not(beAnInstanceOf[FooExtCursor[BSONDocument]])
} and {
flattened.foo must_=== "raB"
}
}
}
}
// ---
private sealed trait FooCursor[T] extends Cursor[T] { def foo: String }
private sealed trait FooExtCursor[T] extends FooCursor[T]
private class DefaultFooCursor[T](val wrappee: Cursor[T])
extends FooExtCursor[T] with WrappedCursor[T] {
val foo = "Bar"
}
private class FlattenedFooCursor[T](cursor: Future[FooCursor[T]])
extends reactivemongo.api.FlattenedCursor[T](cursor) with FooCursor[T] {
val foo = "raB"
}
}
| ornicar/ReactiveMongo | driver/src/test/scala/Cursor1Spec.scala | Scala | apache-2.0 | 13,421 |
/*******************************************************************************
* Copyright (c) 2014 Guillaume DUBUISSON DUPLESSIS <[email protected]>.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the GNU Public License v3.0
* which accompanies this distribution, and is available at
* http://www.gnu.org/licenses/gpl.html
*
* Contributors:
* Guillaume DUBUISSON DUPLESSIS <[email protected]> - initial API and implementation
******************************************************************************/
package binaryTree.P64
import binaryTree.Tree
class sol02 {
def layoutBinaryTree[T](t: Tree[T]): Tree[T] =
t.layoutBinaryTree
}
| GuillaumeDD/scala99problems | src/main/scala/binaryTree/P64/sol02.scala | Scala | gpl-3.0 | 771 |
/*
* Copyright 2017 Exon IT
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package by.exonit.redmine.client.play25ws
import monix.execution.Scheduler
import org.scalatest._
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}
trait BasicSpec extends WordSpec with Assertions with Matchers with ScalaFutures with Inside
with OptionValues {
override implicit def patienceConfig = PatienceConfig(timeout = Span(2, Seconds), interval = Span(15, Millis))
val jsonContentType = "application/json"
implicit val scheduler: Scheduler = Scheduler.global
}
| exon-it/redmine-scala-client | client-play25-ws/src/test/scala/by/exonit/redmine/client/play25ws/BasicSpec.scala | Scala | apache-2.0 | 1,114 |
package slick.memory
import java.util.concurrent.atomic.AtomicLong
import com.typesafe.config.Config
import org.reactivestreams.Subscriber
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet}
import scala.concurrent.{Future, ExecutionContext}
import slick.SlickException
import slick.ast._
import slick.lifted.{PrimaryKey, Constraint, Index}
import slick.relational.{RelationalProfile, RelationalBackend}
import slick.util.Logging
/** A simple database engine that stores data in heap data structures. */
trait HeapBackend extends RelationalBackend with Logging {
type This = HeapBackend
type Database = DatabaseDef
type Session = SessionDef
type DatabaseFactory = DatabaseFactoryDef
type Context = BasicActionContext
type StreamingContext = BasicStreamingActionContext
val Database = new DatabaseFactoryDef
val backend: HeapBackend = this
def createDatabase(config: Config, path: String): Database = Database.apply(ExecutionContext.global)
class DatabaseDef(protected val synchronousExecutionContext: ExecutionContext) extends super.DatabaseDef {
protected[this] def createDatabaseActionContext[T](_useSameThread: Boolean): Context =
new BasicActionContext { val useSameThread = _useSameThread }
protected[this] def createStreamingDatabaseActionContext[T](s: Subscriber[_ >: T], useSameThread: Boolean): StreamingContext =
new BasicStreamingActionContext(s, useSameThread, DatabaseDef.this)
protected val tables = new HashMap[String, HeapTable]
def createSession(): Session = new SessionDef(this)
override def shutdown: Future[Unit] = Future.successful(())
def close: Unit = ()
def getTable(name: String): HeapTable = synchronized {
tables.get(name).getOrElse(throw new SlickException(s"Table $name does not exist"))
}
def createTable(name: String, columns: IndexedSeq[HeapBackend.Column],
indexes: IndexedSeq[Index], constraints: IndexedSeq[Constraint]): HeapTable = synchronized {
if(tables.contains(name)) throw new SlickException(s"Table $name already exists")
val t = new HeapTable(name, columns, indexes, constraints)
tables += ((name, t))
t
}
def createTableIfNotExists(name: String, columns: IndexedSeq[HeapBackend.Column],
indexes: IndexedSeq[Index], constraints: IndexedSeq[Constraint]): HeapTable = synchronized {
val t = new HeapTable(name, columns, indexes, constraints)
if(!tables.contains(name)) tables += ((name, t))
t
}
def dropTable(name: String): Unit = synchronized {
if(!tables.remove(name).isDefined)
throw new SlickException(s"Table $name does not exist")
}
def dropTableIfExists(name: String): Unit = try dropTable(name) catch{
case e: SlickException => ()
case e: Throwable => throw e
}
def truncateTable(name: String): Unit = synchronized{
getTable(name).data.clear
}
def getTables: IndexedSeq[HeapTable] = synchronized {
tables.values.toVector
}
}
def createEmptyDatabase: Database = {
def err = throw new SlickException("Unsupported operation for empty heap database")
new DatabaseDef(new ExecutionContext {
def reportFailure(t: Throwable) = err
def execute(runnable: Runnable) = err
}) {
override def createTable(name: String, columns: IndexedSeq[HeapBackend.Column],
indexes: IndexedSeq[Index], constraints: IndexedSeq[Constraint]) = err
}
}
class DatabaseFactoryDef {
/** Create a new heap database instance that uses the supplied ExecutionContext for
* asynchronous execution of database actions. */
def apply(executionContext: ExecutionContext): Database = new DatabaseDef(executionContext)
}
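  // Usage sketch: `Database` above is the factory value, so an in-memory database backed
  // by an ExecutionContext of the caller's choice can be obtained with, for example,
  //   val db = Database(scala.concurrent.ExecutionContext.global)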
class SessionDef(val database: Database) extends super.SessionDef {
def close(): Unit = {}
def rollback() =
throw new SlickException("HeapBackend does not currently support transactions")
def force(): Unit = {}
def withTransaction[T](f: => T) =
throw new SlickException("HeapBackend does not currently support transactions")
}
type Row = IndexedSeq[Any]
class HeapTable(val name: String, val columns: IndexedSeq[HeapBackend.Column],
indexes: IndexedSeq[Index], constraints: IndexedSeq[Constraint]) {
protected[HeapBackend] val data: ArrayBuffer[Row] = new ArrayBuffer[Row]
def rows: Iterable[Row] = data
def append(row: Row): Unit = synchronized {
verifier.verify(row)
data.append(row)
verifier.inserted(row)
logger.debug("Inserted ("+row.mkString(", ")+") into "+this)
}
def createInsertRow: ArrayBuffer[Any] = columns.map(_.createDefault)(collection.breakOut)
override def toString = name + "(" + columns.map(_.sym.name).mkString(", ") + ")"
lazy val columnIndexes: Map[TermSymbol, Int] = columns.map(_.sym).zipWithIndex.toMap
val verifier = {
val v1 = indexes.foldLeft(Verifier.empty) { case (z, i) => z andThen createIndexVerifier(i) }
val v2 = constraints.foldLeft(v1) { case (z, c) => z andThen createConstraintVerifier(c) }
columns.foldLeft(v2) { case (z, c) =>
if(c.isUnique) z andThen createUniquenessVerifier("<unique column "+c.sym.name+">", Vector(c.sym))
else z
}
}
protected def createConstraintVerifier(cons: Constraint) = cons match {
case PrimaryKey(name, columns) => createUniquenessVerifier(name, columns.map { case Select(_, f: FieldSymbol) => f })
case _ => Verifier.empty
}
protected def createIndexVerifier(idx: Index) =
if(!idx.unique) Verifier.empty
else createUniquenessVerifier(idx.name, idx.on.map { case Select(_, f: FieldSymbol) => f })
protected def createUniquenessVerifier(name: String, on: IndexedSeq[FieldSymbol]): Verifier = {
val columns: IndexedSeq[Int] = on.map(columnIndexes)
val extract: (Row => Any) =
if(columns.length == 1) (r: Row) => r(columns.head)
else (r: Row) => columns.map(r)
val hash = new HashSet[Any]()
new Verifier {
def verify(row: Row): Unit = {
val e = extract(row)
if(hash contains e)
throw new SlickException("Uniqueness constraint "+name+" violated. Duplicate data: "+e)
}
def inserted(row: Row): Unit = { hash += extract(row) }
}
}
}
/** A Verifier is called before and after data is updated in a table. It
* ensures that no constraints are violated before the update and updates
* the indices afterwards. */
trait Verifier { self =>
def verify(row: Row): Unit
def inserted(row: Row): Unit
def andThen(other: Verifier): Verifier =
if(this eq Verifier.empty) other
else if(other eq Verifier.empty) this
else new Verifier {
def verify(row: Row): Unit = { self.verify(row); other.verify(row) }
def inserted(row: Row): Unit = { self.inserted(row); other.inserted(row) }
}
}
object Verifier {
val empty: Verifier = new Verifier {
def verify(row: Row) = ()
def inserted(row: Row) = ()
}
}
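  // Sketch of the Verifier contract (the verifier values below are illustrative):
  // verifiers compose with `andThen`, `verify` throws before a violating row is stored,
  // and `inserted` updates the verifier's internal index afterwards.
  //   val combined = pkVerifier andThen uniqueEmailVerifier
  //   combined.verify(row)   // throws SlickException on a constraint violation
  //   combined.inserted(row) // records the accepted row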
}
object HeapBackend extends HeapBackend {
class Column(val sym: FieldSymbol, val tpe: ScalaType[Any]) {
private[this] val default = sym.options.collectFirst { case RelationalProfile.ColumnOption.Default(v) => v }
private[this] val autoInc = sym.options.collectFirst { case ColumnOption.AutoInc => new AtomicLong() }
val isUnique = sym.options.collectFirst { case ColumnOption.PrimaryKey => true }.getOrElse(false)
def createDefault: Any = autoInc match {
case Some(a) =>
val i = a.incrementAndGet()
if(tpe == ScalaBaseType.longType) i
else if(tpe == ScalaBaseType.intType) i.toInt
else throw new SlickException("Only Long and Int types are allowed for AutoInc columns")
case None => default.getOrElse(null)
}
}
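  /* Illustrative sketch (not part of the original source): for a column carrying
   * ColumnOption.AutoInc, successive calls to `createDefault` draw from a private AtomicLong,
   * yielding 1, 2, 3, ... as Long or Int depending on `tpe`; any other AutoInc type throws a
   * SlickException. Columns without AutoInc fall back to their Default option, or null when no
   * default is declared:
   *
   *   autoIncLongCol.createDefault  // 1L, then 2L, 3L, ...
   *   plainCol.createDefault        // declared default value, or null
   *
   * `autoIncLongCol` and `plainCol` are hypothetical Column instances, not part of this file.
   */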
}
| kwark/slick | slick/src/main/scala/slick/memory/HeapBackend.scala | Scala | bsd-2-clause | 7,918 |
/*
* Copyright 2013-2015 Websudos, Limited.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Explicit consent must be obtained from the copyright owner, Websudos Limited before any redistribution is made.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.websudos.phantom.builder.serializers
import com.websudos.phantom.builder.QueryBuilder.Utils
import com.websudos.phantom.builder.query.CQLQuery
import com.websudos.phantom.builder.syntax.CQLSyntax
private[builder] trait CollectionModifiers extends BaseModifiers {
def tupled(name: String, tuples: String*): CQLQuery = {
CQLQuery(name).wrap(Utils.join(tuples))
}
def tuple(name: String, tuples: String*): CQLQuery = {
CQLQuery(name).forcePad.append(CQLSyntax.Collections.tuple).wrap(Utils.join(tuples))
.append(CQLSyntax.Symbols.`>`)
}
def frozen(column: String, definition: String): CQLQuery = {
CQLQuery(CQLSyntax.frozen)
.append(CQLSyntax.Symbols.`<`)
.forcePad.append(diamond(column, definition))
.append(CQLSyntax.Symbols.`>`)
}
/**
   * This will wrap the given value in the "<>" diamond syntax, preceded by the collection name.
* It is used to define the collection type of a column.
*
* Sample outputs would be:
* {{{
   * diamond("list", "int") = list<int>
   * diamond("set", "varchar") = set<varchar>
* }}}
*
* @param collection The name of the collection in use.
* @param value The value, usually the type of the CQL collection.
* @return A CQL query serialising the CQL collection column definition syntax.
*/
def diamond(collection: String, value: String): CQLQuery = {
CQLQuery(collection).append(CQLSyntax.Symbols.`<`).append(value).append(CQLSyntax.Symbols.`>`)
}
def prepend(column: String, values: String*): CQLQuery = {
CQLQuery(column).forcePad.append(CQLSyntax.Symbols.`=`).forcePad.append(
collectionModifier(Utils.collection(values).queryString, CQLSyntax.Symbols.+, column)
)
}
def prepend(column: String, valueDef: String): CQLQuery = {
CQLQuery(column).forcePad.append(CQLSyntax.Symbols.`=`).forcePad.append(
collectionModifier(valueDef, CQLSyntax.Symbols.+, column)
)
}
def append(column: String, values: String*): CQLQuery = {
CQLQuery(column).forcePad.append(CQLSyntax.Symbols.`=`).forcePad.append(
collectionModifier(column, CQLSyntax.Symbols.+, Utils.collection(values).queryString)
)
}
def append(column: String, valueDef: String): CQLQuery = {
CQLQuery(column).forcePad.append(CQLSyntax.Symbols.`=`).forcePad.append(
collectionModifier(column, CQLSyntax.Symbols.+, valueDef)
)
}
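  /* Illustrative sketch (not part of the original source): `prepend` and `append` differ only
   * in which side of the `+` operator the column appears on. Assuming `collectionModifier`
   * renders its arguments as "left op right" and `Utils.collection` renders a CQL list
   * literal, the queries look roughly like:
   *
   *   prepend("col", "'a'").queryString  // col = ['a'] + col
   *   append("col", "'a'").queryString   // col = col + ['a']
   *
   * The exact literal form and spacing come from Utils/CQLQuery and are only assumed here.
   */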
def discard(column: String, values: String*): CQLQuery = {
CQLQuery(column).forcePad.append(CQLSyntax.Symbols.`=`).forcePad.append(
collectionModifier(column, CQLSyntax.Symbols.-, Utils.collection(values).queryString)
)
}
def discard(column: String, valueDef: String): CQLQuery = {
CQLQuery(column).forcePad.append(CQLSyntax.Symbols.`=`).forcePad.append(
collectionModifier(column, CQLSyntax.Symbols.-, valueDef)
)
}
def add(column: String, values: Set[String]): CQLQuery = {
CQLQuery(column).forcePad.append(CQLSyntax.Symbols.`=`).forcePad.append(
collectionModifier(column, CQLSyntax.Symbols.+, Utils.set(values))
)
}
/**
   * Creates a set removal query, removing the given values from the named set column.
* Assumes values are already serialised to their CQL form and escaped.
*
* {{{
* setColumn = setColumn - {`test`, `test2`}
* }}}
*
* @param column The name of the set column.
* @param values The set of values, pre-serialized and escaped.
* @return A CQLQuery set remove query as described above.
*/
def remove(column: String, values: Set[String]): CQLQuery = {
CQLQuery(column).forcePad.append(CQLSyntax.Symbols.`=`).forcePad.append(
collectionModifier(column, CQLSyntax.Symbols.-, Utils.set(values))
)
}
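  /**
   * Serialises a single map-entry assignment of the form `column[key] = value`,
   * used to set one entry of a map column by key.
   */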
def mapSet(column: String, key: String, value: String): CQLQuery = {
CQLQuery(column).append(CQLSyntax.Symbols.`[`)
.append(key).append(CQLSyntax.Symbols.`]`)
.forcePad.append(CQLSyntax.eqs)
.forcePad.append(value)
}
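  /**
   * Serialises a list-index assignment of the form `column[index] = value`,
   * used to overwrite the element of a list column at the given index.
   */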
def setIdX(column: String, index: String, value: String): CQLQuery = {
CQLQuery(column).append(CQLSyntax.Symbols.`[`)
.append(index).append(CQLSyntax.Symbols.`]`)
.forcePad.append(CQLSyntax.eqs)
.forcePad.append(value)
}
def put(column: String, pairs: (String, String)*): CQLQuery = {
CQLQuery(column).forcePad.append(CQLSyntax.Symbols.`=`).forcePad.append(
collectionModifier(column, CQLSyntax.Symbols.+, Utils.map(pairs))
)
}
def serialize(set: Set[String]): CQLQuery = {
CQLQuery(CQLSyntax.Symbols.`{`)
.forcePad.append(CQLQuery(set))
.forcePad.append(CQLSyntax.Symbols.`}`)
}
  def serialize(col: Map[String, String]): CQLQuery = {
CQLQuery(CQLSyntax.Symbols.`{`).forcePad
.append(CQLQuery(col.map(item => s"${item._1} : ${item._2}")))
.forcePad.append(CQLSyntax.Symbols.`}`)
}
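  /* Illustrative sketch (not part of the original source): both `serialize` overloads wrap
   * their input in curly braces, matching the CQL literal syntax for sets and maps. Assuming
   * the elements are already serialised CQL literals:
   *
   *   serialize(Set("'a'", "'b'")).queryString    // { 'a', 'b' }
   *   serialize(Map("'k'" -> "'v'")).queryString  // { 'k' : 'v' }
   *
   * The exact spacing depends on how CQLQuery joins its parts and is only assumed here.
   */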
def mapType(keyType: String, valueType: String): CQLQuery = {
diamond(CQLSyntax.Collections.map, CQLQuery(List(keyType, valueType)).queryString)
}
def listType(valueType: String): CQLQuery = {
diamond(CQLSyntax.Collections.list, valueType)
}
def setType(valueType: String): CQLQuery = {
diamond(CQLSyntax.Collections.set, valueType)
}
}
| analytically/phantom | phantom-dsl/src/main/scala/com/websudos/phantom/builder/serializers/CollectionModifiers.scala | Scala | bsd-2-clause | 6,675 |
package org.coursera.zipkin
import com.twitter.finagle.Httpx
import com.twitter.finagle.param
import com.twitter.logging.Logger
import com.twitter.scrooge.BinaryThriftStructSerializer
import com.twitter.server.Closer
import com.twitter.server.TwitterServer
import com.twitter.util.Await
import com.twitter.util.Base64StringEncoder
import com.twitter.util.Future
import com.twitter.zipkin.cassandra.CassandraSpanStoreFactory
import com.twitter.zipkin.conversions.thrift._
import com.twitter.zipkin.query.ThriftQueryService
import com.twitter.zipkin.receiver.kafka.KafkaProcessor
import com.twitter.zipkin.thriftscala.{Span => ThriftSpan}
import com.twitter.zipkin.receiver.kafka.KafkaSpanReceiverFactory
import com.twitter.zipkin.web.ZipkinWebFactory
import scala.util.Try
object StringDecoder extends KafkaProcessor.KafkaDecoder {
private[this] val deserializer = new BinaryThriftStructSerializer[ThriftSpan] {
override val encoder = Base64StringEncoder
val codec = ThriftSpan
}
override def fromBytes(bytes: Array[Byte]): List[ThriftSpan] = {
val s = new String(bytes)
s.split(",").flatMap(d => Try(deserializer.fromString(d)).toOption).toList
}
}
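/* Illustrative sketch (not part of the original source): the Kafka payload is assumed to be a
 * comma-separated list of Base64-encoded, Thrift-serialised spans. Entries that fail to decode
 * are dropped via Try(...).toOption, so a partially corrupt message still yields its valid spans:
 *
 *   val encoded = deserializer.toString(thriftSpan)            // Base64 string for one span
 *   StringDecoder.fromBytes(s"$encoded,not-base64".getBytes)   // List(thriftSpan)
 *
 * `deserializer` and `thriftSpan` above are hypothetical placeholders, not part of this file.
 */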
object Main
extends TwitterServer
with CassandraSpanStoreFactory
with KafkaSpanReceiverFactory
with ZipkinWebFactory {
def main() {
val logger = Logger.get("Main")
val store = newCassandraStore()
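    // Converts incoming Thrift spans and writes them to the Cassandra span store. Write
    // failures are logged and swallowed so that the Kafka receiver keeps consuming.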
def process(spans: Seq[ThriftSpan]): Future[Unit] = {
val converted = spans.map(_.toSpan)
store(converted) rescue {
case t: Throwable =>
logger.error("Error while writing span.", t)
Future.value(())
}
}
val kafkaReceiver = newKafkaSpanReceiver(process, valueDecoder = StringDecoder)
val query = new ThriftQueryService(store)
val server = Httpx.server
.configured(param.Label("zipkin-web"))
.serve(webServerPort(), newWebServer(query))
onExit {
server.close()
kafkaReceiver.close()
store.close()
}
Await.all(server, kafkaReceiver)
}
}
| coursera/zipkin | zipkin-all/src/main/scala/org/coursera/zipkin/Main.scala | Scala | apache-2.0 | 2,044 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.adaptive
import java.io.File
import java.net.URI
import org.apache.log4j.Level
import org.scalatest.PrivateMethodTester
import org.apache.spark.scheduler.{SparkListener, SparkListenerEvent, SparkListenerJobStart}
import org.apache.spark.sql.{Dataset, QueryTest, Row, SparkSession, Strategy}
import org.apache.spark.sql.catalyst.optimizer.{BuildLeft, BuildRight}
import org.apache.spark.sql.catalyst.plans.logical.{Aggregate, LogicalPlan}
import org.apache.spark.sql.execution.{CollectLimitExec, CommandResultExec, LocalTableScanExec, PartialReducerPartitionSpec, QueryExecution, ReusedSubqueryExec, ShuffledRowRDD, SortExec, SparkPlan, UnaryExecNode, UnionExec}
import org.apache.spark.sql.execution.command.DataWritingCommandExec
import org.apache.spark.sql.execution.datasources.noop.NoopDataSource
import org.apache.spark.sql.execution.datasources.v2.V2TableWriteExec
import org.apache.spark.sql.execution.exchange.{BroadcastExchangeExec, ENSURE_REQUIREMENTS, Exchange, REPARTITION_BY_COL, REPARTITION_BY_NUM, ReusedExchangeExec, ShuffleExchangeExec, ShuffleExchangeLike, ShuffleOrigin}
import org.apache.spark.sql.execution.joins.{BaseJoinExec, BroadcastHashJoinExec, ShuffledHashJoinExec, ShuffledJoin, SortMergeJoinExec}
import org.apache.spark.sql.execution.metric.SQLShuffleReadMetricsReporter
import org.apache.spark.sql.execution.ui.SparkListenerSQLAdaptiveExecutionUpdate
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.SQLConf.PartitionOverwriteMode
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.test.SQLTestData.TestData
import org.apache.spark.sql.types.{IntegerType, StructType}
import org.apache.spark.sql.util.QueryExecutionListener
import org.apache.spark.util.Utils
class AdaptiveQueryExecSuite
extends QueryTest
with SharedSparkSession
with AdaptiveSparkPlanHelper
with PrivateMethodTester {
import testImplicits._
setupTestData()
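  /**
   * Runs `query` with AQE enabled and returns (initial plan, final adaptive plan). Along the
   * way it checks that the executed plan flips from `isFinalPlan=false` to `isFinalPlan=true`,
   * verifies the result against a non-AQE run of the same query, asserts the expected number
   * of final-plan UI updates, and asserts that the final plan contains no Exchange nodes.
   */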
private def runAdaptiveAndVerifyResult(query: String): (SparkPlan, SparkPlan) = {
var finalPlanCnt = 0
val listener = new SparkListener {
override def onOtherEvent(event: SparkListenerEvent): Unit = {
event match {
case SparkListenerSQLAdaptiveExecutionUpdate(_, _, sparkPlanInfo) =>
if (sparkPlanInfo.simpleString.startsWith(
"AdaptiveSparkPlan isFinalPlan=true")) {
finalPlanCnt += 1
}
case _ => // ignore other events
}
}
}
spark.sparkContext.addSparkListener(listener)
val dfAdaptive = sql(query)
val planBefore = dfAdaptive.queryExecution.executedPlan
assert(planBefore.toString.startsWith("AdaptiveSparkPlan isFinalPlan=false"))
val result = dfAdaptive.collect()
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "false") {
val df = sql(query)
checkAnswer(df, result)
}
val planAfter = dfAdaptive.queryExecution.executedPlan
assert(planAfter.toString.startsWith("AdaptiveSparkPlan isFinalPlan=true"))
val adaptivePlan = planAfter.asInstanceOf[AdaptiveSparkPlanExec].executedPlan
spark.sparkContext.listenerBus.waitUntilEmpty()
    // AQE will post `SparkListenerSQLAdaptiveExecutionUpdate` twice when there are subqueries
    // that exist outside of query stages.
val expectedFinalPlanCnt = adaptivePlan.find(_.subqueries.nonEmpty).map(_ => 2).getOrElse(1)
assert(finalPlanCnt == expectedFinalPlanCnt)
spark.sparkContext.removeSparkListener(listener)
val exchanges = adaptivePlan.collect {
case e: Exchange => e
}
assert(exchanges.isEmpty, "The final plan should not contain any Exchange node.")
(dfAdaptive.queryExecution.sparkPlan, adaptivePlan)
}
private def findTopLevelBroadcastHashJoin(plan: SparkPlan): Seq[BroadcastHashJoinExec] = {
collect(plan) {
case j: BroadcastHashJoinExec => j
}
}
private def findTopLevelSortMergeJoin(plan: SparkPlan): Seq[SortMergeJoinExec] = {
collect(plan) {
case j: SortMergeJoinExec => j
}
}
private def findTopLevelShuffledHashJoin(plan: SparkPlan): Seq[ShuffledHashJoinExec] = {
collect(plan) {
case j: ShuffledHashJoinExec => j
}
}
private def findTopLevelBaseJoin(plan: SparkPlan): Seq[BaseJoinExec] = {
collect(plan) {
case j: BaseJoinExec => j
}
}
private def findTopLevelSort(plan: SparkPlan): Seq[SortExec] = {
collect(plan) {
case s: SortExec => s
}
}
private def findTopLevelLimit(plan: SparkPlan): Seq[CollectLimitExec] = {
collect(plan) {
case l: CollectLimitExec => l
}
}
private def findReusedExchange(plan: SparkPlan): Seq[ReusedExchangeExec] = {
collectWithSubqueries(plan) {
case ShuffleQueryStageExec(_, e: ReusedExchangeExec, _) => e
case BroadcastQueryStageExec(_, e: ReusedExchangeExec, _) => e
}
}
private def findReusedSubquery(plan: SparkPlan): Seq[ReusedSubqueryExec] = {
collectWithSubqueries(plan) {
case e: ReusedSubqueryExec => e
}
}
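  /**
   * Asserts that every local AQE shuffle read in `plan` reports preferred locations for all of
   * its partitions, and that the number of shuffle query stages equals the number of local
   * reads plus `numShufflesWithoutLocalRead`.
   */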
private def checkNumLocalShuffleReads(
plan: SparkPlan, numShufflesWithoutLocalRead: Int = 0): Unit = {
val numShuffles = collect(plan) {
case s: ShuffleQueryStageExec => s
}.length
val numLocalReads = collect(plan) {
case read: AQEShuffleReadExec if read.isLocalRead => read
}
numLocalReads.foreach { r =>
val rdd = r.execute()
val parts = rdd.partitions
assert(parts.forall(rdd.preferredLocations(_).nonEmpty))
}
assert(numShuffles === (numLocalReads.length + numShufflesWithoutLocalRead))
}
private def checkInitialPartitionNum(df: Dataset[_], numPartition: Int): Unit = {
// repartition obeys initialPartitionNum when adaptiveExecutionEnabled
val plan = df.queryExecution.executedPlan
assert(plan.isInstanceOf[AdaptiveSparkPlanExec])
val shuffle = plan.asInstanceOf[AdaptiveSparkPlanExec].executedPlan.collect {
case s: ShuffleExchangeExec => s
}
assert(shuffle.size == 1)
assert(shuffle(0).outputPartitioning.numPartitions == numPartition)
}
test("Change merge join to broadcast join") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData join testData2 ON key = a where value = '1'")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
checkNumLocalShuffleReads(adaptivePlan)
}
}
test("Reuse the parallelism of coalesced shuffle in local shuffle read") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80",
SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "10") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData join testData2 ON key = a where value = '1'")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
val localReads = collect(adaptivePlan) {
case read: AQEShuffleReadExec if read.isLocalRead => read
}
assert(localReads.length == 2)
val localShuffleRDD0 = localReads(0).execute().asInstanceOf[ShuffledRowRDD]
val localShuffleRDD1 = localReads(1).execute().asInstanceOf[ShuffledRowRDD]
// The pre-shuffle partition size is [0, 0, 0, 72, 0]
      // We exclude the 0-size partitions, so only one partition remains: advisoryParallelism = 1.
      // The final parallelism is advisoryParallelism = 1 since advisoryParallelism < numMappers,
      // so the partitions length is 1.
assert(localShuffleRDD0.getPartitions.length == 1)
// The pre-shuffle partition size is [0, 72, 0, 72, 126]
      // We exclude the 0-size partitions, so only 3 partitions remain: advisoryParallelism = 3.
      // The final parallelism is advisoryParallelism / numMappers: 3 / 2 = 1 since
      // advisoryParallelism >= numMappers, so the partitions length is 1 * numMappers = 2.
assert(localShuffleRDD1.getPartitions.length == 2)
}
}
test("Reuse the default parallelism in local shuffle read") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80",
SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "false") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData join testData2 ON key = a where value = '1'")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
val localReads = collect(adaptivePlan) {
case read: AQEShuffleReadExec if read.isLocalRead => read
}
assert(localReads.length == 2)
val localShuffleRDD0 = localReads(0).execute().asInstanceOf[ShuffledRowRDD]
val localShuffleRDD1 = localReads(1).execute().asInstanceOf[ShuffledRowRDD]
// the final parallelism is math.max(1, numReduces / numMappers): math.max(1, 5/2) = 2
// and the partitions length is 2 * numMappers = 4
assert(localShuffleRDD0.getPartitions.length == 4)
// the final parallelism is math.max(1, numReduces / numMappers): math.max(1, 5/2) = 2
// and the partitions length is 2 * numMappers = 4
assert(localShuffleRDD1.getPartitions.length == 4)
}
}
test("Empty stage coalesced to 1-partition RDD") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "true",
SQLConf.ADAPTIVE_OPTIMIZER_EXCLUDED_RULES.key -> AQEPropagateEmptyRelation.ruleName) {
val df1 = spark.range(10).withColumn("a", 'id)
val df2 = spark.range(10).withColumn("b", 'id)
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
val testDf = df1.where('a > 10).join(df2.where('b > 10), Seq("id"), "left_outer")
.groupBy('a).count()
checkAnswer(testDf, Seq())
val plan = testDf.queryExecution.executedPlan
assert(find(plan)(_.isInstanceOf[SortMergeJoinExec]).isDefined)
val coalescedReads = collect(plan) {
case r: AQEShuffleReadExec => r
}
assert(coalescedReads.length == 3)
coalescedReads.foreach(r => assert(r.partitionSpecs.length == 1))
}
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "1") {
val testDf = df1.where('a > 10).join(df2.where('b > 10), Seq("id"), "left_outer")
.groupBy('a).count()
checkAnswer(testDf, Seq())
val plan = testDf.queryExecution.executedPlan
assert(find(plan)(_.isInstanceOf[BroadcastHashJoinExec]).isDefined)
val coalescedReads = collect(plan) {
case r: AQEShuffleReadExec => r
}
assert(coalescedReads.length == 3, s"$plan")
coalescedReads.foreach(r => assert(r.isLocalRead || r.partitionSpecs.length == 1))
}
}
}
test("Scalar subquery") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData join testData2 ON key = a " +
"where value = (SELECT max(a) from testData3)")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
checkNumLocalShuffleReads(adaptivePlan)
}
}
test("Scalar subquery in later stages") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData join testData2 ON key = a " +
"where (value + a) = (SELECT max(a) from testData3)")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
checkNumLocalShuffleReads(adaptivePlan)
}
}
test("multiple joins") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"""
|WITH t4 AS (
| SELECT * FROM lowercaseData t2 JOIN testData3 t3 ON t2.n = t3.a where t2.n = '1'
|)
|SELECT * FROM testData
|JOIN testData2 t2 ON key = t2.a
|JOIN t4 ON t2.b = t4.a
|WHERE value = 1
""".stripMargin)
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 3)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 3)
// A possible resulting query plan:
// BroadcastHashJoin
// +- BroadcastExchange
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- BroadcastHashJoin
// +- BroadcastExchange
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- BroadcastHashJoin
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- BroadcastExchange
// +-LocalShuffleReader*
// +- ShuffleExchange
      // After applying the 'OptimizeShuffleWithLocalRead' rule, all four shuffle reads in the
      // bottom two 'BroadcastHashJoin' nodes can be converted to local shuffle reads.
      // For the top-level 'BroadcastHashJoin', the probe side is not a shuffle query stage,
      // and the build side shuffle query stage is also converted to a local shuffle read.
checkNumLocalShuffleReads(adaptivePlan)
}
}
test("multiple joins with aggregate") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"""
|WITH t4 AS (
| SELECT * FROM lowercaseData t2 JOIN (
| select a, sum(b) from testData3 group by a
| ) t3 ON t2.n = t3.a where t2.n = '1'
|)
|SELECT * FROM testData
|JOIN testData2 t2 ON key = t2.a
|JOIN t4 ON t2.b = t4.a
|WHERE value = 1
""".stripMargin)
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 3)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 3)
// A possible resulting query plan:
// BroadcastHashJoin
// +- BroadcastExchange
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- BroadcastHashJoin
// +- BroadcastExchange
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- BroadcastHashJoin
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- BroadcastExchange
// +-HashAggregate
// +- CoalescedShuffleReader
// +- ShuffleExchange
      // The shuffle added by the Aggregate can't use a local read.
checkNumLocalShuffleReads(adaptivePlan, 1)
}
}
test("multiple joins with aggregate 2") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "500") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"""
|WITH t4 AS (
| SELECT * FROM lowercaseData t2 JOIN (
| select a, max(b) b from testData2 group by a
| ) t3 ON t2.n = t3.b
|)
|SELECT * FROM testData
|JOIN testData2 t2 ON key = t2.a
|JOIN t4 ON value = t4.a
|WHERE value = 1
""".stripMargin)
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 3)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 3)
// A possible resulting query plan:
// BroadcastHashJoin
// +- BroadcastExchange
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- BroadcastHashJoin
// +- BroadcastExchange
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- BroadcastHashJoin
// +- Filter
// +- HashAggregate
// +- CoalescedShuffleReader
// +- ShuffleExchange
// +- BroadcastExchange
// +-LocalShuffleReader*
// +- ShuffleExchange
      // The shuffle added by the Aggregate can't use a local read.
checkNumLocalShuffleReads(adaptivePlan, 1)
}
}
test("Exchange reuse") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT value FROM testData join testData2 ON key = a " +
"join (SELECT value v from testData join testData3 ON key = a) on value = v")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 3)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 2)
      // There is still an SMJ, and its two shuffles can't use local reads.
checkNumLocalShuffleReads(adaptivePlan, 2)
// Even with local shuffle read, the query stage reuse can also work.
val ex = findReusedExchange(adaptivePlan)
assert(ex.size == 1)
}
}
test("Exchange reuse with subqueries") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT a FROM testData join testData2 ON key = a " +
"where value = (SELECT max(a) from testData join testData2 ON key = a)")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
checkNumLocalShuffleReads(adaptivePlan)
// Even with local shuffle read, the query stage reuse can also work.
val ex = findReusedExchange(adaptivePlan)
assert(ex.size == 1)
}
}
test("Exchange reuse across subqueries") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80",
SQLConf.SUBQUERY_REUSE_ENABLED.key -> "false") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT a FROM testData join testData2 ON key = a " +
"where value >= (SELECT max(a) from testData join testData2 ON key = a) " +
"and a <= (SELECT max(a) from testData join testData2 ON key = a)")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
checkNumLocalShuffleReads(adaptivePlan)
// Even with local shuffle read, the query stage reuse can also work.
val ex = findReusedExchange(adaptivePlan)
assert(ex.nonEmpty)
val sub = findReusedSubquery(adaptivePlan)
assert(sub.isEmpty)
}
}
test("Subquery reuse") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT a FROM testData join testData2 ON key = a " +
"where value >= (SELECT max(a) from testData join testData2 ON key = a) " +
"and a <= (SELECT max(a) from testData join testData2 ON key = a)")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
checkNumLocalShuffleReads(adaptivePlan)
// Even with local shuffle read, the query stage reuse can also work.
val ex = findReusedExchange(adaptivePlan)
assert(ex.isEmpty)
val sub = findReusedSubquery(adaptivePlan)
assert(sub.nonEmpty)
}
}
test("Broadcast exchange reuse across subqueries") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "20000000",
SQLConf.SUBQUERY_REUSE_ENABLED.key -> "false") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT a FROM testData join testData2 ON key = a " +
"where value >= (" +
"SELECT /*+ broadcast(testData2) */ max(key) from testData join testData2 ON key = a) " +
"and a <= (" +
"SELECT /*+ broadcast(testData2) */ max(value) from testData join testData2 ON key = a)")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
checkNumLocalShuffleReads(adaptivePlan)
// Even with local shuffle read, the query stage reuse can also work.
val ex = findReusedExchange(adaptivePlan)
assert(ex.nonEmpty)
assert(ex.head.child.isInstanceOf[BroadcastExchangeExec])
val sub = findReusedSubquery(adaptivePlan)
assert(sub.isEmpty)
}
}
test("Union/Except/Intersect queries") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
runAdaptiveAndVerifyResult(
"""
|SELECT * FROM testData
|EXCEPT
|SELECT * FROM testData2
|UNION ALL
|SELECT * FROM testData
|INTERSECT ALL
|SELECT * FROM testData2
""".stripMargin)
}
}
test("Subquery de-correlation in Union queries") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
withTempView("a", "b") {
Seq("a" -> 2, "b" -> 1).toDF("id", "num").createTempView("a")
Seq("a" -> 2, "b" -> 1).toDF("id", "num").createTempView("b")
runAdaptiveAndVerifyResult(
"""
|SELECT id,num,source FROM (
| SELECT id, num, 'a' as source FROM a
| UNION ALL
| SELECT id, num, 'b' as source FROM b
|) AS c WHERE c.id IN (SELECT id FROM b WHERE num = 2)
""".stripMargin)
}
}
}
test("Avoid plan change if cost is greater") {
val origPlan = sql("SELECT * FROM testData " +
"join testData2 t2 ON key = t2.a " +
"join testData2 t3 on t2.a = t3.a where t2.b = 1").queryExecution.executedPlan
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80",
SQLConf.BROADCAST_HASH_JOIN_OUTPUT_PARTITIONING_EXPAND_LIMIT.key -> "0") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData " +
"join testData2 t2 ON key = t2.a " +
"join testData2 t3 on t2.a = t3.a where t2.b = 1")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 2)
val smj2 = findTopLevelSortMergeJoin(adaptivePlan)
assert(smj2.size == 2, origPlan.toString)
}
}
test("Change merge join to broadcast join without local shuffle read") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.LOCAL_SHUFFLE_READER_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "40") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"""
|SELECT * FROM testData t1 join testData2 t2
|ON t1.key = t2.a join testData3 t3 on t2.a = t3.a
|where t1.value = 1
""".stripMargin
)
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 2)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
      // There is still an SMJ, and its two shuffles can't use local reads.
checkNumLocalShuffleReads(adaptivePlan, 2)
}
}
test("Avoid changing merge join to broadcast join if too many empty partitions on build plan") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.NON_EMPTY_PARTITION_RATIO_FOR_BROADCAST_JOIN.key -> "0.5") {
      // `testData` is small enough to be broadcast, but too many of its partitions are empty
      // relative to the configured non-empty-partition ratio.
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData join testData2 ON key = a where value = '1'")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.isEmpty)
}
// It is still possible to broadcast `testData2`.
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "2000") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData join testData2 ON key = a where value = '1'")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
assert(bhj.head.buildSide == BuildRight)
}
}
}
test("SPARK-29906: AQE should not introduce extra shuffle for outermost limit") {
var numStages = 0
val listener = new SparkListener {
override def onJobStart(jobStart: SparkListenerJobStart): Unit = {
numStages = jobStart.stageInfos.length
}
}
try {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
spark.sparkContext.addSparkListener(listener)
spark.range(0, 100, 1, numPartitions = 10).take(1)
spark.sparkContext.listenerBus.waitUntilEmpty()
// Should be only one stage since there is no shuffle.
assert(numStages == 1)
}
} finally {
spark.sparkContext.removeSparkListener(listener)
}
}
test("SPARK-30524: Do not optimize skew join if introduce additional shuffle") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
SQLConf.SKEW_JOIN_SKEWED_PARTITION_THRESHOLD.key -> "100",
SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "100") {
withTempView("skewData1", "skewData2") {
spark
.range(0, 1000, 1, 10)
.selectExpr("id % 3 as key1", "id as value1")
.createOrReplaceTempView("skewData1")
spark
.range(0, 1000, 1, 10)
.selectExpr("id % 1 as key2", "id as value2")
.createOrReplaceTempView("skewData2")
def checkSkewJoin(query: String, optimizeSkewJoin: Boolean): Unit = {
val (_, innerAdaptivePlan) = runAdaptiveAndVerifyResult(query)
val innerSmj = findTopLevelSortMergeJoin(innerAdaptivePlan)
assert(innerSmj.size == 1 && innerSmj.head.isSkewJoin == optimizeSkewJoin)
}
checkSkewJoin(
"SELECT key1 FROM skewData1 JOIN skewData2 ON key1 = key2", true)
// Additional shuffle introduced, so disable the "OptimizeSkewedJoin" optimization
checkSkewJoin(
"SELECT key1 FROM skewData1 JOIN skewData2 ON key1 = key2 GROUP BY key1", false)
}
}
}
test("SPARK-29544: adaptive skew join with different join types") {
Seq("SHUFFLE_MERGE", "SHUFFLE_HASH").foreach { joinHint =>
def getJoinNode(plan: SparkPlan): Seq[ShuffledJoin] = if (joinHint == "SHUFFLE_MERGE") {
findTopLevelSortMergeJoin(plan)
} else {
findTopLevelShuffledHashJoin(plan)
}
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
SQLConf.COALESCE_PARTITIONS_MIN_PARTITION_NUM.key -> "1",
SQLConf.SHUFFLE_PARTITIONS.key -> "100",
SQLConf.SKEW_JOIN_SKEWED_PARTITION_THRESHOLD.key -> "800",
SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "800") {
withTempView("skewData1", "skewData2") {
spark
.range(0, 1000, 1, 10)
.select(
when('id < 250, 249)
.when('id >= 750, 1000)
.otherwise('id).as("key1"),
'id as "value1")
.createOrReplaceTempView("skewData1")
spark
.range(0, 1000, 1, 10)
.select(
when('id < 250, 249)
.otherwise('id).as("key2"),
'id as "value2")
.createOrReplaceTempView("skewData2")
def checkSkewJoin(
joins: Seq[ShuffledJoin],
leftSkewNum: Int,
rightSkewNum: Int): Unit = {
assert(joins.size == 1 && joins.head.isSkewJoin)
assert(joins.head.left.collect {
case r: AQEShuffleReadExec => r
}.head.partitionSpecs.collect {
case p: PartialReducerPartitionSpec => p.reducerIndex
}.distinct.length == leftSkewNum)
assert(joins.head.right.collect {
case r: AQEShuffleReadExec => r
}.head.partitionSpecs.collect {
case p: PartialReducerPartitionSpec => p.reducerIndex
}.distinct.length == rightSkewNum)
}
// skewed inner join optimization
val (_, innerAdaptivePlan) = runAdaptiveAndVerifyResult(
s"SELECT /*+ $joinHint(skewData1) */ * FROM skewData1 " +
"JOIN skewData2 ON key1 = key2")
val inner = getJoinNode(innerAdaptivePlan)
checkSkewJoin(inner, 2, 1)
// skewed left outer join optimization
val (_, leftAdaptivePlan) = runAdaptiveAndVerifyResult(
s"SELECT /*+ $joinHint(skewData2) */ * FROM skewData1 " +
"LEFT OUTER JOIN skewData2 ON key1 = key2")
val leftJoin = getJoinNode(leftAdaptivePlan)
checkSkewJoin(leftJoin, 2, 0)
// skewed right outer join optimization
val (_, rightAdaptivePlan) = runAdaptiveAndVerifyResult(
s"SELECT /*+ $joinHint(skewData1) */ * FROM skewData1 " +
"RIGHT OUTER JOIN skewData2 ON key1 = key2")
val rightJoin = getJoinNode(rightAdaptivePlan)
checkSkewJoin(rightJoin, 0, 1)
}
}
}
}
test("SPARK-30291: AQE should catch the exceptions when doing materialize") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
withTable("bucketed_table") {
val df1 =
(0 until 50).map(i => (i % 5, i % 13, i.toString)).toDF("i", "j", "k").as("df1")
df1.write.format("parquet").bucketBy(8, "i").saveAsTable("bucketed_table")
val warehouseFilePath = new URI(spark.sessionState.conf.warehousePath).getPath
val tableDir = new File(warehouseFilePath, "bucketed_table")
Utils.deleteRecursively(tableDir)
df1.write.parquet(tableDir.getAbsolutePath)
val aggregated = spark.table("bucketed_table").groupBy("i").count()
val error = intercept[Exception] {
aggregated.count()
}
assert(error.toString contains "Invalid bucket file")
assert(error.getSuppressed.size === 0)
}
}
}
test("SPARK-30403: AQE should handle InSubquery") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
runAdaptiveAndVerifyResult("SELECT * FROM testData LEFT OUTER join testData2" +
" ON key = a AND key NOT IN (select a from testData3) where value = '1'"
)
}
}
test("force apply AQE") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.ADAPTIVE_EXECUTION_FORCE_APPLY.key -> "true") {
val plan = sql("SELECT * FROM testData").queryExecution.executedPlan
assert(plan.isInstanceOf[AdaptiveSparkPlanExec])
}
}
test("SPARK-30719: do not log warning if intentionally skip AQE") {
val testAppender = new LogAppender("aqe logging warning test when skip")
withLogAppender(testAppender) {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
val plan = sql("SELECT * FROM testData").queryExecution.executedPlan
assert(!plan.isInstanceOf[AdaptiveSparkPlanExec])
}
}
assert(!testAppender.loggingEvents
.exists(msg => msg.getRenderedMessage.contains(
s"${SQLConf.ADAPTIVE_EXECUTION_ENABLED.key} is" +
s" enabled but is not supported for")))
}
test("test log level") {
def verifyLog(expectedLevel: Level): Unit = {
val logAppender = new LogAppender("adaptive execution")
withLogAppender(
logAppender,
loggerNames = Seq(AdaptiveSparkPlanExec.getClass.getName.dropRight(1)),
level = Some(Level.TRACE)) {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
sql("SELECT * FROM testData join testData2 ON key = a where value = '1'").collect()
}
}
Seq("Plan changed", "Final plan").foreach { msg =>
assert(
logAppender.loggingEvents.exists { event =>
event.getRenderedMessage.contains(msg) && event.getLevel == expectedLevel
})
}
}
// Verify default log level
verifyLog(Level.DEBUG)
// Verify custom log level
val levels = Seq(
"TRACE" -> Level.TRACE,
"trace" -> Level.TRACE,
"DEBUG" -> Level.DEBUG,
"debug" -> Level.DEBUG,
"INFO" -> Level.INFO,
"info" -> Level.INFO,
"WARN" -> Level.WARN,
"warn" -> Level.WARN,
"ERROR" -> Level.ERROR,
"error" -> Level.ERROR,
"deBUG" -> Level.DEBUG)
levels.foreach { level =>
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_LOG_LEVEL.key -> level._1) {
verifyLog(level._2)
}
}
}
test("tree string output") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
val df = sql("SELECT * FROM testData join testData2 ON key = a where value = '1'")
val planBefore = df.queryExecution.executedPlan
assert(!planBefore.toString.contains("== Current Plan =="))
assert(!planBefore.toString.contains("== Initial Plan =="))
df.collect()
val planAfter = df.queryExecution.executedPlan
assert(planAfter.toString.contains("== Final Plan =="))
assert(planAfter.toString.contains("== Initial Plan =="))
}
}
test("SPARK-31384: avoid NPE in OptimizeSkewedJoin when there's 0 partition plan") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
withTempView("t2") {
// create DataFrame with 0 partition
spark.createDataFrame(sparkContext.emptyRDD[Row], new StructType().add("b", IntegerType))
.createOrReplaceTempView("t2")
// should run successfully without NPE
runAdaptiveAndVerifyResult("SELECT * FROM testData2 t1 left semi join t2 ON t1.a=t2.b")
}
}
}
test("SPARK-34682: AQEShuffleReadExec operating on canonicalized plan") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
val (_, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT key FROM testData GROUP BY key")
val reads = collect(adaptivePlan) {
case r: AQEShuffleReadExec => r
}
assert(reads.length == 1)
val read = reads.head
val c = read.canonicalized.asInstanceOf[AQEShuffleReadExec]
// we can't just call execute() because that has separate checks for canonicalized plans
val ex = intercept[IllegalStateException] {
val doExecute = PrivateMethod[Unit](Symbol("doExecute"))
c.invokePrivate(doExecute())
}
assert(ex.getMessage === "operating on canonicalized plan")
}
}
test("metrics of the shuffle read") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
val (_, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT key FROM testData GROUP BY key")
val reads = collect(adaptivePlan) {
case r: AQEShuffleReadExec => r
}
assert(reads.length == 1)
val read = reads.head
assert(!read.isLocalRead)
assert(!read.hasSkewedPartition)
assert(read.hasCoalescedPartition)
assert(read.metrics.keys.toSeq.sorted == Seq(
"numCoalescedPartitions", "numPartitions", "partitionDataSize"))
assert(read.metrics("numCoalescedPartitions").value == 1)
assert(read.metrics("numPartitions").value == read.partitionSpecs.length)
assert(read.metrics("partitionDataSize").value > 0)
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (_, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData join testData2 ON key = a where value = '1'")
val join = collect(adaptivePlan) {
case j: BroadcastHashJoinExec => j
}.head
assert(join.buildSide == BuildLeft)
val reads = collect(join.right) {
case r: AQEShuffleReadExec => r
}
assert(reads.length == 1)
val read = reads.head
assert(read.isLocalRead)
assert(read.metrics.keys.toSeq == Seq("numPartitions"))
assert(read.metrics("numPartitions").value == read.partitionSpecs.length)
}
withSQLConf(
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
SQLConf.SHUFFLE_PARTITIONS.key -> "100",
SQLConf.SKEW_JOIN_SKEWED_PARTITION_THRESHOLD.key -> "800",
SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "1000") {
withTempView("skewData1", "skewData2") {
spark
.range(0, 1000, 1, 10)
.select(
when('id < 250, 249)
.when('id >= 750, 1000)
.otherwise('id).as("key1"),
'id as "value1")
.createOrReplaceTempView("skewData1")
spark
.range(0, 1000, 1, 10)
.select(
when('id < 250, 249)
.otherwise('id).as("key2"),
'id as "value2")
.createOrReplaceTempView("skewData2")
val (_, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM skewData1 join skewData2 ON key1 = key2")
val reads = collect(adaptivePlan) {
case r: AQEShuffleReadExec => r
}
reads.foreach { read =>
assert(!read.isLocalRead)
assert(read.hasCoalescedPartition)
assert(read.hasSkewedPartition)
assert(read.metrics.contains("numSkewedPartitions"))
}
assert(reads(0).metrics("numSkewedPartitions").value == 2)
assert(reads(0).metrics("numSkewedSplits").value == 11)
assert(reads(1).metrics("numSkewedPartitions").value == 1)
assert(reads(1).metrics("numSkewedSplits").value == 9)
}
}
}
}
test("control a plan explain mode in listeners via SQLConf") {
def checkPlanDescription(mode: String, expected: Seq[String]): Unit = {
var checkDone = false
val listener = new SparkListener {
override def onOtherEvent(event: SparkListenerEvent): Unit = {
event match {
case SparkListenerSQLAdaptiveExecutionUpdate(_, planDescription, _) =>
assert(expected.forall(planDescription.contains))
checkDone = true
case _ => // ignore other events
}
}
}
spark.sparkContext.addSparkListener(listener)
withSQLConf(SQLConf.UI_EXPLAIN_MODE.key -> mode,
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val dfAdaptive = sql("SELECT * FROM testData JOIN testData2 ON key = a WHERE value = '1'")
try {
checkAnswer(dfAdaptive, Row(1, "1", 1, 1) :: Row(1, "1", 1, 2) :: Nil)
spark.sparkContext.listenerBus.waitUntilEmpty()
assert(checkDone)
} finally {
spark.sparkContext.removeSparkListener(listener)
}
}
}
Seq(("simple", Seq("== Physical Plan ==")),
("extended", Seq("== Parsed Logical Plan ==", "== Analyzed Logical Plan ==",
"== Optimized Logical Plan ==", "== Physical Plan ==")),
("codegen", Seq("WholeStageCodegen subtrees")),
("cost", Seq("== Optimized Logical Plan ==", "Statistics(sizeInBytes")),
("formatted", Seq("== Physical Plan ==", "Output", "Arguments"))).foreach {
case (mode, expected) =>
checkPlanDescription(mode, expected)
}
}
test("SPARK-30953: InsertAdaptiveSparkPlan should apply AQE on child plan of write commands") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.ADAPTIVE_EXECUTION_FORCE_APPLY.key -> "true") {
withTable("t1") {
val plan = sql("CREATE TABLE t1 USING parquet AS SELECT 1 col").queryExecution.executedPlan
assert(plan.isInstanceOf[CommandResultExec])
val commandResultExec = plan.asInstanceOf[CommandResultExec]
assert(commandResultExec.commandPhysicalPlan.isInstanceOf[DataWritingCommandExec])
assert(commandResultExec.commandPhysicalPlan.asInstanceOf[DataWritingCommandExec]
.child.isInstanceOf[AdaptiveSparkPlanExec])
}
}
}
test("AQE should set active session during execution") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
val df = spark.range(10).select(sum('id))
assert(df.queryExecution.executedPlan.isInstanceOf[AdaptiveSparkPlanExec])
SparkSession.setActiveSession(null)
checkAnswer(df, Seq(Row(45)))
SparkSession.setActiveSession(spark) // recover the active session.
}
}
test("No deadlock in UI update") {
object TestStrategy extends Strategy {
def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case _: Aggregate =>
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.ADAPTIVE_EXECUTION_FORCE_APPLY.key -> "true") {
spark.range(5).rdd
}
Nil
case _ => Nil
}
}
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.ADAPTIVE_EXECUTION_FORCE_APPLY.key -> "true") {
try {
spark.experimental.extraStrategies = TestStrategy :: Nil
val df = spark.range(10).groupBy('id).count()
df.collect()
} finally {
spark.experimental.extraStrategies = Nil
}
}
}
test("SPARK-31658: SQL UI should show write commands") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.ADAPTIVE_EXECUTION_FORCE_APPLY.key -> "true") {
withTable("t1") {
var checkDone = false
val listener = new SparkListener {
override def onOtherEvent(event: SparkListenerEvent): Unit = {
event match {
case SparkListenerSQLAdaptiveExecutionUpdate(_, _, planInfo) =>
assert(planInfo.nodeName == "Execute CreateDataSourceTableAsSelectCommand")
checkDone = true
case _ => // ignore other events
}
}
}
spark.sparkContext.addSparkListener(listener)
try {
sql("CREATE TABLE t1 USING parquet AS SELECT 1 col").collect()
spark.sparkContext.listenerBus.waitUntilEmpty()
assert(checkDone)
} finally {
spark.sparkContext.removeSparkListener(listener)
}
}
}
}
test("SPARK-31220, SPARK-32056: repartition by expression with AQE") {
Seq(true, false).foreach { enableAQE =>
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> enableAQE.toString,
SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "true",
SQLConf.COALESCE_PARTITIONS_INITIAL_PARTITION_NUM.key -> "10",
SQLConf.SHUFFLE_PARTITIONS.key -> "10") {
val df1 = spark.range(10).repartition($"id")
val df2 = spark.range(10).repartition($"id" + 1)
val partitionsNum1 = df1.rdd.collectPartitions().length
val partitionsNum2 = df2.rdd.collectPartitions().length
if (enableAQE) {
assert(partitionsNum1 < 10)
assert(partitionsNum2 < 10)
checkInitialPartitionNum(df1, 10)
checkInitialPartitionNum(df2, 10)
} else {
assert(partitionsNum1 === 10)
assert(partitionsNum2 === 10)
}
// Don't coalesce partitions if the number of partitions is specified.
val df3 = spark.range(10).repartition(10, $"id")
val df4 = spark.range(10).repartition(10)
assert(df3.rdd.collectPartitions().length == 10)
assert(df4.rdd.collectPartitions().length == 10)
}
}
}
test("SPARK-31220, SPARK-32056: repartition by range with AQE") {
Seq(true, false).foreach { enableAQE =>
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> enableAQE.toString,
SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "true",
SQLConf.COALESCE_PARTITIONS_INITIAL_PARTITION_NUM.key -> "10",
SQLConf.SHUFFLE_PARTITIONS.key -> "10") {
val df1 = spark.range(10).toDF.repartitionByRange($"id".asc)
val df2 = spark.range(10).toDF.repartitionByRange(($"id" + 1).asc)
val partitionsNum1 = df1.rdd.collectPartitions().length
val partitionsNum2 = df2.rdd.collectPartitions().length
if (enableAQE) {
assert(partitionsNum1 < 10)
assert(partitionsNum2 < 10)
checkInitialPartitionNum(df1, 10)
checkInitialPartitionNum(df2, 10)
} else {
assert(partitionsNum1 === 10)
assert(partitionsNum2 === 10)
}
// Don't coalesce partitions if the number of partitions is specified.
val df3 = spark.range(10).repartitionByRange(10, $"id".asc)
assert(df3.rdd.collectPartitions().length == 10)
}
}
}
test("SPARK-31220, SPARK-32056: repartition using sql and hint with AQE") {
Seq(true, false).foreach { enableAQE =>
withTempView("test") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> enableAQE.toString,
SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "true",
SQLConf.COALESCE_PARTITIONS_INITIAL_PARTITION_NUM.key -> "10",
SQLConf.SHUFFLE_PARTITIONS.key -> "10") {
spark.range(10).toDF.createTempView("test")
val df1 = spark.sql("SELECT /*+ REPARTITION(id) */ * from test")
val df2 = spark.sql("SELECT /*+ REPARTITION_BY_RANGE(id) */ * from test")
val df3 = spark.sql("SELECT * from test DISTRIBUTE BY id")
val df4 = spark.sql("SELECT * from test CLUSTER BY id")
val partitionsNum1 = df1.rdd.collectPartitions().length
val partitionsNum2 = df2.rdd.collectPartitions().length
val partitionsNum3 = df3.rdd.collectPartitions().length
val partitionsNum4 = df4.rdd.collectPartitions().length
if (enableAQE) {
assert(partitionsNum1 < 10)
assert(partitionsNum2 < 10)
assert(partitionsNum3 < 10)
assert(partitionsNum4 < 10)
checkInitialPartitionNum(df1, 10)
checkInitialPartitionNum(df2, 10)
checkInitialPartitionNum(df3, 10)
checkInitialPartitionNum(df4, 10)
} else {
assert(partitionsNum1 === 10)
assert(partitionsNum2 === 10)
assert(partitionsNum3 === 10)
assert(partitionsNum4 === 10)
}
// Don't coalesce partitions if the number of partitions is specified.
val df5 = spark.sql("SELECT /*+ REPARTITION(10, id) */ * from test")
val df6 = spark.sql("SELECT /*+ REPARTITION_BY_RANGE(10, id) */ * from test")
assert(df5.rdd.collectPartitions().length == 10)
assert(df6.rdd.collectPartitions().length == 10)
}
}
}
}
test("SPARK-32573: Eliminate NAAJ when BuildSide is HashedRelationWithAllNullKeys") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> Long.MaxValue.toString) {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData2 t1 WHERE t1.b NOT IN (SELECT b FROM testData3)")
val bhj = findTopLevelBroadcastHashJoin(plan)
assert(bhj.size == 1)
val join = findTopLevelBaseJoin(adaptivePlan)
assert(join.isEmpty)
checkNumLocalShuffleReads(adaptivePlan)
}
}
test("SPARK-32717: AQEOptimizer should respect excludedRules configuration") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> Long.MaxValue.toString,
      // This test is a copy of test(SPARK-32573), in order to verify that the configuration
      // `spark.sql.adaptive.optimizer.excludedRules` works as expected.
SQLConf.ADAPTIVE_OPTIMIZER_EXCLUDED_RULES.key -> AQEPropagateEmptyRelation.ruleName) {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData2 t1 WHERE t1.b NOT IN (SELECT b FROM testData3)")
val bhj = findTopLevelBroadcastHashJoin(plan)
assert(bhj.size == 1)
val join = findTopLevelBaseJoin(adaptivePlan)
      // this is different compared to test(SPARK-32573) because the rule
      // `EliminateUnnecessaryJoin` has been excluded.
assert(join.nonEmpty)
checkNumLocalShuffleReads(adaptivePlan)
}
}
test("SPARK-32649: Eliminate inner and semi join to empty relation") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
Seq(
// inner join (small table at right side)
"SELECT * FROM testData t1 join testData3 t2 ON t1.key = t2.a WHERE t2.b = 1",
// inner join (small table at left side)
"SELECT * FROM testData3 t1 join testData t2 ON t1.a = t2.key WHERE t1.b = 1",
// left semi join
"SELECT * FROM testData t1 left semi join testData3 t2 ON t1.key = t2.a AND t2.b = 1"
).foreach(query => {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(query)
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val join = findTopLevelBaseJoin(adaptivePlan)
assert(join.isEmpty)
checkNumLocalShuffleReads(adaptivePlan)
})
}
}
test("SPARK-34533: Eliminate left anti join to empty relation") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
Seq(
// broadcast non-empty right side
("SELECT /*+ broadcast(testData3) */ * FROM testData LEFT ANTI JOIN testData3", true),
// broadcast empty right side
("SELECT /*+ broadcast(emptyTestData) */ * FROM testData LEFT ANTI JOIN emptyTestData",
true),
// broadcast left side
("SELECT /*+ broadcast(testData) */ * FROM testData LEFT ANTI JOIN testData3", false)
).foreach { case (query, isEliminated) =>
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(query)
assert(findTopLevelBaseJoin(plan).size == 1)
assert(findTopLevelBaseJoin(adaptivePlan).isEmpty == isEliminated)
}
}
}
test("SPARK-34781: Eliminate left semi/anti join to its left side") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
Seq(
// left semi join and non-empty right side
("SELECT * FROM testData LEFT SEMI JOIN testData3", true),
// left semi join, non-empty right side and non-empty join condition
("SELECT * FROM testData t1 LEFT SEMI JOIN testData3 t2 ON t1.key = t2.a", false),
// left anti join and empty right side
("SELECT * FROM testData LEFT ANTI JOIN emptyTestData", true),
// left anti join, empty right side and non-empty join condition
("SELECT * FROM testData t1 LEFT ANTI JOIN emptyTestData t2 ON t1.key = t2.key", true)
).foreach { case (query, isEliminated) =>
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(query)
assert(findTopLevelBaseJoin(plan).size == 1)
assert(findTopLevelBaseJoin(adaptivePlan).isEmpty == isEliminated)
}
}
}
test("SPARK-35455: Unify empty relation optimization between normal and AQE optimizer " +
"- single join") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
Seq(
// left semi join and empty left side
("SELECT * FROM (SELECT * FROM testData WHERE value = '0')t1 LEFT SEMI JOIN " +
"testData2 t2 ON t1.key = t2.a", true),
// left anti join and empty left side
("SELECT * FROM (SELECT * FROM testData WHERE value = '0')t1 LEFT ANTI JOIN " +
"testData2 t2 ON t1.key = t2.a", true),
// left outer join and empty left side
("SELECT * FROM (SELECT * FROM testData WHERE key = 0)t1 LEFT JOIN testData2 t2 ON " +
"t1.key = t2.a", true),
// left outer join and non-empty left side
("SELECT * FROM testData t1 LEFT JOIN testData2 t2 ON " +
"t1.key = t2.a", false),
// right outer join and empty right side
("SELECT * FROM testData t1 RIGHT JOIN (SELECT * FROM testData2 WHERE b = 0)t2 ON " +
"t1.key = t2.a", true),
// right outer join and non-empty right side
("SELECT * FROM testData t1 RIGHT JOIN testData2 t2 ON " +
"t1.key = t2.a", false),
// full outer join and both side empty
("SELECT * FROM (SELECT * FROM testData WHERE key = 0)t1 FULL JOIN " +
"(SELECT * FROM testData2 WHERE b = 0)t2 ON t1.key = t2.a", true),
// full outer join and left side empty right side non-empty
("SELECT * FROM (SELECT * FROM testData WHERE key = 0)t1 FULL JOIN " +
"testData2 t2 ON t1.key = t2.a", true)
).foreach { case (query, isEliminated) =>
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(query)
assert(findTopLevelBaseJoin(plan).size == 1)
assert(findTopLevelBaseJoin(adaptivePlan).isEmpty == isEliminated, adaptivePlan)
}
}
}
test("SPARK-35455: Unify empty relation optimization between normal and AQE optimizer " +
"- multi join") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
Seq(
"""
|SELECT * FROM testData t1
| JOIN (SELECT * FROM testData2 WHERE b = 0) t2 ON t1.key = t2.a
| LEFT JOIN testData2 t3 ON t1.key = t3.a
|""".stripMargin,
"""
|SELECT * FROM (SELECT * FROM testData WHERE key = 0) t1
| LEFT ANTI JOIN testData2 t2
| FULL JOIN (SELECT * FROM testData2 WHERE b = 0) t3 ON t1.key = t3.a
|""".stripMargin,
"""
|SELECT * FROM testData t1
| LEFT SEMI JOIN (SELECT * FROM testData2 WHERE b = 0)
| RIGHT JOIN testData2 t3 on t1.key = t3.a
|""".stripMargin
).foreach { query =>
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(query)
assert(findTopLevelBaseJoin(plan).size == 2)
assert(findTopLevelBaseJoin(adaptivePlan).isEmpty)
}
}
}
test("SPARK-35585: Support propagate empty relation through project/filter") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
val (plan1, adaptivePlan1) = runAdaptiveAndVerifyResult(
"SELECT key FROM testData WHERE key = 0 ORDER BY key, value")
assert(findTopLevelSort(plan1).size == 1)
assert(stripAQEPlan(adaptivePlan1).isInstanceOf[LocalTableScanExec])
val (plan2, adaptivePlan2) = runAdaptiveAndVerifyResult(
"SELECT key FROM (SELECT * FROM testData WHERE value = 'no_match' ORDER BY key)" +
" WHERE key > rand()")
assert(findTopLevelSort(plan2).size == 1)
assert(stripAQEPlan(adaptivePlan2).isInstanceOf[LocalTableScanExec])
}
}
test("SPARK-32753: Only copy tags to node with no tags") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
withTempView("v1") {
spark.range(10).union(spark.range(10)).createOrReplaceTempView("v1")
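        // GROUP BY and DISTRIBUTE BY use the same key, so the adaptive plan
        // should keep only a single shuffle.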
val (_, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT id FROM v1 GROUP BY id DISTRIBUTE BY id")
assert(collect(adaptivePlan) {
case s: ShuffleExchangeExec => s
}.length == 1)
}
}
}
test("Logging plan changes for AQE") {
val testAppender = new LogAppender("plan changes")
withLogAppender(testAppender) {
withSQLConf(
SQLConf.PLAN_CHANGE_LOG_LEVEL.key -> "INFO",
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
sql("SELECT * FROM testData JOIN testData2 ON key = a " +
"WHERE value = (SELECT max(a) FROM testData3)").collect()
}
Seq("=== Result of Batch AQE Preparations ===",
"=== Result of Batch AQE Post Stage Creation ===",
"=== Result of Batch AQE Replanning ===",
"=== Result of Batch AQE Query Stage Optimization ===").foreach { expectedMsg =>
assert(testAppender.loggingEvents.exists(_.getRenderedMessage.contains(expectedMsg)))
}
}
}
test("SPARK-32932: Do not use local shuffle read at final stage on write command") {
withSQLConf(SQLConf.PARTITION_OVERWRITE_MODE.key -> PartitionOverwriteMode.DYNAMIC.toString,
SQLConf.SHUFFLE_PARTITIONS.key -> "5",
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
val data = for (
i <- 1L to 10L;
j <- 1L to 3L
) yield (i, j)
val df = data.toDF("i", "j").repartition($"j")
var noLocalread: Boolean = false
val listener = new QueryExecutionListener {
override def onSuccess(funcName: String, qe: QueryExecution, durationNs: Long): Unit = {
qe.executedPlan match {
case plan@(_: DataWritingCommandExec | _: V2TableWriteExec) =>
assert(plan.asInstanceOf[UnaryExecNode].child.isInstanceOf[AdaptiveSparkPlanExec])
noLocalread = collect(plan) {
case exec: AQEShuffleReadExec if exec.isLocalRead => exec
}.isEmpty
case _ => // ignore other events
}
}
override def onFailure(funcName: String, qe: QueryExecution,
exception: Exception): Unit = {}
}
spark.listenerManager.register(listener)
withTable("t") {
df.write.partitionBy("j").saveAsTable("t")
sparkContext.listenerBus.waitUntilEmpty()
assert(noLocalread)
noLocalread = false
}
// Test DataSource v2
val format = classOf[NoopDataSource].getName
df.write.format(format).mode("overwrite").save()
sparkContext.listenerBus.waitUntilEmpty()
assert(noLocalread)
noLocalread = false
spark.listenerManager.unregister(listener)
}
}
test("SPARK-33494: Do not use local shuffle read for repartition") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
val df = spark.table("testData").repartition('key)
df.collect()
      // A local shuffle read would break the partitioning, so it shouldn't be used for a
      // repartition operation that was explicitly requested by the user.
checkNumLocalShuffleReads(df.queryExecution.executedPlan, numShufflesWithoutLocalRead = 1)
}
}
test("SPARK-33551: Do not use AQE shuffle read for repartition") {
def hasRepartitionShuffle(plan: SparkPlan): Boolean = {
find(plan) {
case s: ShuffleExchangeLike =>
s.shuffleOrigin == REPARTITION_BY_COL || s.shuffleOrigin == REPARTITION_BY_NUM
case _ => false
}.isDefined
}
def checkBHJ(
df: Dataset[Row],
optimizeOutRepartition: Boolean,
probeSideLocalRead: Boolean,
probeSideCoalescedRead: Boolean): Unit = {
df.collect()
val plan = df.queryExecution.executedPlan
// There should be only one shuffle that can't do local read, which is either the top shuffle
// from repartition, or BHJ probe side shuffle.
checkNumLocalShuffleReads(plan, 1)
assert(hasRepartitionShuffle(plan) == !optimizeOutRepartition)
val bhj = findTopLevelBroadcastHashJoin(plan)
assert(bhj.length == 1)
// Build side should do local read.
val buildSide = find(bhj.head.left)(_.isInstanceOf[AQEShuffleReadExec])
assert(buildSide.isDefined)
assert(buildSide.get.asInstanceOf[AQEShuffleReadExec].isLocalRead)
val probeSide = find(bhj.head.right)(_.isInstanceOf[AQEShuffleReadExec])
if (probeSideLocalRead || probeSideCoalescedRead) {
assert(probeSide.isDefined)
if (probeSideLocalRead) {
assert(probeSide.get.asInstanceOf[AQEShuffleReadExec].isLocalRead)
} else {
assert(probeSide.get.asInstanceOf[AQEShuffleReadExec].hasCoalescedPartition)
}
} else {
assert(probeSide.isEmpty)
}
}
def checkSMJ(
df: Dataset[Row],
optimizeOutRepartition: Boolean,
optimizeSkewJoin: Boolean,
coalescedRead: Boolean): Unit = {
df.collect()
val plan = df.queryExecution.executedPlan
assert(hasRepartitionShuffle(plan) == !optimizeOutRepartition)
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.length == 1)
assert(smj.head.isSkewJoin == optimizeSkewJoin)
val aqeReads = collect(smj.head) {
case c: AQEShuffleReadExec => c
}
if (coalescedRead || optimizeSkewJoin) {
assert(aqeReads.length == 2)
if (coalescedRead) assert(aqeReads.forall(_.hasCoalescedPartition))
} else {
assert(aqeReads.isEmpty)
}
}
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.SHUFFLE_PARTITIONS.key -> "5") {
val df = sql(
"""
|SELECT * FROM (
| SELECT * FROM testData WHERE key = 1
|)
|RIGHT OUTER JOIN testData2
|ON value = b
""".stripMargin)
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
// Repartition with no partition num specified.
checkBHJ(df.repartition('b),
// The top shuffle from repartition is optimized out.
optimizeOutRepartition = true, probeSideLocalRead = false, probeSideCoalescedRead = true)
// Repartition with default partition num (5 in test env) specified.
checkBHJ(df.repartition(5, 'b),
// The top shuffle from repartition is optimized out
// The final plan must have 5 partitions, no optimization can be made to the probe side.
optimizeOutRepartition = true, probeSideLocalRead = false, probeSideCoalescedRead = false)
// Repartition with non-default partition num specified.
checkBHJ(df.repartition(4, 'b),
// The top shuffle from repartition is not optimized out
optimizeOutRepartition = false, probeSideLocalRead = true, probeSideCoalescedRead = true)
// Repartition by col and project away the partition cols
checkBHJ(df.repartition('b).select('key),
// The top shuffle from repartition is not optimized out
optimizeOutRepartition = false, probeSideLocalRead = true, probeSideCoalescedRead = true)
}
// Force skew join
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
SQLConf.SKEW_JOIN_ENABLED.key -> "true",
SQLConf.SKEW_JOIN_SKEWED_PARTITION_THRESHOLD.key -> "1",
SQLConf.SKEW_JOIN_SKEWED_PARTITION_FACTOR.key -> "0",
SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "10") {
// Repartition with no partition num specified.
checkSMJ(df.repartition('b),
// The top shuffle from repartition is optimized out.
optimizeOutRepartition = true, optimizeSkewJoin = false, coalescedRead = true)
// Repartition with default partition num (5 in test env) specified.
checkSMJ(df.repartition(5, 'b),
// The top shuffle from repartition is optimized out.
// The final plan must have 5 partitions, can't do coalesced read.
optimizeOutRepartition = true, optimizeSkewJoin = false, coalescedRead = false)
// Repartition with non-default partition num specified.
checkSMJ(df.repartition(4, 'b),
// The top shuffle from repartition is not optimized out.
optimizeOutRepartition = false, optimizeSkewJoin = true, coalescedRead = false)
// Repartition by col and project away the partition cols
checkSMJ(df.repartition('b).select('key),
// The top shuffle from repartition is not optimized out.
optimizeOutRepartition = false, optimizeSkewJoin = true, coalescedRead = false)
}
}
}
test("SPARK-34091: Batch shuffle fetch in AQE partition coalescing") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.SHUFFLE_PARTITIONS.key -> "10",
SQLConf.FETCH_SHUFFLE_BLOCKS_IN_BATCH.key -> "true") {
withTable("t1") {
spark.range(100).selectExpr("id + 1 as a").write.format("parquet").saveAsTable("t1")
val query = "SELECT SUM(a) FROM t1 GROUP BY a"
val (_, adaptivePlan) = runAdaptiveAndVerifyResult(query)
val metricName = SQLShuffleReadMetricsReporter.LOCAL_BLOCKS_FETCHED
val blocksFetchedMetric = collectFirst(adaptivePlan) {
case p if p.metrics.contains(metricName) => p.metrics(metricName)
}
assert(blocksFetchedMetric.isDefined)
val blocksFetched = blocksFetchedMetric.get.value
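        // With batch fetch disabled, contiguous shuffle blocks are no longer merged
        // into a single fetch, so the re-run should report more fetched blocks.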
withSQLConf(SQLConf.FETCH_SHUFFLE_BLOCKS_IN_BATCH.key -> "false") {
val (_, adaptivePlan2) = runAdaptiveAndVerifyResult(query)
val blocksFetchedMetric2 = collectFirst(adaptivePlan2) {
case p if p.metrics.contains(metricName) => p.metrics(metricName)
}
assert(blocksFetchedMetric2.isDefined)
val blocksFetched2 = blocksFetchedMetric2.get.value
assert(blocksFetched < blocksFetched2)
}
}
}
}
test("SPARK-33933: Materialize BroadcastQueryStage first in AQE") {
val testAppender = new LogAppender("aqe query stage materialization order test")
val df = spark.range(1000).select($"id" % 26, $"id" % 10)
.toDF("index", "pv")
val dim = Range(0, 26).map(x => (x, ('a' + x).toChar.toString))
.toDF("index", "name")
val testDf = df.groupBy("index")
.agg(sum($"pv").alias("pv"))
.join(dim, Seq("index"))
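    // Joining the aggregated df with the small dim table creates one shuffle stage and
    // one broadcast stage; the broadcast stage is expected to be materialized first.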
val loggerNames =
Seq(classOf[BroadcastQueryStageExec].getName, classOf[ShuffleQueryStageExec].getName)
withLogAppender(testAppender, loggerNames, level = Some(Level.DEBUG)) {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
val result = testDf.collect()
assert(result.length == 26)
}
}
val materializeLogs = testAppender.loggingEvents
.map(_.getRenderedMessage)
.filter(_.startsWith("Materialize query stage"))
.toArray
assert(materializeLogs(0).startsWith("Materialize query stage BroadcastQueryStageExec"))
assert(materializeLogs(1).startsWith("Materialize query stage ShuffleQueryStageExec"))
}
test("SPARK-34899: Use origin plan if we can not coalesce shuffle partition") {
def checkNoCoalescePartitions(ds: Dataset[Row], origin: ShuffleOrigin): Unit = {
assert(collect(ds.queryExecution.executedPlan) {
case s: ShuffleExchangeExec if s.shuffleOrigin == origin && s.numPartitions == 2 => s
}.size == 1)
ds.collect()
val plan = ds.queryExecution.executedPlan
assert(collect(plan) {
case c: AQEShuffleReadExec => c
}.isEmpty)
assert(collect(plan) {
case s: ShuffleExchangeExec if s.shuffleOrigin == origin && s.numPartitions == 2 => s
}.size == 1)
checkAnswer(ds, testData)
}
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "true",
// Pick a small value so that no coalesce can happen.
SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "100",
SQLConf.COALESCE_PARTITIONS_MIN_PARTITION_NUM.key -> "1",
SQLConf.SHUFFLE_PARTITIONS.key -> "2") {
val df = spark.sparkContext.parallelize(
(1 to 100).map(i => TestData(i, i.toString)), 10).toDF()
// partition size [1420, 1420]
checkNoCoalescePartitions(df.repartition($"key"), REPARTITION_BY_COL)
// partition size [1140, 1119]
checkNoCoalescePartitions(df.sort($"key"), ENSURE_REQUIREMENTS)
}
}
test("SPARK-34980: Support coalesce partition through union") {
def checkResultPartition(
df: Dataset[Row],
numUnion: Int,
numShuffleReader: Int,
numPartition: Int): Unit = {
df.collect()
assert(collect(df.queryExecution.executedPlan) {
case u: UnionExec => u
}.size == numUnion)
assert(collect(df.queryExecution.executedPlan) {
case r: AQEShuffleReadExec => r
}.size === numShuffleReader)
assert(df.rdd.partitions.length === numPartition)
}
Seq(true, false).foreach { combineUnionEnabled =>
val combineUnionConfig = if (combineUnionEnabled) {
SQLConf.OPTIMIZER_EXCLUDED_RULES.key -> ""
} else {
SQLConf.OPTIMIZER_EXCLUDED_RULES.key ->
"org.apache.spark.sql.catalyst.optimizer.CombineUnions"
}
// advisory partition size 1048576 has no special meaning, just a big enough value
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "true",
SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "1048576",
SQLConf.COALESCE_PARTITIONS_MIN_PARTITION_NUM.key -> "1",
SQLConf.SHUFFLE_PARTITIONS.key -> "10",
combineUnionConfig) {
withTempView("t1", "t2") {
spark.sparkContext.parallelize((1 to 10).map(i => TestData(i, i.toString)), 2)
.toDF().createOrReplaceTempView("t1")
spark.sparkContext.parallelize((1 to 10).map(i => TestData(i, i.toString)), 4)
.toDF().createOrReplaceTempView("t2")
          // positive cases: the shuffle partitions can be coalesced through the union
checkResultPartition(
sql("""
|SELECT key, count(*) FROM t1 GROUP BY key
|UNION ALL
|SELECT * FROM t2
""".stripMargin),
numUnion = 1,
numShuffleReader = 1,
numPartition = 1 + 4)
checkResultPartition(
sql("""
|SELECT key, count(*) FROM t1 GROUP BY key
|UNION ALL
|SELECT * FROM t2
|UNION ALL
|SELECT * FROM t1
""".stripMargin),
numUnion = if (combineUnionEnabled) 1 else 2,
numShuffleReader = 1,
numPartition = 1 + 4 + 2)
checkResultPartition(
sql("""
|SELECT /*+ merge(t2) */ t1.key, t2.key FROM t1 JOIN t2 ON t1.key = t2.key
|UNION ALL
|SELECT key, count(*) FROM t2 GROUP BY key
|UNION ALL
|SELECT * FROM t1
""".stripMargin),
numUnion = if (combineUnionEnabled) 1 else 2,
numShuffleReader = 3,
numPartition = 1 + 1 + 2)
// negative test
checkResultPartition(
sql("SELECT * FROM t1 UNION ALL SELECT * FROM t2"),
numUnion = if (combineUnionEnabled) 1 else 1,
numShuffleReader = 0,
numPartition = 2 + 4
)
}
}
}
}
test("SPARK-35239: Coalesce shuffle partition should handle empty input RDD") {
withTable("t") {
withSQLConf(SQLConf.COALESCE_PARTITIONS_MIN_PARTITION_NUM.key -> "1",
SQLConf.SHUFFLE_PARTITIONS.key -> "2") {
spark.sql("CREATE TABLE t (c1 int) USING PARQUET")
val (_, adaptive) = runAdaptiveAndVerifyResult("SELECT c1, count(*) FROM t GROUP BY c1")
assert(
collect(adaptive) {
case c @ AQEShuffleReadExec(_, partitionSpecs) if partitionSpecs.length == 1 =>
assert(c.hasCoalescedPartition)
c
}.length == 1
)
}
}
}
test("SPARK-35264: Support AQE side broadcastJoin threshold") {
withTempView("t1", "t2") {
def checkJoinStrategy(shouldBroadcast: Boolean): Unit = {
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
val (origin, adaptive) = runAdaptiveAndVerifyResult(
"SELECT t1.c1, t2.c1 FROM t1 JOIN t2 ON t1.c1 = t2.c1")
assert(findTopLevelSortMergeJoin(origin).size == 1)
if (shouldBroadcast) {
assert(findTopLevelBroadcastHashJoin(adaptive).size == 1)
} else {
assert(findTopLevelSortMergeJoin(adaptive).size == 1)
}
}
}
// t1: 1600 bytes
// t2: 160 bytes
spark.sparkContext.parallelize(
(1 to 100).map(i => TestData(i, i.toString)), 10)
.toDF("c1", "c2").createOrReplaceTempView("t1")
spark.sparkContext.parallelize(
(1 to 10).map(i => TestData(i, i.toString)), 5)
.toDF("c1", "c2").createOrReplaceTempView("t2")
checkJoinStrategy(false)
withSQLConf(SQLConf.ADAPTIVE_AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
checkJoinStrategy(false)
}
withSQLConf(SQLConf.ADAPTIVE_AUTO_BROADCASTJOIN_THRESHOLD.key -> "160") {
checkJoinStrategy(true)
}
}
}
test("SPARK-35264: Support AQE side shuffled hash join formula") {
withTempView("t1", "t2") {
def checkJoinStrategy(shouldShuffleHashJoin: Boolean): Unit = {
Seq("100", "100000").foreach { size =>
withSQLConf(SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> size) {
val (origin1, adaptive1) = runAdaptiveAndVerifyResult(
"SELECT t1.c1, t2.c1 FROM t1 JOIN t2 ON t1.c1 = t2.c1")
assert(findTopLevelSortMergeJoin(origin1).size === 1)
if (shouldShuffleHashJoin && size.toInt < 100000) {
val shj = findTopLevelShuffledHashJoin(adaptive1)
assert(shj.size === 1)
assert(shj.head.buildSide == BuildRight)
} else {
assert(findTopLevelSortMergeJoin(adaptive1).size === 1)
}
}
}
// respect user specified join hint
val (origin2, adaptive2) = runAdaptiveAndVerifyResult(
"SELECT /*+ MERGE(t1) */ t1.c1, t2.c1 FROM t1 JOIN t2 ON t1.c1 = t2.c1")
assert(findTopLevelSortMergeJoin(origin2).size === 1)
assert(findTopLevelSortMergeJoin(adaptive2).size === 1)
}
spark.sparkContext.parallelize(
(1 to 100).map(i => TestData(i, i.toString)), 10)
.toDF("c1", "c2").createOrReplaceTempView("t1")
spark.sparkContext.parallelize(
(1 to 10).map(i => TestData(i, i.toString)), 5)
.toDF("c1", "c2").createOrReplaceTempView("t2")
// t1 partition size: [926, 729, 731]
// t2 partition size: [318, 120, 0]
withSQLConf(SQLConf.SHUFFLE_PARTITIONS.key -> "3",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
SQLConf.PREFER_SORTMERGEJOIN.key -> "true") {
// check default value
checkJoinStrategy(false)
withSQLConf(SQLConf.ADAPTIVE_MAX_SHUFFLE_HASH_JOIN_LOCAL_MAP_THRESHOLD.key -> "400") {
checkJoinStrategy(true)
}
withSQLConf(SQLConf.ADAPTIVE_MAX_SHUFFLE_HASH_JOIN_LOCAL_MAP_THRESHOLD.key -> "300") {
checkJoinStrategy(false)
}
withSQLConf(SQLConf.ADAPTIVE_MAX_SHUFFLE_HASH_JOIN_LOCAL_MAP_THRESHOLD.key -> "1000") {
checkJoinStrategy(true)
}
}
}
}
test("SPARK-35650: Coalesce number of partitions by AEQ") {
withSQLConf(SQLConf.COALESCE_PARTITIONS_MIN_PARTITION_NUM.key -> "1") {
Seq("REPARTITION", "REBALANCE(key)")
.foreach {repartition =>
val query = s"SELECT /*+ $repartition */ * FROM testData"
val (_, adaptivePlan) = runAdaptiveAndVerifyResult(query)
collect(adaptivePlan) {
case r: AQEShuffleReadExec => r
} match {
case Seq(aqeShuffleRead) =>
assert(aqeShuffleRead.partitionSpecs.size === 1)
assert(!aqeShuffleRead.isLocalRead)
case _ =>
fail("There should be a AQEShuffleReadExec")
}
}
}
}
test("SPARK-35650: Use local shuffle read if can not coalesce number of partitions") {
withSQLConf(SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "false") {
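      // With partition coalescing disabled, AQE should use a local shuffle read
      // for the REPARTITION shuffle instead.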
val query = "SELECT /*+ REPARTITION */ * FROM testData"
val (_, adaptivePlan) = runAdaptiveAndVerifyResult(query)
collect(adaptivePlan) {
case r: AQEShuffleReadExec => r
} match {
case Seq(aqeShuffleRead) =>
assert(aqeShuffleRead.partitionSpecs.size === 4)
assert(aqeShuffleRead.isLocalRead)
case _ =>
fail("There should be a AQEShuffleReadExec")
}
}
}
test("SPARK-35725: Support optimize skewed partitions in RebalancePartitions") {
withTempView("v") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "true",
SQLConf.ADAPTIVE_OPTIMIZE_SKEWS_IN_REBALANCE_PARTITIONS_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
SQLConf.SHUFFLE_PARTITIONS.key -> "5",
SQLConf.COALESCE_PARTITIONS_MIN_PARTITION_NUM.key -> "1") {
spark.sparkContext.parallelize(
(1 to 10).map(i => TestData(if (i > 4) 5 else i, i.toString)), 3)
.toDF("c1", "c2").createOrReplaceTempView("v")
def checkPartitionNumber(
query: String, skewedPartitionNumber: Int, totalNumber: Int): Unit = {
val (_, adaptive) = runAdaptiveAndVerifyResult(query)
val read = collect(adaptive) {
case read: AQEShuffleReadExec => read
}
assert(read.size == 1)
assert(read.head.partitionSpecs.count(_.isInstanceOf[PartialReducerPartitionSpec]) ==
skewedPartitionNumber)
assert(read.head.partitionSpecs.size == totalNumber)
}
withSQLConf(SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "150") {
// partition size [0,258,72,72,72]
checkPartitionNumber("SELECT /*+ REBALANCE(c1) */ * FROM v", 2, 4)
// partition size [72,216,216,144,72]
checkPartitionNumber("SELECT /*+ REBALANCE */ * FROM v", 4, 7)
}
// no skewed partition should be optimized
withSQLConf(SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "10000") {
checkPartitionNumber("SELECT /*+ REBALANCE(c1) */ * FROM v", 0, 1)
}
}
}
}
test("SPARK-35888: join with a 0-partition table") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.COALESCE_PARTITIONS_MIN_PARTITION_NUM.key -> "1",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
SQLConf.ADAPTIVE_OPTIMIZER_EXCLUDED_RULES.key -> AQEPropagateEmptyRelation.ruleName) {
withTempView("t2") {
        // create a temp view with 0 partitions
spark.createDataFrame(sparkContext.emptyRDD[Row], new StructType().add("b", IntegerType))
.createOrReplaceTempView("t2")
val (_, adaptive) =
runAdaptiveAndVerifyResult("SELECT * FROM testData2 t1 left semi join t2 ON t1.a=t2.b")
val aqeReads = collect(adaptive) {
case c: AQEShuffleReadExec => c
}
assert(aqeReads.length == 2)
aqeReads.foreach { c =>
val stats = c.child.asInstanceOf[QueryStageExec].getRuntimeStatistics
assert(stats.sizeInBytes >= 0)
assert(stats.rowCount.get >= 0)
}
}
}
}
test("SPARK-33832: Support optimize skew join even if introduce extra shuffle") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.ADAPTIVE_OPTIMIZE_SKEWS_IN_REBALANCE_PARTITIONS_ENABLED.key -> "false",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
SQLConf.SKEW_JOIN_SKEWED_PARTITION_THRESHOLD.key -> "100",
SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "100",
SQLConf.COALESCE_PARTITIONS_MIN_PARTITION_NUM.key -> "1",
SQLConf.SHUFFLE_PARTITIONS.key -> "10",
SQLConf.ADAPTIVE_FORCE_OPTIMIZE_SKEWED_JOIN.key -> "true") {
withTempView("skewData1", "skewData2") {
spark
.range(0, 1000, 1, 10)
.selectExpr("id % 3 as key1", "id as value1")
.createOrReplaceTempView("skewData1")
spark
.range(0, 1000, 1, 10)
.selectExpr("id % 1 as key2", "id as value2")
.createOrReplaceTempView("skewData2")
        // Check the cases where the optimized skew join no longer satisfies the required
        // distribution and an extra shuffle has to be introduced.
Seq(true, false).foreach { hasRequiredDistribution =>
Seq(true, false).foreach { hasPartitionNumber =>
val repartition = if (hasRequiredDistribution) {
s"/*+ repartition(${ if (hasPartitionNumber) "10," else ""}key1) */"
} else {
""
}
// check required distribution and extra shuffle
val (_, adaptive1) =
runAdaptiveAndVerifyResult(s"SELECT $repartition key1 FROM skewData1 " +
s"JOIN skewData2 ON key1 = key2 GROUP BY key1")
val shuffles1 = collect(adaptive1) {
case s: ShuffleExchangeExec => s
}
assert(shuffles1.size == 3)
// shuffles1.head is the top-level shuffle under the Aggregate operator
assert(shuffles1.head.shuffleOrigin == ENSURE_REQUIREMENTS)
val smj1 = findTopLevelSortMergeJoin(adaptive1)
assert(smj1.size == 1 && smj1.head.isSkewJoin)
// only check required distribution
val (_, adaptive2) =
runAdaptiveAndVerifyResult(s"SELECT $repartition key1 FROM skewData1 " +
s"JOIN skewData2 ON key1 = key2")
val shuffles2 = collect(adaptive2) {
case s: ShuffleExchangeExec => s
}
if (hasRequiredDistribution) {
assert(shuffles2.size == 3)
val finalShuffle = shuffles2.head
if (hasPartitionNumber) {
assert(finalShuffle.shuffleOrigin == REPARTITION_BY_NUM)
} else {
assert(finalShuffle.shuffleOrigin == REPARTITION_BY_COL)
}
} else {
assert(shuffles2.size == 2)
}
val smj2 = findTopLevelSortMergeJoin(adaptive2)
assert(smj2.size == 1 && smj2.head.isSkewJoin)
}
}
}
}
}
test("SPARK-35968: AQE coalescing should not produce too small partitions by default") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
val (_, adaptive) =
runAdaptiveAndVerifyResult("SELECT sum(id) FROM RANGE(10) GROUP BY id % 3")
val coalesceRead = collect(adaptive) {
case r: AQEShuffleReadExec if r.hasCoalescedPartition => r
}
assert(coalesceRead.length == 1)
// RANGE(10) is a very small dataset and AQE coalescing should produce one partition.
assert(coalesceRead.head.partitionSpecs.length == 1)
}
}
test("SPARK-35794: Allow custom plugin for cost evaluator") {
CostEvaluator.instantiate(
classOf[SimpleShuffleSortCostEvaluator].getCanonicalName, spark.sparkContext.getConf)
intercept[IllegalArgumentException] {
CostEvaluator.instantiate(
classOf[InvalidCostEvaluator].getCanonicalName, spark.sparkContext.getConf)
}
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val query = "SELECT * FROM testData join testData2 ON key = a where value = '1'"
withSQLConf(SQLConf.ADAPTIVE_CUSTOM_COST_EVALUATOR_CLASS.key ->
"org.apache.spark.sql.execution.adaptive.SimpleShuffleSortCostEvaluator") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(query)
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
checkNumLocalShuffleReads(adaptivePlan)
}
withSQLConf(SQLConf.ADAPTIVE_CUSTOM_COST_EVALUATOR_CLASS.key ->
"org.apache.spark.sql.execution.adaptive.InvalidCostEvaluator") {
intercept[IllegalArgumentException] {
runAdaptiveAndVerifyResult(query)
}
}
}
}
test("SPARK-36020: Check logical link in remove redundant projects") {
withTempView("t") {
spark.range(10).selectExpr("id % 10 as key", "cast(id * 2 as int) as a",
"cast(id * 3 as int) as b", "array(id, id + 1, id + 3) as c").createOrReplaceTempView("t")
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
SQLConf.ADAPTIVE_AUTO_BROADCASTJOIN_THRESHOLD.key -> "800") {
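        // With the static broadcast threshold disabled, the AQE-side threshold (800 bytes)
        // should turn the planned sort-merge join into a broadcast hash join at runtime.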
val query =
"""
|WITH tt AS (
| SELECT key, a, b, explode(c) AS c FROM t
|)
|SELECT t1.key, t1.c, t2.key, t2.c
|FROM (SELECT a, b, c, key FROM tt WHERE a > 1) t1
|JOIN (SELECT a, b, c, key FROM tt) t2
| ON t1.key = t2.key
|""".stripMargin
val (origin, adaptive) = runAdaptiveAndVerifyResult(query)
assert(findTopLevelSortMergeJoin(origin).size == 1)
assert(findTopLevelBroadcastHashJoin(adaptive).size == 1)
}
}
}
test("SPARK-35874: AQE Shuffle should wait for its subqueries to finish before materializing") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
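      // The DISTRIBUTE BY expressions include a scalar subquery; the shuffle must wait
      // for it to finish, and the test only verifies that the query completes correctly.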
val query = "SELECT b FROM testData2 DISTRIBUTE BY (b, (SELECT max(key) FROM testData))"
runAdaptiveAndVerifyResult(query)
}
}
test("SPARK-36032: Use inputPlan instead of currentPhysicalPlan to initialize logical link") {
withTempView("v") {
spark.sparkContext.parallelize(
(1 to 10).map(i => TestData(i, i.toString)), 2)
.toDF("c1", "c2").createOrReplaceTempView("v")
Seq("-1", "10000").foreach { aqeBhj =>
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
SQLConf.ADAPTIVE_AUTO_BROADCASTJOIN_THRESHOLD.key -> aqeBhj,
SQLConf.SHUFFLE_PARTITIONS.key -> "1") {
val (origin, adaptive) = runAdaptiveAndVerifyResult(
"""
|SELECT * FROM v t1 JOIN (
| SELECT c1 + 1 as c3 FROM v
|)t2 ON t1.c1 = t2.c3
|SORT BY c1
""".stripMargin)
if (aqeBhj.toInt < 0) {
// 1 sort since spark plan has no shuffle for SMJ
assert(findTopLevelSort(origin).size == 1)
// 2 sorts in SMJ
assert(findTopLevelSort(adaptive).size == 2)
} else {
assert(findTopLevelSort(origin).size == 1)
// 1 sort at top node and BHJ has no sort
assert(findTopLevelSort(adaptive).size == 1)
}
}
}
}
}
test("SPARK-36424: Support eliminate limits in AQE Optimizer") {
withTempView("v") {
spark.sparkContext.parallelize(
(1 to 10).map(i => TestData(i, if (i > 2) "2" else i.toString)), 2)
.toDF("c1", "c2").createOrReplaceTempView("v")
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.SHUFFLE_PARTITIONS.key -> "3") {
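        // Only two distinct values of c2 exist, so the aggregate returns fewer rows
        // than the LIMIT and AQE can drop the limit node.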
val (origin1, adaptive1) = runAdaptiveAndVerifyResult(
"""
|SELECT c2, sum(c1) FROM v GROUP BY c2 LIMIT 5
""".stripMargin)
assert(findTopLevelLimit(origin1).size == 1)
assert(findTopLevelLimit(adaptive1).isEmpty)
// eliminate limit through filter
val (origin2, adaptive2) = runAdaptiveAndVerifyResult(
"""
|SELECT c2, sum(c1) FROM v GROUP BY c2 HAVING sum(c1) > 1 LIMIT 5
""".stripMargin)
assert(findTopLevelLimit(origin2).size == 1)
assert(findTopLevelLimit(adaptive2).isEmpty)
}
}
}
test("SPARK-37063: OptimizeSkewInRebalancePartitions support optimize non-root node") {
withTempView("v") {
withSQLConf(
SQLConf.ADAPTIVE_OPTIMIZE_SKEWS_IN_REBALANCE_PARTITIONS_ENABLED.key -> "true",
SQLConf.SHUFFLE_PARTITIONS.key -> "1",
SQLConf.COALESCE_PARTITIONS_MIN_PARTITION_NUM.key -> "1") {
spark.sparkContext.parallelize(
(1 to 10).map(i => TestData(if (i > 2) 2 else i, i.toString)), 2)
.toDF("c1", "c2").createOrReplaceTempView("v")
def checkRebalance(query: String, numShufflePartitions: Int): Unit = {
val (_, adaptive) = runAdaptiveAndVerifyResult(query)
assert(adaptive.collect {
case sort: SortExec => sort
}.size == 1)
val read = collect(adaptive) {
case read: AQEShuffleReadExec => read
}
assert(read.size == 1)
assert(read.head.partitionSpecs.forall(_.isInstanceOf[PartialReducerPartitionSpec]))
assert(read.head.partitionSpecs.size == numShufflePartitions)
}
withSQLConf(SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "50") {
checkRebalance("SELECT /*+ REBALANCE(c1) */ * FROM v SORT BY c1", 2)
checkRebalance("SELECT /*+ REBALANCE */ * FROM v SORT BY c1", 2)
}
}
}
}
}
/**
* Invalid implementation class for [[CostEvaluator]].
*/
private class InvalidCostEvaluator() {}
/**
* A simple [[CostEvaluator]] to count number of [[ShuffleExchangeLike]] and [[SortExec]].
*/
private case class SimpleShuffleSortCostEvaluator() extends CostEvaluator {
override def evaluateCost(plan: SparkPlan): Cost = {
val cost = plan.collect {
case s: ShuffleExchangeLike => s
case s: SortExec => s
}.size
SimpleCost(cost)
}
}
| nchammas/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/adaptive/AdaptiveQueryExecSuite.scala | Scala | apache-2.0 | 91,648 |
package scala.meta.internal.tokenizers
import scala.collection.mutable
object Compat {
def newMutableMap[A, B] = mutable.Map.empty[A, B]
}
| scalameta/scalameta | scalameta/tokenizers/native/src/main/scala/scala/meta/internal/tokenizers/Compat.scala | Scala | bsd-3-clause | 143 |
package utils
import com.mongodb.casbah.MongoConnection
import play.api.Play
import com.mongodb.casbah.MongoDB
object MongoHQConfig {
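  // All of these settings are required: `.get` throws if a key is missing
  // from the Play configuration.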
val mongoServer = Play.current.configuration.getString("mongoServer").get
val mongoPort = Play.current.configuration.getString("mongoPort").get.toInt
val databaseName = Play.current.configuration.getString("databaseName").get
val dbUserName = Play.current.configuration.getString("dbUserName").get
val dbUserPassword = Play.current.configuration.getString("dbUserPassword").get
val mongoDB = MongoConnection(mongoServer, mongoPort)(databaseName)
mongoDB.authenticate(dbUserName, dbUserPassword)
} | knoldus/Play-Starter-Template | app/utils/MongoHQConfig.scala | Scala | apache-2.0 | 646 |