| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 5-1M | stringlengths 5-109 | stringlengths 6-208 | stringclasses 1 value | stringclasses 15 values | int64 5-1M |
package org.scaladebugger.api.profiles.traits.info
import org.scaladebugger.api.virtualmachines.ScalaVirtualMachine
/**
* Represents the interface that needs to be implemented to provide
* miscellaneous info functionality for a specific debug profile.
*/
trait MiscInfo {
/**
* Retrieves the Scala virtual machine associated with this profile instance.
*
* @return The Scala virtual machine instance
*/
def toScalaVirtualMachine: ScalaVirtualMachine
/**
* Retrieves the list of available lines for a specific file.
*
* @param fileName The name of the file whose lines to retrieve
* @return Some list of breakpointable lines if the file exists,
* otherwise None
*/
def availableLinesForFile(fileName: String): Option[Seq[Int]]
/**
* Retrieves all source paths for the given source name.
*
* @example sourceNameToPaths("file.scala") yields
* Seq("path/to/file.scala", "other/path/to/file.scala")
* @param sourceName The source (file) name whose associated paths to find
* @return The collection of source paths
*/
def sourceNameToPaths(sourceName: String): Seq[String]
/**
* Represents the name of the class used as the entry point for this VM.
*
* @return The main class name as a string
*/
def mainClassName: String
/**
* Represents the command line arguments used to start this VM.
*
* @return The command line arguments as a collection of strings
*/
def commandLineArguments: Seq[String]
}
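// A minimal usage sketch, assuming some caller already holds a MiscInfo
// implementation (here called `misc`); the object, method and file names below
// are illustrative, only the MiscInfo members come from the trait above.
object MiscInfoUsageSketch {
  def describe(misc: MiscInfo): String = {
    val lines = misc.availableLinesForFile("Main.scala")
      .map(_.mkString(", "))
      .getOrElse("<file not known to this VM>")
    s"main class: ${misc.mainClassName}, args: ${misc.commandLineArguments.mkString(" ")}, breakpointable lines: $lines"
  }
}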
| chipsenkbeil/scala-debugger | scala-debugger-api/src/main/scala/org/scaladebugger/api/profiles/traits/info/MiscInfo.scala | Scala | apache-2.0 | 1,506 |
/*
* Copyright (c) 2011-2012, Alex McGuire, Louis Botterill
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package maker.task.tasks
import org.apache.commons.io.FileUtils._
import maker.project._
import maker.task._
import maker.utils.Stopwatch
import maker.utils.maven.IvyLock
import maker.utils.FileUtils
import maker.PomUtils
/**
* publishes poms and packaged artifacts to the local filesystem at ~/.ivy2/maker-local - subject to change
*/
case class PublishLocalTask(baseProject : BaseProject, version : String) extends Task {
def name = "Publish Local"
def module = baseProject
def upstreamTasks = baseProject match {
case _ : Project => baseProject.immediateUpstreamModules.map(PublishLocalTask(_, version))
case m : Module => PackageMainJarTask(m) :: baseProject.immediateUpstreamModules.map(PublishLocalTask(_, version))
}
def exec(results : Iterable[TaskResult], sw : Stopwatch) = {
IvyLock.synchronized{
doPublish(baseProject, results, sw)
}
}
private def doPublish(baseProject: BaseProject, results : Iterable[TaskResult], sw : Stopwatch) = {
FileUtils.writeToFile(baseProject.publishLocalPomFile, PomUtils.pomXml(baseProject, version))
baseProject match {
case _ : Project =>
case m : Module =>
copyFileToDirectory(m.outputArtifact, m.publishLocalJarDir)
}
DefaultTaskResult(this, true, sw)
}
}
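// A rough sketch, not part of maker itself: publishing a whole project locally
// expands into one PublishLocalTask per immediate upstream module, mirroring
// `upstreamTasks` above. The `project` and `version` parameters are illustrative.
object PublishLocalSketch {
  def publishAll(project: Project, version: String): List[Task] =
    PublishLocalTask(project, version) :: project.immediateUpstreamModules.map(PublishLocalTask(_, version))
}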
| syl20bnr/maker | maker/src/maker/task/tasks/PublishLocalTask.scala | Scala | bsd-2-clause | 2,680 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.types
import org.json4s.JsonAST.JValue
import org.json4s.JsonDSL._
import org.apache.spark.annotation.InterfaceStability
/**
* The data type for Maps. Keys in a map are not allowed to have `null` values.
*
* Please use `DataTypes.createMapType()` to create a specific instance.
*
* @param keyType The data type of map keys.
* @param valueType The data type of map values.
* @param valueContainsNull Indicates if map values have `null` values.
*/
@InterfaceStability.Stable
case class MapType(
keyType: DataType,
valueType: DataType,
valueContainsNull: Boolean) extends DataType {
/** No-arg constructor for kryo. */
def this() = this(null, null, false)
private[sql] def buildFormattedString(prefix: String, builder: StringBuilder): Unit = {
builder.append(s"$prefix-- key: ${keyType.typeName}\\n")
DataType.buildFormattedString(keyType, s"$prefix |", builder)
builder.append(s"$prefix-- value: ${valueType.typeName} " +
s"(valueContainsNull = $valueContainsNull)\\n")
DataType.buildFormattedString(valueType, s"$prefix |", builder)
}
override private[sql] def jsonValue: JValue =
("type" -> typeName) ~
("keyType" -> keyType.jsonValue) ~
("valueType" -> valueType.jsonValue) ~
("valueContainsNull" -> valueContainsNull)
/**
* The default size of a value of the MapType is
* (the default size of the key type + the default size of the value type).
* We assume that there is only 1 element on average in a map. See SPARK-18853.
*/
override def defaultSize: Int = 1 * (keyType.defaultSize + valueType.defaultSize)
override def simpleString: String = s"map<${keyType.simpleString},${valueType.simpleString}>"
override def catalogString: String = s"map<${keyType.catalogString},${valueType.catalogString}>"
override def sql: String = s"MAP<${keyType.sql}, ${valueType.sql}>"
override private[spark] def asNullable: MapType =
MapType(keyType.asNullable, valueType.asNullable, valueContainsNull = true)
override private[spark] def existsRecursively(f: (DataType) => Boolean): Boolean = {
f(this) || keyType.existsRecursively(f) || valueType.existsRecursively(f)
}
}
/**
* @since 1.3.0
*/
@InterfaceStability.Stable
object MapType extends AbstractDataType {
override private[sql] def defaultConcreteType: DataType = apply(NullType, NullType)
override private[sql] def acceptsType(other: DataType): Boolean = {
other.isInstanceOf[MapType]
}
override private[sql] def simpleString: String = "map"
/**
* Construct a [[MapType]] object with the given key type and value type.
* The `valueContainsNull` is true.
*/
def apply(keyType: DataType, valueType: DataType): MapType =
MapType(keyType: DataType, valueType: DataType, valueContainsNull = true)
}
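// A small illustrative sketch (names are arbitrary): building a map type with
// the two-argument factory above, which defaults valueContainsNull to true.
object MapTypeSketch {
  val stringToInt: MapType = MapType(StringType, IntegerType)
  val rendered: String = stringToInt.simpleString // "map<string,int>"
  // defaultSize assumes a single entry on average: key default size + value default size.
  val estimatedSizeInBytes: Int = stringToInt.defaultSize
}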
| michalsenkyr/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/types/MapType.scala | Scala | apache-2.0 | 3,626 |
package info.glennengstrand.news
import java.util.Date
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import info.glennengstrand.io._
/** helper functions for inbound object creation */
object Inbound {
val log = LoggerFactory.getLogger("info.glennengstrand.news.Inbound")
val reader: PersistentDataStoreReader = new CassandraReader
val cache: CacheAware = new MockCache
class InboundBindings extends PersistentDataStoreBindings {
def entity: String = {
"Inbound"
}
def fetchInputs: Iterable[String] = {
List("participantID")
}
def fetchOutputs: Iterable[(String, String)] = {
List(("occurred", "Date"), ("fromParticipantID", "Int"), ("subject", "String"), ("story", "String"))
}
def upsertInputs: Iterable[String] = {
List("participantID", "fromParticipantID", "occurred", "subject", "story")
}
def upsertOutputs: Iterable[(String, String)] = {
List()
}
def fetchOrder: Map[String, String] = {
Map("occurred" -> "desc")
}
}
val bindings = new InboundBindings
def apply(id: Int) : InboundFeed = {
val criteria: Map[String, Any] = Map("participantID" -> id)
new InboundFeed(id, IO.cacheAwareRead(bindings, criteria, reader, cache)) with CassandraWriter with MockCacheAware
}
def apply(state: String): Inbound = {
val s = IO.fromFormPost(state)
new Inbound(Link.toLink(s("participantID").asInstanceOf[String].toLong), IO.df.parse(s("occurred").asInstanceOf[String]), Link.toLink(s("fromParticipantID").asInstanceOf[String].toLong), s("subject").asInstanceOf[String], s("story").asInstanceOf[String]) with CassandraWriter with MockCacheAware
}
}
case class InboundState(participantID: String, occurred: Date, fromParticipantID: String, subject: String, story: String)
/** represents a news item as it appears in your inbound feed */
class Inbound(participantID: String, occurred: Date, fromParticipantID: String, subject: String, story: String) extends InboundState(participantID, occurred, fromParticipantID, subject, story) with MicroServiceSerializable {
this: PersistentDataStoreWriter with CacheAware =>
def getState: Map[String, Any] = {
getState((s) => Link.extractId(participantID).intValue)
}
def getApiState(l:String => Any): Map[String, Any] = {
Map(
"to" -> l(participantID),
"occurred" -> occurred,
"from" -> l(fromParticipantID),
"subject" -> subject,
"story" -> story
)
}
def getState(l:String => Any): Map[String, Any] = {
Map(
"participantID" -> l(participantID),
"occurred" -> occurred,
"fromParticipantID" -> l(fromParticipantID),
"subject" -> subject,
"story" -> story
)
}
/** save item to db */
def save: Unit = {
val criteria: Map[String, Any] = Map(
"participantID" -> Link.extractId(participantID).intValue
)
write(Inbound.bindings, getState, criteria)
invalidate(Inbound.bindings, criteria)
}
override def toJson: String = {
IO.toJson(getApiState((s) => s))
}
override def toJson(factory: FactoryClass): String = toJson
}
/** represents a user's inbound collection of news items */
class InboundFeed(id: Int, state: Iterable[Map[String, Any]]) extends Iterator[Inbound] with MicroServiceSerializable {
val i = state.iterator
def hasNext = i.hasNext
def next() = {
val kv = i.next()
Inbound.log.debug("kv = " + kv)
new Inbound(Link.toLink(id.longValue), IO.convertToDate(kv("occurred")), Link.toLink(IO.convertToInt(kv("fromParticipantID")).longValue), kv("subject").toString, kv("story").toString) with CassandraWriter with MockCacheAware
}
override def toJson: String = {
"[" + map(f => f.toJson).reduce(_ + "," + _) + "]"
}
override def toJson(factory: FactoryClass): String = toJson
}
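// A brief usage sketch mirroring the factory methods above; the participant id
// is illustrative and the call reads from Cassandra through CassandraReader at runtime.
object InboundSketch {
  def feedAsJson(participantId: Int): String = Inbound(participantId).toJson
}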
| gengstrand/clojure-news-feed | server/feed2/src/main/scala/info/glennengstrand/news/Inbound.scala | Scala | epl-1.0 | 3,828 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package services.cache
import config.ApplicationConfig
import connectors.cache.Conversions
import javax.inject.Inject
import org.joda.time.{DateTime, DateTimeZone}
import play.api.libs.json._
import play.modules.reactivemongo.ReactiveMongoComponent
import reactivemongo.api.DefaultDB
import reactivemongo.api.commands.WriteResult
import reactivemongo.api.indexes.{Index, IndexType}
import reactivemongo.bson.{BSONDocument, BSONObjectID}
import reactivemongo.play.json.ImplicitBSONHandlers._
import uk.gov.hmrc.crypto.json.{JsonDecryptor, JsonEncryptor}
import uk.gov.hmrc.crypto.{ApplicationCrypto, _}
import uk.gov.hmrc.http.cache.client.CacheMap
import uk.gov.hmrc.mongo.ReactiveRepository
import uk.gov.hmrc.mongo.json.ReactiveMongoFormats
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.util.{Failure, Success, Try}
// $COVERAGE-OFF$
// Coverage has been turned off for these types, as the only things we can really do with them
// is mock out the mongo connection, which is bad craic. This has all been manually tested in the running application.
case class Cache(id: String, data: Map[String, JsValue], lastUpdated: DateTime = DateTime.now(DateTimeZone.UTC)) {
/**
* Upsert a value into the cache given its key.
* If the data to be inserted is null then remove the entry by key
*/
def upsert[T](key: String, data: JsValue, hasValue: Boolean) = {
val updated = if (hasValue) {
this.data + (key -> data)
}
else {
this.data - (key)
}
this.copy(
data = updated,
lastUpdated = DateTime.now(DateTimeZone.UTC)
)
}
}
object Cache {
implicit val dateFormat = ReactiveMongoFormats.dateTimeFormats
implicit val format = Json.format[Cache]
def apply(cacheMap: CacheMap): Cache = Cache(cacheMap.id, cacheMap.data)
val empty = Cache("", Map())
}
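// A hedged sketch of the upsert semantics above: hasValue = true adds or
// overwrites the key, hasValue = false removes it. The id, key and payload
// values are illustrative only.
object CacheUpsertSketch {
  val base: Cache = Cache("cred-123", Map.empty)
  val updated: Cache = base.upsert("business-details", Json.obj("name" -> "Acme"), hasValue = true)
  val cleared: Cache = updated.upsert("business-details", JsNull, hasValue = false)
}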
/**
* Implements getEncryptedEntry[T], which will decrypt the entry on retrieval.
* This type is itself a Cache.
*
* @param cache The cache to wrap.
* @param crypto The cryptography instance to use to decrypt values
*/
class CryptoCache(cache: Cache, crypto: CompositeSymmetricCrypto) extends Cache(cache.id, cache.data) with CacheOps {
def getEncryptedEntry[T](key: String)(implicit fmt: Reads[T]): Option[T] =
decryptValue(cache, key)(new JsonDecryptor[T]()(crypto, fmt))
}
/**
* An injectable factory for creating new MongoCacheClients
*/
class MongoCacheClientFactory @Inject()(config: ApplicationConfig, applicationCrypto: ApplicationCrypto, component: ReactiveMongoComponent) {
def createClient: MongoCacheClient = new MongoCacheClient(config, component.mongoConnector.db, applicationCrypto)
}
/**
* Implements a client which utilises the GOV UK cache repository to store cached data in Mongo.
*
* @param appConfig The application configuration
*/
class MongoCacheClient(appConfig: ApplicationConfig, db: () => DefaultDB, applicationCrypto: ApplicationCrypto)
extends ReactiveRepository[Cache, BSONObjectID]("app-cache", db, Cache.format)
with Conversions
with CacheOps
{
private val logPrefix = "[MongoCacheClient]"
// $COVERAGE-OFF$
private def debug(msg: String) = logger.debug(s"$logPrefix $msg")
private def error(msg: String, e: Throwable) = logger.error(s"$logPrefix $msg", e)
private def error(msg: String) = logger.error(s"$logPrefix $msg")
// $COVERAGE-ON$
implicit val compositeSymmetricCrypto: CompositeSymmetricCrypto = applicationCrypto.JsonCrypto
val cacheExpiryInSeconds: Int = appConfig.cacheExpiryInSeconds
createIndex("lastUpdated", "cacheExpiry", cacheExpiryInSeconds)
/**
* Inserts data into the cache with the specified key. If the data does not exist, it will be created.
*/
def createOrUpdate[T](credId: String, data: T, key: String)(implicit writes: Writes[T]): Future[Cache] = {
val jsonData = if (appConfig.mongoEncryptionEnabled) {
val jsonEncryptor = new JsonEncryptor[T]()
Json.toJson(Protected(data))(jsonEncryptor)
} else {
Json.toJson(data)
}
fetchAll(Some(credId)) flatMap { maybeNewCache =>
val cache: Cache = maybeNewCache.getOrElse(Cache(credId, Map.empty))
val updatedCache: Cache = cache.copy(
id = credId,
data = cache.data + (key -> jsonData),
lastUpdated = DateTime.now(DateTimeZone.UTC)
)
val document = Json.toJson(updatedCache)
val modifier = BSONDocument("$set" -> document)
collection.update(ordered = false).one(bsonIdQuery(credId), modifier, upsert = true) map { _ => updatedCache }
}
}
/**
* Removes the item with the specified key from the cache
*/
def removeByKey[T](credId: String, key: String): Future[Cache] = {
fetchAll(Some(credId)) flatMap { maybeNewCache =>
val cache = maybeNewCache.getOrElse(Cache(credId, Map.empty))
val updatedCache = cache.copy(
data = cache.data - (key),
lastUpdated = DateTime.now(DateTimeZone.UTC)
)
val document = Json.toJson(updatedCache)
val modifier = BSONDocument("$set" -> document)
collection.update(ordered = false).one(bsonIdQuery(credId), modifier, upsert = true) map { _ => updatedCache }
}
}
/**
* Inserts data into the existing cache object in memory given the specified key. If the data does not exist, it will be created.
*/
def upsert[T](targetCache: CacheMap, data: T, key: String)(implicit writes: Writes[T]): CacheMap = {
val jsonData = if (appConfig.mongoEncryptionEnabled) {
val jsonEncryptor = new JsonEncryptor[T]()
Json.toJson(Protected(data))(jsonEncryptor)
} else {
Json.toJson(data)
}
toCacheMap(Cache(targetCache).upsert[T](key, jsonData, data != None))
}
/**
* Finds an item in the cache with the specified key. If the item cannot be found, None is returned.
*/
def find[T](credId: String, key: String)(implicit reads: Reads[T]): Future[Option[T]] =
fetchAll(credId) map {
case Some(cache) => if (appConfig.mongoEncryptionEnabled) {
decryptValue[T](cache, key)(new JsonDecryptor[T]())
} else {
getValue[T](cache, key)
}
case _ => None
}
/**
* Fetches the whole cache
*/
def fetchAll(credId: String): Future[Option[Cache]] = collection.find(bsonIdQuery(credId), Option.empty[Cache]).one[Cache] map {
case Some(c) if appConfig.mongoEncryptionEnabled => Some(new CryptoCache(c, compositeSymmetricCrypto))
case c => c
}
def fetchAll(credId: Option[String]): Future[Option[Cache]] = {
credId match {
case Some(x) => collection.find(key(x), Option.empty[Cache]).one[Cache] map {
case Some(c) if appConfig.mongoEncryptionEnabled => Some(new CryptoCache(c, compositeSymmetricCrypto))
case c => c
}
case _ => Future.successful(None)
}
}
/**
* Fetches the whole cache, returning an empty default when none exists
*/
def fetchAllWithDefault(credId: String): Future[Cache] =
fetchAll(Some(credId)).map {
_.getOrElse(Cache(credId, Map.empty))
}
/**
* Removes the item with the specified id from the cache
*/
def removeById(credId: String) =
collection.delete().one(key(credId)) map handleWriteResult
/**
* Saves the cache data into the database
*/
def saveAll(cache: Cache): Future[Boolean] = {
// Rebuild the cache and decrypt each key if necessary
val rebuiltCache = Cache(cache.id, cache.data.foldLeft(Map.empty[String, JsValue]) { (acc, value) =>
val plainText = tryDecrypt(Crypted(value._2.toString))
if (appConfig.mongoEncryptionEnabled) {
acc + (value._1 -> JsString(compositeSymmetricCrypto.encrypt(plainText).value))
} else {
acc + (value._1 -> Json.parse(plainText.value))
}
})
collection.update(ordered = false).one(bsonIdQuery(cache.id), BSONDocument("$set" -> Json.toJson(rebuiltCache)), upsert = true) map handleWriteResult
}
def saveAll(cache: Cache, credId: String): Future[Boolean] = {
// Rebuild the cache and decrypt each key if necessary
val rebuiltCache = Cache(credId, cache.data.foldLeft(Map.empty[String, JsValue]) { (acc, value) =>
val plainText = tryDecrypt(Crypted(value._2.toString))
if (appConfig.mongoEncryptionEnabled) {
acc + (value._1 -> JsString(compositeSymmetricCrypto.encrypt(plainText).value))
} else {
acc + (value._1 -> Json.parse(plainText.value))
}
})
collection.update(ordered = false).one(bsonIdQuery(rebuiltCache.id), BSONDocument("$set" -> Json.toJson(rebuiltCache)), upsert = true) map handleWriteResult
}
/**
* Creates a new index on the specified field, using the specified name and the ttl
*/
private def createIndex(field: String, indexName: String, ttl: Int): Future[Boolean] = {
collection.indexesManager.ensure(Index(
Seq((field, IndexType.Ascending)),
Some(indexName),
options = BSONDocument("expireAfterSeconds" -> ttl))
) map { result =>
debug(s"Index $indexName set with value $ttl -> result: $result")
result
} recover {
case e => error("Failed to set TTL index", e); false
}
}
/**
* Generates a BSON document query for an id
*/
private def bsonIdQuery(id: String) = BSONDocument("_id" -> id)
private def key(id: String) = bsonIdQuery(id)
/**
* Handles logging for write results
*/
private def handleWriteResult(writeResult: WriteResult) = writeResult match {
case w if w.ok => true
case w if w.writeErrors.nonEmpty =>
w.writeErrors.map(_.errmsg).foreach(e => error(e))
throw new RuntimeException(w.writeErrors.map(_.errmsg).mkString("; "))
case _ =>
throw new RuntimeException("Error while removing the session data")
}
private def tryDecrypt(value: Crypted): PlainText = Try {
compositeSymmetricCrypto.decrypt(value).value
} match {
case Success(v) => PlainText(v)
case Failure(e) if e.isInstanceOf[SecurityException] => PlainText(value.value)
case Failure(e) => throw e
}
}
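// A hedged round-trip sketch: write one value for a credential id and read it
// back. Constructing `client` is elided because it needs a live Mongo
// connection; the key and payload names are illustrative.
object MongoCacheClientSketch {
  def roundTrip(client: MongoCacheClient, credId: String): Future[Option[String]] =
    client.createOrUpdate(credId, "example-value", "example-key")
      .flatMap(_ => client.find[String](credId, "example-key"))
}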
// $COVERAGE-ON$
| hmrc/amls-frontend | app/services/cache/MongoCacheClient.scala | Scala | apache-2.0 | 10,720 |
package info.armado.ausleihe.client.remote.services
import java.time.Year
import info.armado.ausleihe.client.transport.dataobjects.entities._
import info.armado.ausleihe.client.transport.dataobjects.inuse._
import info.armado.ausleihe.client.transport.requests.IssueIdentityCardRequestDTO
import info.armado.ausleihe.client.transport.results._
import org.arquillian.ape.rdbms.{ShouldMatchDataSet, UsingDataSet}
import org.jboss.arquillian.extension.rest.client.ArquillianResteasyResource
import org.jboss.arquillian.junit.Arquillian
import org.junit.Test
import org.junit.runner.RunWith
import org.scalatest.Matchers.{convertToAnyShouldWrapper, equal}
import org.scalatest.junit.JUnitSuite
object IssueIdentityCardsServiceTest extends WebDeployment
@RunWith(classOf[Arquillian])
class IssueIdentityCardsServiceTest extends JUnitSuite {
@Test
@UsingDataSet(Array("datasets/initial.xml"))
@ShouldMatchDataSet(
value = Array("datasets/issue-identity-card.xml"),
excludeColumns = Array("LENDIDENTITYCARD.ID", "LENDIDENTITYCARD.LENDTIME")
)
def successfulIssueIdentityCard(
@ArquillianResteasyResource issueIdentityCardsService: IssueIdentityCardsService
): Unit = {
issueIdentityCardsService.issueIdentityCard(IssueIdentityCardRequestDTO("33000032", "44000035")) should equal(
IssueIdentityCardSuccessDTO(IdentityCardDTO("33000032"), EnvelopeDTO("44000035"))
)
}
@Test
@UsingDataSet(Array("datasets/initial.xml"))
@ShouldMatchDataSet(Array("datasets/initial.xml"))
def alreadyIssuedIdentityCardWithGamesIssueIdentityCard(
@ArquillianResteasyResource issueIdentityCardsService: IssueIdentityCardsService
): Unit = {
issueIdentityCardsService.issueIdentityCard(IssueIdentityCardRequestDTO("33000010", "44000035")) should equal(
LendingEntityInUseDTO(
IdentityCardDTO("33000010", "Marc Arndt"),
IdentityCardInUseDTO(
EnvelopeDTO("44000013"),
Array(
GameDTO(
"11000014",
"Titel 1",
"Autor 1",
"Verlag 1",
12,
PlayerCountDTO(2),
DurationDTO(90, 120),
Year.of(2016)
),
GameDTO(
"11000025",
"Titel 2",
"Autor 1",
"Verlag 2",
15,
null,
DurationDTO(90, 120),
null
),
GameDTO(
"11000036",
"Titel 2",
"Autor 1",
"Verlag 2",
15,
null,
DurationDTO(90, 120),
Year.of(2015)
)
)
)
)
)
}
@Test
@UsingDataSet(Array("datasets/initial.xml"))
@ShouldMatchDataSet(Array("datasets/initial.xml"))
def alreadyIssuedIdentityCardWithoutGamesIssueIdentityCard(
@ArquillianResteasyResource issueIdentityCardsService: IssueIdentityCardsService
): Unit = {
issueIdentityCardsService.issueIdentityCard(IssueIdentityCardRequestDTO("33000101", "44000035")) should equal(
LendingEntityInUseDTO(
IdentityCardDTO("33000101"),
IdentityCardInUseDTO(EnvelopeDTO("44000104"), Array())
)
)
}
@Test
@UsingDataSet(Array("datasets/initial.xml"))
@ShouldMatchDataSet(Array("datasets/initial.xml"))
def alreadyIssuedEnvelopeIssueIdentityCard(
@ArquillianResteasyResource issueIdentityCardsService: IssueIdentityCardsService
): Unit = {
issueIdentityCardsService.issueIdentityCard(IssueIdentityCardRequestDTO("33000032", "44000013")) should equal(
LendingEntityInUseDTO(
EnvelopeDTO("44000013"),
EnvelopeInUseDTO(
IdentityCardDTO("33000010", "Marc Arndt"),
Array(
GameDTO(
"11000014",
"Titel 1",
"Autor 1",
"Verlag 1",
12,
PlayerCountDTO(2),
DurationDTO(90, 120),
Year.of(2016)
),
GameDTO(
"11000025",
"Titel 2",
"Autor 1",
"Verlag 2",
15,
null,
DurationDTO(90, 120),
null
),
GameDTO(
"11000036",
"Titel 2",
"Autor 1",
"Verlag 2",
15,
null,
DurationDTO(90, 120),
Year.of(2015)
)
)
)
)
)
}
@Test
@UsingDataSet(Array("datasets/initial.xml"))
@ShouldMatchDataSet(Array("datasets/initial.xml"))
def notActivatedIdentityCardIssueIdentityCard(
@ArquillianResteasyResource issueIdentityCardsService: IssueIdentityCardsService
): Unit = {
issueIdentityCardsService.issueIdentityCard(IssueIdentityCardRequestDTO("33000043", "44000035")) should equal(
LendingEntityNotExistsDTO("33000043")
)
}
@Test
@UsingDataSet(Array("datasets/initial.xml"))
@ShouldMatchDataSet(Array("datasets/initial.xml"))
def notActivatedEnvelopeIssueIdentityCard(
@ArquillianResteasyResource issueIdentityCardsService: IssueIdentityCardsService
): Unit = {
issueIdentityCardsService.issueIdentityCard(IssueIdentityCardRequestDTO("33000032", "44000046")) should equal(
LendingEntityNotExistsDTO("44000046")
)
}
@Test
@UsingDataSet(Array("datasets/initial.xml"))
@ShouldMatchDataSet(Array("datasets/initial.xml"))
def notExistingIdentityCardIssueIdentityCard(
@ArquillianResteasyResource issueIdentityCardsService: IssueIdentityCardsService
): Unit = {
issueIdentityCardsService.issueIdentityCard(IssueIdentityCardRequestDTO("33000054", "44000035")) should equal(
LendingEntityNotExistsDTO("33000054")
)
}
@Test
@UsingDataSet(Array("datasets/initial.xml"))
@ShouldMatchDataSet(Array("datasets/initial.xml"))
def notExistingEnvelopeIssueIdentityCard(
@ArquillianResteasyResource issueIdentityCardsService: IssueIdentityCardsService
): Unit = {
issueIdentityCardsService.issueIdentityCard(IssueIdentityCardRequestDTO("33000032", "44000057")) should equal(
LendingEntityNotExistsDTO("44000057")
)
}
@Test
@UsingDataSet(Array("datasets/initial.xml"))
@ShouldMatchDataSet(Array("datasets/initial.xml"))
def incorrectIdentityCardBarcodeIssueIdentityCard(
@ArquillianResteasyResource issueIdentityCardsService: IssueIdentityCardsService
): Unit = {
issueIdentityCardsService.issueIdentityCard(IssueIdentityCardRequestDTO("33000011", "44000035")) should equal(
IncorrectBarcodeDTO("33000011")
)
}
@Test
@UsingDataSet(Array("datasets/initial.xml"))
@ShouldMatchDataSet(Array("datasets/initial.xml"))
def incorrectEnvelopeBarcodeIssueIdentityCard(
@ArquillianResteasyResource issueIdentityCardsService: IssueIdentityCardsService
): Unit = {
issueIdentityCardsService.issueIdentityCard(IssueIdentityCardRequestDTO("33000032", "44000014")) should equal(
IncorrectBarcodeDTO("44000014")
)
}
}
| Spielekreis-Darmstadt/lending | lending-client-backend/src/test/scala/info/armado/ausleihe/client/remote/services/IssueIdentityCardsServiceTest.scala | Scala | apache-2.0 | 7,092 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.planning
import org.apache.spark.Logging
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.trees.TreeNode
/**
* Given a [[plans.logical.LogicalPlan LogicalPlan]], returns a list of `PhysicalPlan`s that can
* be used for execution. If this strategy does not apply to the given logical operation then an
* empty list should be returned.
*/
abstract class GenericStrategy[PhysicalPlan <: TreeNode[PhysicalPlan]] extends Logging {
def apply(plan: LogicalPlan): Seq[PhysicalPlan]
}
/**
* Abstract class for transforming [[plans.logical.LogicalPlan LogicalPlan]]s into physical plans.
* Child classes are responsible for specifying a list of [[Strategy]] objects that each of which
* can return a list of possible physical plan options. If a given strategy is unable to plan all
* of the remaining operators in the tree, it can call [[planLater]], which returns a placeholder
* object that will be filled in using other available strategies.
*
* TODO: RIGHT NOW ONLY ONE PLAN IS RETURNED EVER...
* PLAN SPACE EXPLORATION WILL BE IMPLEMENTED LATER.
*
* @tparam PhysicalPlan The type of physical plan produced by this [[QueryPlanner]]
*/
abstract class QueryPlanner[PhysicalPlan <: TreeNode[PhysicalPlan]] {
/** A list of execution strategies that can be used by the planner */
def strategies: Seq[GenericStrategy[PhysicalPlan]]
/**
* Returns a placeholder for a physical plan that executes `plan`. This placeholder will be
* filled in automatically by the QueryPlanner using the other execution strategies that are
* available.
*/
protected def planLater(plan: LogicalPlan) = this.plan(plan).next()
def plan(plan: LogicalPlan): Iterator[PhysicalPlan] = {
// Obviously a lot to do here still...
val iter = strategies.view.flatMap(_(plan)).toIterator
assert(iter.hasNext, s"No plan for $plan")
iter
}
}
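// An illustrative (non-Spark) strategy showing the contract described above: a
// strategy either returns candidate physical plans or Nil so that the planner
// falls through to the next strategy. The class name is hypothetical.
class DeclineEverythingStrategy[P <: TreeNode[P]] extends GenericStrategy[P] {
  def apply(plan: LogicalPlan): Seq[P] = Nil // decline; another strategy (or planLater) must handle it
}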
| ArvinDevel/onlineAggregationOnSparkV2 | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/QueryPlanner.scala | Scala | apache-2.0 | 2,752 |
/*
* Copyright 2016 rdbc contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.rdbc.pgsql.core.internal.typecodec.sco
import io.rdbc.pgsql.core.types._
private[typecodec] object ScodecPgVarcharCodec
extends ScodecStringLikeCodec[PgVarchar] {
val typ = PgVarcharType
}
| rdbc-io/rdbc-pgsql | rdbc-pgsql-core/src/main/scala/io/rdbc/pgsql/core/internal/typecodec/sco/ScodecPgVarcharCodec.scala | Scala | apache-2.0 | 809 |
package org.ddecap.snv
import org.bdgenomics.adam.rdd.ADAMContext
import org.bdgenomics.adam.rdd.ADAMContext._
import org.bdgenomics.adam.models.VariantContext
import org.bdgenomics.adam.models.ReferenceRegion
import org.bdgenomics.adam.rdd.ShuffleRegionJoin
import org.bdgenomics.formats.avro._
import org.apache.spark.rdd.RDD
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.conf.Configuration
import org.tmoerman.adam.fx.snpeff.SnpEffContext
import org.bdgenomics.adam.models.SequenceDictionary
import htsjdk.samtools.SAMSequenceRecord
import htsjdk.samtools.SAMSequenceDictionary
import org.apache.spark.SparkContext
import scalax.io.Resource
import scalax.io._
object analysis {
val matchingDpHist = """
function(data, headers, chart /*dimple: http://dimplejs.org/*/) {
chart.setBounds(80, 20, 480, 330)
var y =chart.addMeasureAxis("y", "_3");
y.title = "% variants"
var x =chart.addCategoryAxis("x", ["_1", "_2"]);
x.title = "RD in matching sample"
var s = chart.addSeries("_2", dimple.plot.bar);
chart.addLegend(200, 10, 380, 20, "right");
chart.draw();
}
"""
val RocGraphA = """
function(data, headers, chart /*dimple: http://dimplejs.org/*/) {
chart.setBounds(80, 20, 480, 330)
var y =chart.addMeasureAxis("y", "_3");
y.overrideMin = 0
y.overrideMax = 1
y.title = "1 - FPR_A"
var x =chart.addMeasureAxis("x", "_2");
x.overrideMin = 0
x.overrideMax = 1
x.title = "1 - TPR"
chart.addSeries("_1", dimple.plot.line);
chart.draw();
}
"""
val RocGraphB = """
function(data, headers, chart /*dimple: http://dimplejs.org/*/) {
chart.setBounds(80, 20, 480, 330)
var y =chart.addMeasureAxis("y", "_5");
y.overrideMin = 0
y.overrideMax = 1
y.title = "1 - FPR_B"
var x =chart.addMeasureAxis("x", "_4");
x.overrideMin = 0
x.overrideMax = 1
x.title = "1 - TPR"
chart.addSeries("_1", dimple.plot.line);
chart.draw();
}
"""
val RocGraph2A = """
function(data, headers, chart /*dimple: http://dimplejs.org/*/) {
chart.setBounds(80, 20, 480, 330)
var y =chart.addMeasureAxis("y", "_4");
y.overrideMin = 0
y.overrideMax = 1
y.title = "1 - FPR_A"
var x =chart.addMeasureAxis("x", "_3");
x.overrideMin = 0
x.overrideMax = 1
x.title = "1 - TPR"
var s = chart.addSeries(["_2", "_1"], dimple.plot.line);
s.addOrderRule("_1")
chart.addLegend(200, 10, 380, 20, "right");
chart.draw();
}
"""
val RocGraph2B = """
function(data, headers, chart /*dimple: http://dimplejs.org/*/) {
chart.setBounds(80, 20, 480, 330)
var y =chart.addMeasureAxis("y", "_6");
y.overrideMin = 0
y.overrideMax = 1
y.title = "1 - FPR_B"
var x =chart.addMeasureAxis("x", "_5");
x.overrideMin = 0
x.overrideMax = 1
x.title = "1 - TPR"
var s = chart.addSeries(["_2", "_1"], dimple.plot.line);
s.addOrderRule("_1")
chart.addLegend(200, 10, 380, 20, "right");
chart.draw();
}
"""
val RocGraphOutA = """
function(data, headers, chart /*dimple: http://dimplejs.org/*/) {
chart.setBounds(80, 20, 480, 330)
var y =chart.addMeasureAxis("y", "_4");
y.overrideMin = 0
y.overrideMax = 1
y.title = "1 - FPR_A"
var x =chart.addMeasureAxis("x", "_3");
x.overrideMin = 0
x.overrideMax = 1
x.title = "1 - TPR"
var s = chart.addSeries("_3", dimple.plot.line);
chart.addLegend(200, 10, 380, 20, "right");
chart.draw();
}
"""
val RocGraphOutB = """
function(data, headers, chart /*dimple: http://dimplejs.org/*/) {
chart.setBounds(80, 20, 480, 330)
var y =chart.addMeasureAxis("y", "_6");
y.overrideMin = 0
y.overrideMax = 1
y.title = "1 - FPR_A"
var x =chart.addMeasureAxis("x", "_5");
x.overrideMin = 0
x.overrideMax = 1
x.title = "1 - TPR"
var s = chart.addSeries("_5", dimple.plot.line);
chart.addLegend(200, 10, 380, 20, "right");
chart.draw();
}
"""
val QHist = """
function(data, headers, chart /*dimple: http://dimplejs.org/*/) {
chart.setBounds(80, 20, 480, 330)
var y =chart.addMeasureAxis("y", "_3");
y.title = "normalized % variants"
var x =chart.addLogAxis("x", "_2");
x.title = "Quality"
var s = chart.addSeries(["_2", "_1"], dimple.plot.line);
chart.addLegend(200, 10, 380, 20, "right");
chart.draw();
}
"""
val RDHist = """
function(data, headers, chart /*dimple: http://dimplejs.org/*/) {
chart.setBounds(80, 20, 480, 330)
var y =chart.addMeasureAxis("y", "_3");
y.title = "normalized % variants"
var x =chart.addMeasureAxis("x", "_2");
x.title = "ReadDepth"
var s = chart.addSeries(["_2", "_1"], dimple.plot.line);
chart.addLegend(200, 10, 380, 20, "right");
chart.draw();
}
"""
val MRDHist = """
function(data, headers, chart /*dimple: http://dimplejs.org/*/) {
chart.setBounds(80, 20, 480, 330)
var y =chart.addMeasureAxis("y", "_3");
y.title = "normalized % variants"
var x =chart.addMeasureAxis("x", "_2");
x.title = "Matching ReadDepth"
var s = chart.addSeries(["_2", "_1"], dimple.plot.line);
chart.addLegend(200, 10, 380, 20, "right");
chart.draw();
}
"""
val QBDHist = """
function(data, headers, chart /*dimple: http://dimplejs.org/*/) {
chart.setBounds(80, 20, 480, 330)
var y =chart.addMeasureAxis("y", "_3");
y.title = "normalized % variants"
var x =chart.addMeasureAxis("x", "_2");
x.title = "QualityByDepth"
var s = chart.addSeries(["_2", "_1"], dimple.plot.line);
chart.addLegend(200, 10, 380, 20, "right");
chart.draw();
}
"""
val annoFreqGraph = """
function(data, headers, chart /*dimple: http://dimplejs.org/*/) {
chart.setBounds(80, 20, 480, 330)
var y =chart.addLogAxis("y", "_3");
y.title = "# variants"
var x =chart.addCategoryAxis("x", ["_2", "_1"]);
x.title = "annotation type"
chart.addSeries("_1", dimple.plot.bar);
chart.addLegend(200, 10, 380, 20, "right");
chart.draw();
}
"""
val annoFreqBarGraph = """
function(data, headers, chart /*dimple: http://dimplejs.org/*/) {
chart.setBounds(80, 20, 480, 330)
var y =chart.addMeasureAxis("y", "_3");
y.title = "# variants"
var x =chart.addCategoryAxis("x", "_2");
x.title = "annotation type"
chart.addSeries("_1", dimple.plot.bar);
chart.addLegend(200, 10, 380, 20, "right");
chart.draw();
}
"""
val biotypeFreqBarGraph = """
function(data, headers, chart /*dimple: http://dimplejs.org/*/) {
chart.setBounds(80, 20, 480, 330)
var y =chart.addMeasureAxis("y", "_3");
y.title = "# variants"
var x =chart.addCategoryAxis("x", "_2");
x.title = "transcript biotype"
chart.addSeries("_1", dimple.plot.bar);
chart.addLegend(200, 10, 380, 20, "right");
chart.draw();
}
"""
val alleleFreqGraph = """
function(data, headers, chart /*dimple: http://dimplejs.org/*/) {
chart.setBounds(80, 20, 480, 330)
var y =chart.addMeasureAxis("y", "_3");
y.title = "rna allele freq"
var x =chart.addMeasureAxis("x", "_2");
x.title = "wxs allele freq"
chart.addSeries(["_2","_1"], dimple.plot.line);
chart.addLegend(200, 10, 380, 20, "right");
chart.draw();
}
"""
val alleleFreqScGraph = """
function(data, headers, chart /*dimple: http://dimplejs.org/*/) {
chart.setBounds(80, 20, 480, 330)
var y =chart.addMeasureAxis("y", "_2");
y.title = "rna allele freq"
var x =chart.addMeasureAxis("x", "_3");
var z =chart.addMeasureAxis("z", "_4");
x.title = "wxs allele freq"
chart.addSeries(["_2","_3"], dimple.plot.bubble);
chart.draw();
}
"""
val gqGraph = """
function(data, headers, chart /*dimple: http://dimplejs.org/*/) {
chart.setBounds(60, 30, 480, 330)
var x = chart.addMeasureAxis("x", "threshold");
x.ticks = 10
x.title = "Genome Quality threshold";
var y = chart.addMeasureAxis("y", "count");
y.title = "# variants"
var s = chart.addSeries(["threshold", "group"], dimple.plot.line);
s.lineWeight = 1;
s.barGap = 0.05;
chart.addLegend(550, 20, 100, 300, "left");
chart.draw();
}
"""
val rpkmGraph = """
function(data, headers, chart /*dimple: http://dimplejs.org/*/) {
chart.setBounds(60, 30, 480, 330)
var x = chart.addLogAxis("x", "threshold");
x.title = "RPKM threshold";
x.ticks = 10
var y = chart.addMeasureAxis("y", "count");
y.title = "# variants"
var s = chart.addSeries(["threshold", "group"], dimple.plot.line);
s.lineWeight = 1;
s.barGap = 0.05;
chart.addLegend(550, 20, 100, 300, "left");
chart.draw();
}
"""
val dpGraph = """
function(data, headers, chart /*dimple: http://dimplejs.org/*/) {
chart.setBounds(60, 30, 480, 330)
var x = chart.addLogAxis("x", "threshold");
x.title = "Depth threshold";
x.ticks = 10
x.overrideMin = 1
var y = chart.addMeasureAxis("y", "count");
y.title = "# variants"
var s = chart.addSeries(["threshold", "group"], dimple.plot.line);
s.lineWeight = 1;
s.barGap = 0.05;
chart.addLegend(550, 20, 100, 300, "left");
chart.draw();
}
"""
val qGraph = """
function(data, headers, chart /*dimple: http://dimplejs.org/*/) {
chart.setBounds(60, 30, 480, 330)
var x = chart.addLogAxis("x", "threshold");
x.title = "Quality threshold";
x.ticks = 10;
x.overrideMin = 1;
var y = chart.addMeasureAxis("y", "count");
y.title = "# variants"
var s = chart.addSeries(["threshold", "group"], dimple.plot.line);
s.lineWeight = 1;
s.barGap = 0.05;
chart.addLegend(550, 20, 100, 300, "left");
chart.draw();
}
"""
val splitBsGraph = """
function(data, headers, chart /*dimple: http://dimplejs.org/*/) {
chart.setBounds(80, 30, 480, 330)
chart.addPctAxis("y", "count");
chart.addCategoryAxis("x", ["reference", "group"]);
chart.addSeries(["reference", "alternate"], dimple.plot.bar);
chart.addLegend(200, 10, 380, 20, "right");
chart.draw();
}
"""
val mergedBsGraph = """
function(data, headers, chart /*dimple: http://dimplejs.org/*/) {
chart.setBounds(80, 30, 480, 330)
chart.addMeasureAxis("y", "count");
chart.addCategoryAxis("x", ("group". "threshold"));
chart.addSeries("baseChange", dimple.plot.bar);
chart.addLegend(200, 10, 380, 20, "right");
chart.draw();
}
"""
val bChangesGraph = """
function(data, headers, chart /*dimple: http://dimplejs.org/*/) {
chart.setBounds(80, 30, 480, 330)
chart.addMeasureAxis("y", "_3");
chart.addCategoryAxis("x", ["_1", "_2"]);
chart.addSeries("_2", dimple.plot.bar);
chart.addLegend(200, 10, 380, 20, "right");
chart.draw();
}
"""
val bChangesPctGraph = """
function(data, headers, chart /*dimple: http://dimplejs.org/*/) {
chart.setBounds(80, 30, 480, 330)
chart.addPctAxis("y", "_3");
chart.addCategoryAxis("x", "_1");
chart.addSeries(["_1", "_2"], dimple.plot.bar);
chart.addLegend(200, 10, 380, 20, "right");
chart.draw();
}
"""
val lgOf2 = math.log(2)
case class GenotypeWithMetadata(
genotype: org.bdgenomics.formats.avro.Genotype,
annotations: org.tmoerman.adam.fx.avro.SnpEffAnnotations,
rpkm:Double,
otherCoverage: Long)
def isSingleNucleotideVariant(g: org.bdgenomics.formats.avro.Genotype): Boolean = {
g.getVariant().getReferenceAllele().length == 1 && g.getVariant().getAlternateAllele().length == 1
}
def filterGenotypes(genotypes: RDD[Iterable[GenotypeWithMetadata]],
mDepthFilter: Double = 0,
depth: Double = 0,
gq: Double = 0,
qual: Double= 0,
qd: Double = 0,
rpkm: Double = 0): RDD[Iterable[GenotypeWithMetadata]] = {
genotypes.filter(x => ! x.filter(v => (v.otherCoverage >= mDepthFilter &&
v.genotype.getReadDepth >= depth &&
v.genotype.getGenotypeQuality >= gq &&
v.genotype.getVariantCallingAnnotations.getVariantCallErrorProbability >= qual &&
v.rpkm >= rpkm &&
v.genotype.getVariantCallingAnnotations.getVariantQualityByDepth >= qd)).isEmpty)
}
def qualityByDeptHist(genotypes: RDD[Iterable[GenotypeWithMetadata]], name: String) : RDD[(String, Double, Double)] = {
val gCount = genotypes.count.toFloat
genotypes.map(x => (math.round(x.maxBy(_.genotype.getVariantCallingAnnotations.getVariantQualityByDepth)
.genotype.getVariantCallingAnnotations.getVariantQualityByDepth*2)/2.0, 1)).reduceByKey(_+_)
.map(x => (name, x._1, x._2/gCount))
}
def binQuality(q: Double): Double ={
math.round(math.pow(2, math.ceil(math.log(q)/(lgOf2/4))/4))
}
def readDepthHist(genotypes: RDD[Iterable[GenotypeWithMetadata]], name: String, th: Int = 100) : RDD[(String, Double, Double)] = {
val gCount = genotypes.count.toFloat
genotypes.map(x => (x.maxBy(_.genotype.getReadDepth).genotype.getReadDepth/2*2+1, 1))
.map(x => if (x._1 >= th) (th, x._2) else (x._1, x._2))
.reduceByKey(_+_)
.map(x => (name, x._1.toFloat, x._2/gCount))
}
def matchingReadDepthHist(genotypes: RDD[Iterable[GenotypeWithMetadata]], name: String, th: Long = 100) : RDD[(String, Double, Double)] = {
val gCount = genotypes.count.toFloat
genotypes.map(x => (x.maxBy(_.otherCoverage).otherCoverage/2*2+1, 1))
.map(x => if (x._1 >= th) (th, x._2) else (x._1, x._2))
.reduceByKey(_+_)
.map(x => (name, x._1.toFloat, x._2/gCount))
}
def qualityHist(genotypes: RDD[Iterable[GenotypeWithMetadata]], name: String) : RDD[(String, Double, Double)] = {
val gCount = genotypes.count.toFloat
genotypes.map(x => (binQuality(x.maxBy(_.genotype.getVariantCallingAnnotations.getVariantCallErrorProbability)
.genotype.getVariantCallingAnnotations.getVariantCallErrorProbability.toDouble), 1))
.reduceByKey(_+_)
.map(x => (name, x._1, x._2/gCount))
}
def biotypeCount(genotypes: RDD[Iterable[GenotypeWithMetadata]], name: String) : RDD[(String, String, Int)] = {
genotypes.map(x => x.head.annotations.getFunctionalAnnotations.head.getTranscriptBiotype)
.map(x => (x, 1)).reduceByKey(_ + _)
.map{case (x,r) => (name,x,r)}.map(x => if (x._2 == null) (x._1, "No_Biotype", x._3) else (x._1, x._2, x._3))
}
def annotationCount(genotypes: RDD[Iterable[GenotypeWithMetadata]], name: String) : RDD[(String, String, Int)] = {
genotypes.map(x => x.head.annotations.getFunctionalAnnotations.head.getAnnotations.head)
.map(x => (x, 1)).reduceByKey(_ + _)
.map{case (x,r) => (name,x,r)}
}
def isPyrimidine(nucleotyide: String) : Boolean = {
return nucleotyide == "C" || nucleotyide == "T"
}
def isTransition(from: String, to: String) : Boolean = {
return isPyrimidine(from) == isPyrimidine(to)
}
def getTiTvRatio(genotypes: RDD[Iterable[GenotypeWithMetadata]]) : Double = {
genotypes.filter(x => isTransition(x.head.genotype.getVariant.getReferenceAllele,
x.head.genotype.getVariant.getAlternateAllele))
.count.toDouble / genotypes.filter(x => ! isTransition(x.head.genotype.getVariant.getReferenceAllele,
x.head.genotype.getVariant.getAlternateAllele)).count.toDouble
}
def getMultiAllelicSNVsFraction(genotypes: RDD[Iterable[GenotypeWithMetadata]]) : Double = {
genotypes.filter(x => x.size > 1).count.toDouble / genotypes.count.toDouble
}
def getHomozygousFraction(genotypes: RDD[Iterable[GenotypeWithMetadata]]) : Double = {
genotypes.filter(x => x.filter{g =>
val all = g.genotype.getAlleles
all(0) == all(1)}.size == x.size).count.toDouble / genotypes.count.toDouble
}
def getCommonFraction(genotypes: RDD[Iterable[GenotypeWithMetadata]]) : Double = {
genotypes.filter(x => x.head.annotations.getDbSnpAnnotations != null).count.toDouble / genotypes.count.toDouble
}
def getClinvarFraction(genotypes: RDD[Iterable[GenotypeWithMetadata]]) : Double = {
genotypes.filter(x => x.head.annotations.getClinvarAnnotations != null).count.toDouble / genotypes.count.toDouble
}
def alleleFrequency(genotypes: RDD[Iterable[GenotypeWithMetadata]], name: String) : RDD[(String, Double, Int)] = {
genotypes.map(x => ("%.1f".format(x.head.genotype.getAlternateReadDepth.toFloat / x.head.genotype.getReadDepth) ,1))
.reduceByKey(_ + _)
.map{ case (freq, count) => (name,freq.toDouble,count)}
}
def getAllFractions(genotypes: RDD[Iterable[GenotypeWithMetadata]], name: String) : Map[String, Double] = {
Map(name + " Ti/Tv" -> getTiTvRatio(genotypes),
name + " MultiAllele" -> getMultiAllelicSNVsFraction(genotypes),
name + " Homozygous" -> getHomozygousFraction(genotypes),
name + " Common" -> getCommonFraction(genotypes),
name + " ClinVar" -> getClinvarFraction(genotypes))
}
def scatterAlleleFrequency(genotypes: RDD[(Iterable[GenotypeWithMetadata], Iterable[GenotypeWithMetadata])], name: String) : RDD[(String, Double, Double, Int)] = {
genotypes.map(x =>
(x._1.head.genotype.getAlternateReadDepth.toFloat /
(x._1.head.genotype.getAlternateReadDepth + x._1.head.genotype.getReferenceReadDepth),
x._2.head.genotype.getAlternateReadDepth.toFloat /
(x._2.head.genotype.getAlternateReadDepth + x._2.head.genotype.getReferenceReadDepth)))
.map{ case (x, y) => ((name,"%.1f".format(x),"%.1f".format(y)),1)}
.filter(x => x._1._2 != "NaN" && x._1._3 != "NaN")
.reduceByKey(_ + _)
.map{case (x, y) => (x._1, x._2.toDouble, x._3.toDouble, y.toInt)}
}
def baseChanges(genotypes: RDD[Iterable[GenotypeWithMetadata]], name: String) : RDD[(String, String, Long)] = {
genotypes.filter(g=> g.filter(x=> x.genotype.getVariant.getReferenceAllele.length == 1 &&
x.genotype.getVariant.getAlternateAllele.length == 1).size > 0)
.map(g => {
val tmp = g.filter(x=> x.genotype.getVariant.getReferenceAllele.length == 1 &&
x.genotype.getVariant.getAlternateAllele.length == 1).head
((tmp.genotype.getVariant.getReferenceAllele, tmp.genotype.getVariant.getAlternateAllele), 1)
}).reduceByKey(_+_)
.map(x => (name, x._1._1+">"+x._1._2, x._2))
}
def UniquifyNoAllele(genotype: Genotype): String = {
val v = genotype.getVariant
genotype.getSampleId + "@" +v.getContig.getContigName+"+"+v.getStart
}
def Uniquify(genotype: Genotype): String = {
val v = genotype.getVariant
genotype.getSampleId + "@" +v.getContig.getContigName+"+"+v.getStart+":"+
v.getReferenceAllele + ">" +v.getAlternateAllele
}
def readSnvWithMetaRepartition(sc: SparkContext, input: String, rpkmFile: String, mCovFile: String, partitions: Int): RDD[(String, GenotypeWithMetadata)] = {
val rpkm = sc.textFile(rpkmFile).map(x => x.split("\\t")).map(x => (x(0), x(1).toDouble)).repartition(partitions)
val mCov = sc.textFile(mCovFile).map(x => x.split("\\t")).map(x => (x(0), x(1).toLong)).repartition(partitions)
val rpkmWithCov = rpkm.cogroup(mCov).map{ case (id, (r, mcov)) => (id, (r.headOption.getOrElse(0.0),
mcov.headOption.getOrElse(0L)))}
val ec = new SnpEffContext(sc)
val genotypes = ec.loadAnnotatedGenotypes(input).map(x => (UniquifyNoAllele(x.getGenotype), x)).repartition(partitions)
genotypes.leftOuterJoin(rpkmWithCov).map{ case (id, (g, meta)) => (UniquifyNoAllele(g.getGenotype),
GenotypeWithMetadata(g.getGenotype, g.getAnnotations, meta.getOrElse((0.0,0L))._1, meta.getOrElse((0.0,0L))._2)) }
}
def readSnvWithMeta(sc: SparkContext, input: String, rpkmFile: String, mCovFile: String): RDD[(String, GenotypeWithMetadata)] = {
val rpkm = sc.textFile(rpkmFile).map(x => x.split("\\t")).map(x => (x(0), x(1).toDouble))
val mCov = sc.textFile(mCovFile).map(x => x.split("\\t")).map(x => (x(0), x(1).toLong))
val rpkmWithCov = rpkm.cogroup(mCov).map{ case (id, (r, mcov)) => (id, (r.headOption.getOrElse(0.0),
mcov.headOption.getOrElse(0L)))}
val ec = new SnpEffContext(sc)
val genotypes = ec.loadAnnotatedGenotypes(input).map(x => (UniquifyNoAllele(x.getGenotype), x))
genotypes.leftOuterJoin(rpkmWithCov).map{ case (id, (g, meta)) => (UniquifyNoAllele(g.getGenotype),
GenotypeWithMetadata(g.getGenotype, g.getAnnotations, meta.getOrElse((0.0,0L))._1, meta.getOrElse((0.0,0L))._2)) }
}
def getOverlapWithBed(seqDict: SequenceDictionary,
genotypes: RDD[Iterable[GenotypeWithMetadata]],
bedRDD: RDD[ReferenceRegion]): RDD[Iterable[GenotypeWithMetadata]] = {
val posRDD = genotypes.map(g => (ReferenceRegion(g.head.genotype.getVariant.getContig.getContigName,
g.head.genotype.getVariant.getStart,
g.head.genotype.getVariant.getEnd), g))
val maxPartitions = math.max(bedRDD.partitions.length.toLong, posRDD.partitions.length.toLong)
val joinedRDD = ShuffleRegionJoin(seqDict, seqDict.records.map(_.length).sum / maxPartitions)
.partitionAndJoin(posRDD, bedRDD.map(v => (v,1)))
joinedRDD.reduceByKey(_+_).map(x => x._1)
}
}
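// A hedged end-to-end sketch: group the per-variant records emitted by
// readSnvWithMeta by position, filter them and summarise. The file paths and
// the filter thresholds below are illustrative only.
object analysisSketch {
  import analysis._
  def summarise(sc: SparkContext, gtFile: String, rpkmFile: String, mCovFile: String): Map[String, Double] = {
    val perPosition = readSnvWithMeta(sc, gtFile, rpkmFile, mCovFile).groupByKey().map(_._2)
    val filtered = filterGenotypes(perPosition, depth = 10, gq = 20, qual = 30)
    getAllFractions(filtered, "filtered")
  }
}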
| ddcap/rna-wxs-analysis | src/main/scala/org/ddecap/snv/analysis.scala | Scala | gpl-3.0 | 22,040 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.dataset
import org.apache.calcite.plan.{RelOptCluster, RelOptCost, RelOptPlanner, RelTraitSet}
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.metadata.RelMetadataQuery
import org.apache.calcite.rel.{RelNode, RelWriter, SingleRel}
import org.apache.calcite.rex.{RexCall, RexNode}
import org.apache.calcite.sql.SemiJoinType
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.DataSet
import org.apache.flink.table.api.BatchTableEnvironment
import org.apache.flink.table.functions.utils.TableSqlFunction
import org.apache.flink.table.plan.nodes.CommonCorrelate
import org.apache.flink.table.plan.nodes.logical.FlinkLogicalTableFunctionScan
import org.apache.flink.types.Row
/**
* Flink RelNode which joins its input with a user-defined table function (correlate).
*/
class DataSetCorrelate(
cluster: RelOptCluster,
traitSet: RelTraitSet,
inputNode: RelNode,
scan: FlinkLogicalTableFunctionScan,
condition: Option[RexNode],
relRowType: RelDataType,
joinRowType: RelDataType,
joinType: SemiJoinType,
ruleDescription: String)
extends SingleRel(cluster, traitSet, inputNode)
with CommonCorrelate
with DataSetRel {
override def deriveRowType() = relRowType
override def computeSelfCost(planner: RelOptPlanner, metadata: RelMetadataQuery): RelOptCost = {
val rowCnt = metadata.getRowCount(getInput) * 1.5
planner.getCostFactory.makeCost(rowCnt, rowCnt, rowCnt * 0.5)
}
override def copy(traitSet: RelTraitSet, inputs: java.util.List[RelNode]): RelNode = {
new DataSetCorrelate(
cluster,
traitSet,
inputs.get(0),
scan,
condition,
relRowType,
joinRowType,
joinType,
ruleDescription)
}
override def toString: String = {
val rexCall = scan.getCall.asInstanceOf[RexCall]
val sqlFunction = rexCall.getOperator.asInstanceOf[TableSqlFunction]
correlateToString(rexCall, sqlFunction)
}
override def explainTerms(pw: RelWriter): RelWriter = {
val rexCall = scan.getCall.asInstanceOf[RexCall]
val sqlFunction = rexCall.getOperator.asInstanceOf[TableSqlFunction]
super.explainTerms(pw)
.item("invocation", scan.getCall)
.item("function", sqlFunction.getTableFunction.getClass.getCanonicalName)
.item("rowType", relRowType)
.item("joinType", joinType)
.itemIf("condition", condition.orNull, condition.isDefined)
}
override def translateToPlan(tableEnv: BatchTableEnvironment): DataSet[Row] = {
val config = tableEnv.getConfig
// we do not need to specify input type
val inputDS = inputNode.asInstanceOf[DataSetRel].translateToPlan(tableEnv)
val funcRel = scan.asInstanceOf[FlinkLogicalTableFunctionScan]
val rexCall = funcRel.getCall.asInstanceOf[RexCall]
val sqlFunction = rexCall.getOperator.asInstanceOf[TableSqlFunction]
val pojoFieldMapping = sqlFunction.getPojoFieldMapping
val udtfTypeInfo = sqlFunction.getRowTypeInfo.asInstanceOf[TypeInformation[Any]]
val mapFunc = correlateMapFunction(
config,
inputDS.getType,
udtfTypeInfo,
getRowType,
joinType,
rexCall,
condition,
Some(pojoFieldMapping),
ruleDescription)
inputDS.flatMap(mapFunc).name(correlateOpName(rexCall, sqlFunction, relRowType))
}
}
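// A hedged sketch of the cost model in computeSelfCost above: the correlate is
// charged 1.5x the input row count on rows and CPU, and half of that on IO.
// The helper object and the 1000-row figure are purely illustrative.
object DataSetCorrelateCostSketch {
  def estimate(inputRowCount: Double): (Double, Double, Double) = {
    val rowCnt = inputRowCount * 1.5
    (rowCnt, rowCnt, rowCnt * 0.5) // (rowCount, cpu, io) as handed to makeCost
  }
  val forThousandInputRows: (Double, Double, Double) = estimate(1000.0) // (1500.0, 1500.0, 750.0)
}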
| hwstreaming/flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/dataset/DataSetCorrelate.scala | Scala | apache-2.0 | 4,189 |
/*
* Copyright 2016 Coursera Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.coursera.naptime.router2
import org.coursera.courier.codecs.InlineStringCodec
import org.coursera.courier.templates.DataTemplates.DataConversion
import org.coursera.naptime.courier.StringKeyCodec
import org.coursera.naptime.actions.SortOrder
import org.junit.Test
import org.scalatest.junit.AssertionsForJUnit
import play.api.test.FakeRequest
class CourierQueryParsersTest extends AssertionsForJUnit {
@Test
def checkStrictParseSimple(): Unit = {
val sortOrder = SortOrder(field = "startDate", descending = false)
val sortOrderStr = InlineStringCodec.dataToString(sortOrder.data())
val fakeRequest = FakeRequest("GET", s"/foo?sort=$sortOrderStr")
val parsedDataMap =
CourierQueryParsers.strictParse("sort", SortOrder.SCHEMA, getClass, fakeRequest)
assert(parsedDataMap.isRight)
val parsedSortOrder = SortOrder.build(parsedDataMap.right.get, DataConversion.SetReadOnly)
assert(sortOrder === parsedSortOrder)
}
@Test
def checkStrictParseMalformed(): Unit = {
val fakeRequest = FakeRequest("GET", s"/foo?sort=baz")
val parsedDataMap =
CourierQueryParsers.strictParse("sort", SortOrder.SCHEMA, getClass, fakeRequest)
assert(parsedDataMap.isLeft)
}
@Test
def checkStrictParseMalformed2(): Unit = {
val fakeRequest = FakeRequest("GET", s"/foo?sort=(a~b)")
val parsedDataMap =
CourierQueryParsers.strictParse("sort", SortOrder.SCHEMA, getClass, fakeRequest)
assert(parsedDataMap.isLeft)
}
@Test
def checkStrictParseMissingField(): Unit = {
val sortOrder = SortOrder(field = "startDate") // Field descending should be default
val fakeRequest = FakeRequest("GET", s"/foo?sort=(field~startDate)")
val parsedDataMap =
CourierQueryParsers.strictParse("sort", SortOrder.SCHEMA, getClass, fakeRequest)
assert(parsedDataMap.isRight)
val parsedSortOrder = SortOrder.build(parsedDataMap.right.get, DataConversion.SetReadOnly)
assert(sortOrder === parsedSortOrder)
}
@Test
def checkStrictParseOldFormat(): Unit = {
val sortOrder = SortOrder(field = "startDate", descending = false)
val sortOrderStr = new String(new StringKeyCodec(SortOrder.SCHEMA).mapToBytes(sortOrder.data()))
assert("startDate~false" === sortOrderStr)
val fakeRequest = FakeRequest("GET", s"/foo?sort=$sortOrderStr")
val parsedDataMap =
CourierQueryParsers.strictParse("sort", SortOrder.SCHEMA, getClass, fakeRequest)
assert(parsedDataMap.isRight)
val parsedSortOrder = SortOrder.build(parsedDataMap.right.get, DataConversion.SetReadOnly)
assert(sortOrder === parsedSortOrder)
}
@Test
def checkOptParsePresent(): Unit = {
val sortOrder = SortOrder(field = "startDate", descending = false)
val sortOrderStr = InlineStringCodec.dataToString(sortOrder.data())
val fakeRequest = FakeRequest("GET", s"/foo?sort=$sortOrderStr")
val parsedDataMap =
CourierQueryParsers.optParse("sort", SortOrder.SCHEMA, getClass, fakeRequest)
assert(parsedDataMap.isRight)
assert(parsedDataMap.right.get.isDefined)
val parsedSortOrder = SortOrder.build(parsedDataMap.right.get.get, DataConversion.SetReadOnly)
assert(sortOrder === parsedSortOrder)
}
@Test
def checkOptParseAbsent(): Unit = {
val fakeRequest = FakeRequest("GET", s"/foo?bar=baz")
val parsedDataMap =
CourierQueryParsers.optParse("sort", SortOrder.SCHEMA, getClass, fakeRequest)
assert(parsedDataMap.isRight)
assert(parsedDataMap.right.get.isEmpty)
}
@Test
def checkOptParseMalformed(): Unit = {
val fakeRequest = FakeRequest("GET", s"/foo?sort=baz")
val parsedDataMap =
CourierQueryParsers.optParse("sort", SortOrder.SCHEMA, getClass, fakeRequest)
assert(parsedDataMap.isLeft)
}
@Test
def checkOptParseMalformed2(): Unit = {
val fakeRequest = FakeRequest("GET", s"/foo?sort=(a~b)")
val parsedDataMap =
CourierQueryParsers.optParse("sort", SortOrder.SCHEMA, getClass, fakeRequest)
assert(parsedDataMap.isLeft)
}
@Test
def checkOptParseMissingField(): Unit = {
    val sortOrder = SortOrder(field = "startDate") // field 'descending' should fall back to its default value
val fakeRequest = FakeRequest("GET", s"/foo?sort=(field~startDate)")
val parsedDataMap =
CourierQueryParsers.optParse("sort", SortOrder.SCHEMA, getClass, fakeRequest)
assert(parsedDataMap.isRight)
assert(parsedDataMap.right.get.isDefined)
val parsedSortOrder = SortOrder.build(parsedDataMap.right.get.get, DataConversion.SetReadOnly)
assert(sortOrder === parsedSortOrder)
}
@Test
def checkOptParseOldFormat(): Unit = {
val sortOrder = SortOrder(field = "startDate", descending = false)
val sortOrderStr = new String(new StringKeyCodec(SortOrder.SCHEMA).mapToBytes(sortOrder.data()))
assert("startDate~false" === sortOrderStr)
val fakeRequest = FakeRequest("GET", s"/foo?sort=$sortOrderStr")
val parsedDataMap =
CourierQueryParsers.optParse("sort", SortOrder.SCHEMA, getClass, fakeRequest)
assert(parsedDataMap.isRight)
assert(parsedDataMap.right.get.isDefined)
val parsedSortOrder = SortOrder.build(parsedDataMap.right.get.get, DataConversion.SetReadOnly)
assert(sortOrder === parsedSortOrder)
}
}
| vkuo-coursera/naptime | naptime/src/test/scala/org/coursera/naptime/router2/CourierQueryParsersTest.scala | Scala | apache-2.0 | 5,836 |
package io.getquill.context.cassandra.zio
import io.getquill.context.cassandra.QueryResultTypeCassandraSpec
class QueryResultTypeCassandraZioSpec extends ZioCassandraSpec with QueryResultTypeCassandraSpec {
val context = testZioDB
import context._
override def beforeAll = {
super.beforeAll()
result(context.run(deleteAll))
result(context.run(liftQuery(entries).foreach(e => insert(e))))
()
}
"query" in {
result(context.run(selectAll)) mustEqual entries
}
"stream" in {
result(context.stream(selectAll)) mustEqual entries
}
"querySingle" - {
"size" in {
result(context.run(entitySize)) mustEqual 3
}
"parametrized size" in {
result(context.run(parametrizedSize(lift(10000)))) mustEqual 0
}
}
}
| getquill/quill | quill-cassandra-zio/src/test/scala/io/getquill/context/cassandra/zio/QueryResultTypeCassandraZioSpec.scala | Scala | apache-2.0 | 775 |
// Generated by the Scala Plugin for the Protocol Buffer Compiler.
// Do not edit!
//
// Protofile syntax: PROTO3
package com.google.protobuf.`type`
import _root_.scalapb.internal.compat.JavaConverters._
/** Enum value definition.
*
* @param name
* Enum value name.
* @param number
* Enum value number.
* @param options
* Protocol buffer options.
*/
@SerialVersionUID(0L)
final case class EnumValue(
name: _root_.scala.Predef.String = "",
number: _root_.scala.Int = 0,
options: _root_.scala.Seq[com.google.protobuf.`type`.OptionProto] = _root_.scala.Seq.empty,
unknownFields: _root_.scalapb.UnknownFieldSet = _root_.scalapb.UnknownFieldSet.empty
) extends scalapb.GeneratedMessage with scalapb.lenses.Updatable[EnumValue] {
@transient
private[this] var __serializedSizeMemoized: _root_.scala.Int = 0
private[this] def __computeSerializedSize(): _root_.scala.Int = {
var __size = 0
{
val __value = name
if (!__value.isEmpty) {
__size += _root_.com.google.protobuf.CodedOutputStream.computeStringSize(1, __value)
}
};
{
val __value = number
if (__value != 0) {
__size += _root_.com.google.protobuf.CodedOutputStream.computeInt32Size(2, __value)
}
};
options.foreach { __item =>
val __value = __item
__size += 1 + _root_.com.google.protobuf.CodedOutputStream.computeUInt32SizeNoTag(__value.serializedSize) + __value.serializedSize
}
__size += unknownFields.serializedSize
__size
}
override def serializedSize: _root_.scala.Int = {
var __size = __serializedSizeMemoized
if (__size == 0) {
__size = __computeSerializedSize() + 1
__serializedSizeMemoized = __size
}
__size - 1
}
def writeTo(`_output__`: _root_.com.google.protobuf.CodedOutputStream): _root_.scala.Unit = {
{
val __v = name
if (!__v.isEmpty) {
_output__.writeString(1, __v)
}
};
{
val __v = number
if (__v != 0) {
_output__.writeInt32(2, __v)
}
};
options.foreach { __v =>
val __m = __v
_output__.writeTag(3, 2)
_output__.writeUInt32NoTag(__m.serializedSize)
__m.writeTo(_output__)
};
unknownFields.writeTo(_output__)
}
def withName(__v: _root_.scala.Predef.String): EnumValue = copy(name = __v)
def withNumber(__v: _root_.scala.Int): EnumValue = copy(number = __v)
def clearOptions = copy(options = _root_.scala.Seq.empty)
def addOptions(__vs: com.google.protobuf.`type`.OptionProto *): EnumValue = addAllOptions(__vs)
def addAllOptions(__vs: Iterable[com.google.protobuf.`type`.OptionProto]): EnumValue = copy(options = options ++ __vs)
def withOptions(__v: _root_.scala.Seq[com.google.protobuf.`type`.OptionProto]): EnumValue = copy(options = __v)
def withUnknownFields(__v: _root_.scalapb.UnknownFieldSet) = copy(unknownFields = __v)
def discardUnknownFields = copy(unknownFields = _root_.scalapb.UnknownFieldSet.empty)
def getFieldByNumber(__fieldNumber: _root_.scala.Int): _root_.scala.Any = {
(__fieldNumber: @_root_.scala.unchecked) match {
case 1 => {
val __t = name
if (__t != "") __t else null
}
case 2 => {
val __t = number
if (__t != 0) __t else null
}
case 3 => options
}
}
def getField(__field: _root_.scalapb.descriptors.FieldDescriptor): _root_.scalapb.descriptors.PValue = {
_root_.scala.Predef.require(__field.containingMessage eq companion.scalaDescriptor)
(__field.number: @_root_.scala.unchecked) match {
case 1 => _root_.scalapb.descriptors.PString(name)
case 2 => _root_.scalapb.descriptors.PInt(number)
case 3 => _root_.scalapb.descriptors.PRepeated(options.iterator.map(_.toPMessage).toVector)
}
}
def toProtoString: _root_.scala.Predef.String = _root_.scalapb.TextFormat.printToUnicodeString(this)
def companion: com.google.protobuf.`type`.EnumValue.type = com.google.protobuf.`type`.EnumValue
// @@protoc_insertion_point(GeneratedMessage[google.protobuf.EnumValue])
}
object EnumValue extends scalapb.GeneratedMessageCompanion[com.google.protobuf.`type`.EnumValue] with scalapb.JavaProtoSupport[com.google.protobuf.`type`.EnumValue, com.google.protobuf.EnumValue] {
implicit def messageCompanion: scalapb.GeneratedMessageCompanion[com.google.protobuf.`type`.EnumValue] with scalapb.JavaProtoSupport[com.google.protobuf.`type`.EnumValue, com.google.protobuf.EnumValue] = this
def toJavaProto(scalaPbSource: com.google.protobuf.`type`.EnumValue): com.google.protobuf.EnumValue = {
val javaPbOut = com.google.protobuf.EnumValue.newBuilder
javaPbOut.setName(scalaPbSource.name)
javaPbOut.setNumber(scalaPbSource.number)
javaPbOut.addAllOptions(_root_.scalapb.internal.compat.toIterable(scalaPbSource.options.iterator.map(com.google.protobuf.`type`.OptionProto.toJavaProto(_))).asJava)
javaPbOut.build
}
def fromJavaProto(javaPbSource: com.google.protobuf.EnumValue): com.google.protobuf.`type`.EnumValue = com.google.protobuf.`type`.EnumValue(
name = javaPbSource.getName,
number = javaPbSource.getNumber.intValue,
options = javaPbSource.getOptionsList.asScala.iterator.map(com.google.protobuf.`type`.OptionProto.fromJavaProto(_)).toSeq
)
def parseFrom(`_input__`: _root_.com.google.protobuf.CodedInputStream): com.google.protobuf.`type`.EnumValue = {
var __name: _root_.scala.Predef.String = ""
var __number: _root_.scala.Int = 0
val __options: _root_.scala.collection.immutable.VectorBuilder[com.google.protobuf.`type`.OptionProto] = new _root_.scala.collection.immutable.VectorBuilder[com.google.protobuf.`type`.OptionProto]
var `_unknownFields__`: _root_.scalapb.UnknownFieldSet.Builder = null
var _done__ = false
while (!_done__) {
val _tag__ = _input__.readTag()
_tag__ match {
case 0 => _done__ = true
case 10 =>
__name = _input__.readStringRequireUtf8()
case 16 =>
__number = _input__.readInt32()
case 26 =>
__options += _root_.scalapb.LiteParser.readMessage[com.google.protobuf.`type`.OptionProto](_input__)
case tag =>
if (_unknownFields__ == null) {
_unknownFields__ = new _root_.scalapb.UnknownFieldSet.Builder()
}
_unknownFields__.parseField(tag, _input__)
}
}
com.google.protobuf.`type`.EnumValue(
name = __name,
number = __number,
options = __options.result(),
unknownFields = if (_unknownFields__ == null) _root_.scalapb.UnknownFieldSet.empty else _unknownFields__.result()
)
}
implicit def messageReads: _root_.scalapb.descriptors.Reads[com.google.protobuf.`type`.EnumValue] = _root_.scalapb.descriptors.Reads{
case _root_.scalapb.descriptors.PMessage(__fieldsMap) =>
_root_.scala.Predef.require(__fieldsMap.keys.forall(_.containingMessage eq scalaDescriptor), "FieldDescriptor does not match message type.")
com.google.protobuf.`type`.EnumValue(
name = __fieldsMap.get(scalaDescriptor.findFieldByNumber(1).get).map(_.as[_root_.scala.Predef.String]).getOrElse(""),
number = __fieldsMap.get(scalaDescriptor.findFieldByNumber(2).get).map(_.as[_root_.scala.Int]).getOrElse(0),
options = __fieldsMap.get(scalaDescriptor.findFieldByNumber(3).get).map(_.as[_root_.scala.Seq[com.google.protobuf.`type`.OptionProto]]).getOrElse(_root_.scala.Seq.empty)
)
case _ => throw new RuntimeException("Expected PMessage")
}
def javaDescriptor: _root_.com.google.protobuf.Descriptors.Descriptor = TypeProto.javaDescriptor.getMessageTypes().get(3)
def scalaDescriptor: _root_.scalapb.descriptors.Descriptor = TypeProto.scalaDescriptor.messages(3)
def messageCompanionForFieldNumber(__number: _root_.scala.Int): _root_.scalapb.GeneratedMessageCompanion[_] = {
var __out: _root_.scalapb.GeneratedMessageCompanion[_] = null
(__number: @_root_.scala.unchecked) match {
case 3 => __out = com.google.protobuf.`type`.OptionProto
}
__out
}
lazy val nestedMessagesCompanions: Seq[_root_.scalapb.GeneratedMessageCompanion[_ <: _root_.scalapb.GeneratedMessage]] = Seq.empty
def enumCompanionForFieldNumber(__fieldNumber: _root_.scala.Int): _root_.scalapb.GeneratedEnumCompanion[_] = throw new MatchError(__fieldNumber)
lazy val defaultInstance = com.google.protobuf.`type`.EnumValue(
name = "",
number = 0,
options = _root_.scala.Seq.empty
)
implicit class EnumValueLens[UpperPB](_l: _root_.scalapb.lenses.Lens[UpperPB, com.google.protobuf.`type`.EnumValue]) extends _root_.scalapb.lenses.ObjectLens[UpperPB, com.google.protobuf.`type`.EnumValue](_l) {
def name: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Predef.String] = field(_.name)((c_, f_) => c_.copy(name = f_))
def number: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Int] = field(_.number)((c_, f_) => c_.copy(number = f_))
def options: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Seq[com.google.protobuf.`type`.OptionProto]] = field(_.options)((c_, f_) => c_.copy(options = f_))
}
final val NAME_FIELD_NUMBER = 1
final val NUMBER_FIELD_NUMBER = 2
final val OPTIONS_FIELD_NUMBER = 3
def of(
name: _root_.scala.Predef.String,
number: _root_.scala.Int,
options: _root_.scala.Seq[com.google.protobuf.`type`.OptionProto]
): _root_.com.google.protobuf.`type`.EnumValue = _root_.com.google.protobuf.`type`.EnumValue(
name,
number,
options
)
// @@protoc_insertion_point(GeneratedMessageCompanion[google.protobuf.EnumValue])
}
| scalapb/ScalaPB | scalapb-runtime/src/main/scalajvm/com/google/protobuf/type/EnumValue.scala | Scala | apache-2.0 | 9,777 |
package pl.touk.nussknacker.engine.api
import java.util.UUID
import scala.util.Random
object Context {
// prefix is to distinguish between externally provided and internal (initially created) id
private val initialContextIdPrefix = "initial-"
/**
   * For performance reasons, an insecure random generator is used - see UUIDBenchmark for details. In this case the random correlation id
   * is used only for internal purposes, so it is not important from a security standpoint.
*/
private val random = new Random()
/**
   * Deprecated: ContextIdGenerator should be used instead, e.g. via EngineRuntimeContext.contextIdGenerator
   * Should be used for a newly created context - when there is no suitable external correlation / tracing id
*/
def withInitialId: Context = {
Context(initialContextIdPrefix + new UUID(random.nextLong(), random.nextLong()).toString)
}
def apply(id: String) : Context = Context(id, Map.empty, None)
}
case class ContextId(value: String)
/**
 * Context is a container for variables used in expression evaluation
 * @param id correlation id/trace id used for tracing (logs, error presentation) and for the test mechanism; it should always be defined
 * @param variables variables available during evaluation
 * @param parentContext context used for scope handling, mainly for subprocess invocation purposes
*/
case class Context(id: String, variables: Map[String, Any], parentContext: Option[Context]) {
def apply[T](name: String): T =
getOrElse(name, throw new RuntimeException(s"Unknown variable: $name"))
def getOrElse[T](name: String, default: => T) =
get(name).getOrElse(default)
def get[T](name: String): Option[T] =
variables.get(name).map(_.asInstanceOf[T])
def modifyVariable[T](name: String, f: T => T): Context =
withVariable(name, f(apply(name)))
def modifyOptionalVariable[T](name: String, f: Option[T] => T): Context =
withVariable(name, f(get[T](name)))
def withVariable(name: String, value: Any): Context =
withVariables(Map(name -> value))
def withVariables(otherVariables: Map[String, Any]): Context =
copy(variables = variables ++ otherVariables)
def pushNewContext(variables: Map[String, Any]) : Context = {
Context(id, variables, Some(this))
}
def popContext : Context =
parentContext.getOrElse(throw new RuntimeException("No parent context available"))
def clearUserVariables: Context = {
    // clears variables from the context but leaves technical variables, which are hidden from the user
val variablesToLeave = Set(VariableConstants.EventTimestampVariableName)
copy(variables = variables.filterKeys(variablesToLeave))
}
}
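// Illustrative usage sketch (added for documentation purposes; not part of the original API).
// It shows how variables are set, read and scoped with Context; the object name and the
// variable names below are hypothetical.
object ContextUsageExample {
  def main(args: Array[String]): Unit = {
    val ctx = Context("correlation-1")
      .withVariable("input", 42)
      .withVariable("name", "event")
    // typed access: apply throws for unknown variables, get returns an Option
    val input: Int = ctx.apply[Int]("input")
    val missing: Option[String] = ctx.get[String]("unknown")
    // pushNewContext opens a nested scope; popContext returns to the parent context
    val nested = ctx.pushNewContext(Map("local" -> true))
    println(s"input=$input, missing=$missing, parentId=${nested.popContext.id}")
  }
}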
| TouK/nussknacker | components-api/src/main/scala/pl/touk/nussknacker/engine/api/Context.scala | Scala | apache-2.0 | 2,633 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.sbt
import com.lightbend.lagom.core.LagomVersion
import sbt.Keys._
import sbt._
object LagomImport extends LagomImportCompat {
private val moduleOrganization = "com.lightbend.lagom"
def component(id: String) = moduleOrganization %% id % LagomVersion.current
private[sbt] val lagomLogbackModuleName = "lagom-logback"
val lagomJavadslApi = component("lagom-javadsl-api")
val lagomJavadslClient = component("lagom-javadsl-client")
val lagomJavadslAkkaDiscovery = component("lagom-javadsl-akka-discovery-service-locator")
val lagomJavadslIntegrationClient = component("lagom-javadsl-integration-client")
val lagomJavadslCluster = component("lagom-javadsl-cluster")
// Scoped to `Provided` because it's needed only at compile-time.
val lagomJavadslImmutables = component("lagom-javadsl-immutables") % Provided
val lagomJavadslJackson = component("lagom-javadsl-jackson")
val lagomJavadslBroker = component("lagom-javadsl-broker")
val lagomJavadslKafkaClient = component("lagom-javadsl-kafka-client")
val lagomJavadslKafkaBroker = component("lagom-javadsl-kafka-broker")
val lagomJavadslPersistence = component("lagom-javadsl-persistence")
val lagomJavadslPersistenceCassandra = component("lagom-javadsl-persistence-cassandra")
val lagomJavadslPersistenceJdbc = component("lagom-javadsl-persistence-jdbc")
val lagomJavadslPersistenceJpa = component("lagom-javadsl-persistence-jpa")
val lagomJavadslPubSub = component("lagom-javadsl-pubsub")
val lagomJavadslServer = component("lagom-javadsl-server")
val lagomJavadslTestKit = component("lagom-javadsl-testkit") % Test
val lagomLogback = component(lagomLogbackModuleName)
val lagomLog4j2 = component("lagom-log4j2")
val lagomScaladslApi = component("lagom-scaladsl-api")
val lagomScaladslClient = component("lagom-scaladsl-client")
val lagomScaladslAkkaDiscovery = component("lagom-scaladsl-akka-discovery-service-locator")
val lagomScaladslServer = component("lagom-scaladsl-server")
val lagomScaladslDevMode = component("lagom-scaladsl-dev-mode")
val lagomScaladslCluster = component("lagom-scaladsl-cluster")
val lagomScaladslBroker = component("lagom-scaladsl-broker")
val lagomScaladslKafkaClient = component("lagom-scaladsl-kafka-client")
val lagomScaladslKafkaBroker = component("lagom-scaladsl-kafka-broker")
val lagomScaladslPersistence = component("lagom-scaladsl-persistence")
val lagomScaladslPersistenceCassandra = component("lagom-scaladsl-persistence-cassandra")
val lagomScaladslPersistenceJdbc = component("lagom-scaladsl-persistence-jdbc")
val lagomScaladslPubSub = component("lagom-scaladsl-pubsub")
val lagomScaladslTestKit = component("lagom-scaladsl-testkit") % Test
val lagomJUnitDeps = Seq(
"junit" % "junit" % "4.12" % Test,
"com.novocode" % "junit-interface" % "0.11" % Test
)
// for forked tests, necessary for Cassandra
def lagomForkedTestSettings: Seq[Setting[_]] = Seq(
fork in Test := true,
concurrentRestrictions in Global += Tags.limit(Tags.Test, 1),
javaOptions in Test ++= Seq("-Xms256M", "-Xmx512M"),
testGrouping in Test := singleTestsGrouping((definedTests in Test).value, (javaOptions in Test).value)
)
// group tests, a single test per group
private def singleTestsGrouping(tests: Seq[TestDefinition], javaOptions: Seq[String]) = {
// We could group non Cassandra tests into another group
// to avoid new JVM for each test, see http://www.scala-sbt.org/release/docs/Testing.html
val forkOptions = getForkOptions(javaOptions.toVector)
tests.map { test =>
Tests.Group(
name = test.name,
tests = Seq(test),
runPolicy = Tests.SubProcess(forkOptions)
)
}
}
}
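// Illustrative sketch (added for documentation purposes; not part of the original plugin sources).
// It shows how a build definition might combine the component artifacts above with the
// forked-test settings needed for Cassandra-backed tests; the object and value names are hypothetical.
private[sbt] object LagomImportUsageExample {
  import LagomImport._
  val exampleTestSettings: Seq[Setting[_]] =
    Seq(
      libraryDependencies ++= Seq(
        lagomJavadslPersistenceCassandra,
        lagomJavadslTestKit
      ) ++ lagomJUnitDeps
    ) ++ lagomForkedTestSettings
}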
| rcavalcanti/lagom | dev/sbt-plugin/src/main/scala/com/lightbend/lagom/sbt/LagomImport.scala | Scala | apache-2.0 | 4,191 |
package com.github.utaal.m68k
import com.github.utaal.m68k.ast._
object RegisterState {
  def apply(): RegisterState = RegisterState(0L)
}
case class RegisterState(value: Long) {
require (value >= 0L && value <= 0xffffffffL)
def getMask(size: Size): Long = size match {
case Size.B => 0xffL
case Size.W => 0xffffL
case Size.L => 0xffffffffL
}
def mask(size: Size, value: Long): Long = value & getMask(size)
def reverseMask(size: Size, value: Long): Long = value & (0xffffffffL ^ getMask(size))
def get(size: Size): Long = mask(size, value)
def set(size: Size, newValue: Long) =
new RegisterState(reverseMask(size, value) | mask(size, newValue))
}
object ProgramCounter {
def apply(): ProgramCounter = ProgramCounter(0L)
}
case class ProgramCounter(value: Long) {
require (value >= 0L && value <= 0xffffffL)
}
object StatusRegister {
def apply(): StatusRegister = StatusRegister(false, false, false, false, false)
}
case class StatusRegister(C: Boolean, V: Boolean, Z: Boolean, N: Boolean, X: Boolean)
object CPUState {
val DataRegisterNumber = 8
val AddressRegisterNumber = 8
val FramePointerRegister = 6
val StackPointerRegister = 7
private def makeRegisterArray(num: Int): Vector[RegisterState] = {
val emptyRegState = RegisterState(0L)
Vector.fill[RegisterState](num)(emptyRegState)
}
def apply(): CPUState = CPUState(
CPUState.makeRegisterArray(CPUState.DataRegisterNumber),
CPUState.makeRegisterArray(CPUState.AddressRegisterNumber),
ProgramCounter(),
StatusRegister()
)
}
case class CPUState(
D: Vector[RegisterState],
A: Vector[RegisterState],
PC: ProgramCounter,
SR: StatusRegister
) {
private def updatedRegVector(vec: Vector[RegisterState], num: Int, size: Size, value: Long) =
vec.updated(num, vec(num).set(size, value))
def setD(number: Int, size: Size, value: Long) = {
require (number >= 0 && number < CPUState.DataRegisterNumber)
this.copy(D = updatedRegVector(D, number, size, value))
}
def setA(number: Int, size: Size, value: Long) = {
require (number >= 0 && number < CPUState.AddressRegisterNumber)
this.copy(A = updatedRegVector(A, number, size, value))
}
val FP = A(CPUState.FramePointerRegister)
def setFP(size: Size, value: Long) =
setA(CPUState.FramePointerRegister, size, value)
val SP = A(CPUState.StackPointerRegister)
def setSP(size: Size, value: Long) =
setA(CPUState.StackPointerRegister, size, value)
def setPC(value: Long) =
this.copy(PC = ProgramCounter(value))
def setSR(sr: StatusRegister) =
this.copy(SR = sr)
}
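// Illustrative sketch (added for documentation purposes; not part of the original sources).
// It demonstrates the size-based masking applied on register writes; the object name is
// hypothetical, while Size.B/Size.L come from the ast package imported above.
object CPUStateExample {
  def main(args: Array[String]): Unit = {
    val initial = CPUState()
    // a long write fills all 32 bits of D0
    val afterLong = initial.setD(0, Size.L, 0xAABBCCDDL)
    // a byte write only replaces the low 8 bits, keeping the upper 24 bits intact
    val afterByte = afterLong.setD(0, Size.B, 0x11L)
    println(afterByte.D(0).get(Size.L).toHexString) // expected: aabbcc11
  }
}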
| utaal/m68k-interpreter | src/main/scala/CPUState.scala | Scala | mit | 2,610 |
/*
* Licensed to STRATIO (C) under one or more contributor license agreements.
* See the NOTICE file distributed with this work for additional information
* regarding copyright ownership. The STRATIO (C) licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.stratio.crossdata.server.actors
import akka.actor.{Actor, ActorLogging, ActorRef, ActorSelection, Props}
import akka.routing.RoundRobinPool
import com.stratio.crossdata.common.connector.ConnectorClusterConfig
import com.stratio.crossdata.common.data
import com.stratio.crossdata.common.data.{ClusterName, ConnectorName, Status}
import com.stratio.crossdata.common.exceptions.ExecutionException
import com.stratio.crossdata.common.exceptions.validation.CoordinationException
import com.stratio.crossdata.common.executionplan._
import com.stratio.crossdata.common.logicalplan.PartialResults
import com.stratio.crossdata.common.metadata.{CatalogMetadata, ConnectorMetadata, TableMetadata, UpdatableMetadata}
import com.stratio.crossdata.common.result._
import com.stratio.crossdata.common.statements.structures.SelectorHelper
import com.stratio.crossdata.common.utils.{Constants, StringUtils}
import com.stratio.crossdata.communication._
import com.stratio.crossdata.core.coordinator.Coordinator
import com.stratio.crossdata.core.execution.{ExecutionManager, ExecutionManagerException}
import com.stratio.crossdata.core.metadata.MetadataManager
import com.stratio.crossdata.core.query.IPlannedQuery
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
object CoordinatorActor {
/**
* Token attached to query identifiers when the query is part of a trigger execution workflow.
*/
val TriggerToken = Constants.TRIGGER_TOKEN
def props(connectorMgr: ActorRef, coordinator: Coordinator): Props = Props(new CoordinatorActor
(connectorMgr, coordinator))
}
class CoordinatorActor(connectorMgr: ActorRef, coordinator: Coordinator) extends Actor with ActorLogging {
log.info("Lifting coordinator actor")
val host = coordinator.getHost
def receive: Receive = {
case plannedQuery: IPlannedQuery => {
val workflow = plannedQuery.getExecutionWorkflow()
log.debug("Workflow for " + workflow.getActorRef)
manageWorkflow(plannedQuery.getQueryId, workflow, None)
}
case workflow: ExecutionWorkflow => {
log.debug(s"Retrying workflow ${workflow.toString}")
Option(ExecutionManager.MANAGER.getValue(workflow.getQueryId)) match {
case Some(exInfo: ExecutionInfo) => manageWorkflow(workflow.getQueryId, workflow, Some(exInfo.getSender))
case _ => log.error("The retried query has not been found in MetadataManager")
}
}
case connectResult: ConnectToConnectorResult =>
val queryId = connectResult.getQueryId
log.info("Receiving result from " + sender + " with queryId = " + queryId + " result: " + connectResult)
if(queryId.contains("#")){
if(queryId.endsWith("#1")){
connectResult.setQueryId(queryId.split("#")(0))
}
}
try {
val executionInfo = ExecutionManager.MANAGER.getValue(queryId)
val sendResultToClient = executionInfo.asInstanceOf[ExecutionInfo].getSender != null
if(!connectResult.hasError) {
val storedWorkflow = executionInfo.asInstanceOf[ExecutionInfo].getWorkflow
if (storedWorkflow.isInstanceOf[ManagementWorkflow]) {
val managementWorkflow = storedWorkflow.asInstanceOf[ManagementWorkflow]
require(managementWorkflow.getExecutionType == ExecutionType.ATTACH_CONNECTOR)
if (executionInfo.asInstanceOf[ExecutionInfo].isPersistOnSuccess) {
coordinator.executeManagementOperation(managementWorkflow.createManagementOperationMessage())
}
if (executionInfo.asInstanceOf[ExecutionInfo].isUpdateOnSuccess) {
for (catalogName <- MetadataManager.MANAGER.getCluster(managementWorkflow.getClusterName).getPersistedCatalogs.asScala.toList){
for (tableMetadata <- MetadataManager.MANAGER.getTablesByCatalogName(catalogName.getName)){
sender ! UpdateMetadata(tableMetadata, remove = false)
}
}
}
if (executionInfo.asInstanceOf[ExecutionInfo].isRemoveOnSuccess) {
ExecutionManager.MANAGER.deleteEntry(queryId)
}
        } else log.error("QueryId referring to ConnectResult should have a ManagementWorkflow associated")
}
if(sendResultToClient) {
          //TODO Add two methods to StringUtils to retrieve AkkaActorRefUri, tokenizing with # for connectors and $ for clients
val target = executionInfo.asInstanceOf[ExecutionInfo].getSender
.replace("Actor[", "").replace("]", "").split("#")(0)
val clientActor = context.actorSelection(target)
log.info("Send result to: " + target)
clientActor ! connectResult
}
} catch {
case ex: ExecutionManagerException => {
log.error("Cannot access queryId actorRef associated value:" + System.lineSeparator() + ex.getMessage)
}
}
case ACKResult(queryId, status) => {
if(ExecutionManager.MANAGER.exists(queryId) && status == QueryStatus.EXECUTED &&
ExecutionManager.MANAGER.getValue(queryId).asInstanceOf[ExecutionInfo].isRemoveOnSuccess
&& !ExecutionManager.MANAGER.getValue(queryId).asInstanceOf[ExecutionInfo].isTriggeredByStreaming){
log.error("Query " + queryId + " failed")
val executionInfo = ExecutionManager.MANAGER.getValue(queryId)
if(executionInfo != null){
val target = executionInfo.asInstanceOf[ExecutionInfo].getSender
.replace("Actor[", "").replace("]", "").split("#")(0)
val clientActor = context.actorSelection(target)
ExecutionManager.MANAGER.deleteEntry(queryId)
val result = Result.createErrorResult(new ExecutionException("Query failed"))
result.setQueryId(queryId)
clientActor ! result
}
}
}
case result: Result => {
val queryId = result.getQueryId
var retryQuery = false
log.info(s"Receiving result from $sender with queryId = $queryId result: $result")
try{
lazy val executionInfo = ExecutionManager.MANAGER.getValue(queryId)
if(result.isInstanceOf[ErrorResult]){
log.warning(result.asInstanceOf[ErrorResult].getErrorMessage)
if (executionInfo != null ){
val ew = executionInfo.asInstanceOf[ExecutionInfo].getWorkflow
val nextCandidateQueryWorkflow = ew.getLowPriorityExecutionWorkflow
if(nextCandidateQueryWorkflow.isPresent){
log.info(s"Query failed in connector:${ew.getActorRef}. Retrying in another connector.")
retryQuery = true
self ! nextCandidateQueryWorkflow.get()
}
}
}
if(!retryQuery){
          //TODO Add two methods to StringUtils to retrieve AkkaActorRefUri, tokenizing with # for connectors,
          // and $ for clients
val target = executionInfo.asInstanceOf[ExecutionInfo].getSender
.replace("Actor[", "").replace("]", "").split("#")(0)
val clientActor = context.actorSelection(target)
var sendResultToClient = true
if(queryId.contains("#")){
if(queryId.endsWith("#1")){
result.setQueryId(queryId.split("#")(0))
} else {
sendResultToClient = false
}
}
if(!result.hasError){
if (executionInfo.asInstanceOf[ExecutionInfo].isPersistOnSuccess) {
val storedWorkflow = executionInfo.asInstanceOf[ExecutionInfo].getWorkflow
if(storedWorkflow.isInstanceOf[MetadataWorkflow]){
coordinator.persist(storedWorkflow.asInstanceOf[MetadataWorkflow], result.asInstanceOf[MetadataResult])
} else if (storedWorkflow.isInstanceOf[ManagementWorkflow]) {
coordinator.executeManagementOperation(storedWorkflow.asInstanceOf[ManagementWorkflow].createManagementOperationMessage())
}
}
if (executionInfo.asInstanceOf[ExecutionInfo].isUpdateOnSuccess) {
executionInfo.asInstanceOf[ExecutionInfo].getWorkflow match {
case mw: MetadataWorkflow =>
processUpdateMetadataWorkflow(mw, result)
case managementWorkflow: ManagementWorkflow => managementWorkflow.getExecutionType match {
case ExecutionType.DETACH_CONNECTOR => {
for (catalogName <- MetadataManager.MANAGER.getCluster(managementWorkflow.getClusterName).getPersistedCatalogs.asScala.toList){
sender ! UpdateMetadata(MetadataManager.MANAGER.getCatalog(catalogName), remove = true)
}
}
              case message => log.warning("Sending metadata updates cannot be performed for the ExecutionType: " + message.toString)
}
            case _ => log.warning("Attempt to update the metadata after an operation which is not expected: " + executionInfo.asInstanceOf[ExecutionInfo].getWorkflow.getClass)
}
}
if (executionInfo.asInstanceOf[ExecutionInfo].isRemoveOnSuccess ||
(result.isInstanceOf[QueryResult] && result.asInstanceOf[QueryResult].isLastResultSet && !executionInfo.asInstanceOf[ExecutionInfo].isTriggeredByStreaming)) {
ExecutionManager.MANAGER.deleteEntry(queryId)
}
}
if(sendResultToClient) {
if (executionInfo.asInstanceOf[ExecutionInfo].isTriggeredByStreaming) {
result match {
case res: QueryResult => res.setLastResultSet(false)
case _ =>
}
}
log.info("Send result to: " + target)
clientActor ! result
}
}
} catch {
case ex: ExecutionManagerException => {
log.error("Cannot access queryId actorRef associated value:" + System.lineSeparator() + ex.getMessage)
}
}
}
//TODO both cases below seem to be unused (ConnectResult and DisconnectResult are sent instead)
case ctc: ConnectToConnector =>
MetadataManager.MANAGER.setConnectorStatus(new ConnectorName(ctc.msg), data.Status.ONLINE)
log.info("Connected to connector")
case dfc: DisconnectFromConnector =>
MetadataManager.MANAGER.setConnectorStatus(new ConnectorName(dfc.msg), data.Status.OFFLINE)
log.info("Disconnected from connector")
case _ => {
sender ! new ExecutionException("Non recognized workflow")
}
}
def manageWorkflow(queryId: String, workflow: ExecutionWorkflow, explicitSender: Option[String]) = workflow match {
case metadataWorkflow: MetadataWorkflow => {
val executionInfo = new ExecutionInfo
executionInfo.setSender(StringUtils.getAkkaActorRefUri(explicitSender.getOrElse(sender), true))
executionInfo.setWorkflow(metadataWorkflow)
//Getting the connector name to tell sender
      val connectorsMetadata = MetadataManager.MANAGER.getConnectors(Status.ONLINE)
      val connector = connectorsMetadata.filter(connectorMetadata =>
        connectorMetadata.getActorRefs.contains(metadataWorkflow.getActorRef))
      if (connector.size > 0)
sender ! InfoResult(connector.apply(0).getName.getName, metadataWorkflow.getQueryId)
else
//Special case of first create catalog statement
sender ! InfoResult("none", metadataWorkflow.getQueryId)
if (metadataWorkflow.getExecutionType == ExecutionType.DROP_CATALOG) {
executionInfo.setQueryStatus(QueryStatus.IN_PROGRESS)
executionInfo.setPersistOnSuccess(false)
executionInfo.setRemoveOnSuccess(true)
executionInfo.setUpdateOnSuccess(true)
ExecutionManager.MANAGER.createEntry(queryId, executionInfo, true)
val result = MetadataResult.createSuccessMetadataResult(MetadataResult.OPERATION_DROP_CATALOG)
result.setQueryId(queryId)
explicitSender.fold{
sender ! result
}{
explSender =>
context.actorSelection(explSender) ! result
}
} else if (metadataWorkflow.getExecutionType == ExecutionType.ALTER_CATALOG) {
executionInfo.setQueryStatus(QueryStatus.IN_PROGRESS)
executionInfo.setPersistOnSuccess(true)
executionInfo.setUpdateOnSuccess(true)
ExecutionManager.MANAGER.createEntry(queryId, executionInfo, true)
val result = MetadataResult.createSuccessMetadataResult(MetadataResult.OPERATION_ALTER_CATALOG)
result.setQueryId(queryId)
explicitSender.fold{
sender ! result
}{
explSender =>
context.actorSelection(explSender) ! result
}
} else if (metadataWorkflow.getExecutionType == ExecutionType.DROP_TABLE) {
// Drop table in the Crossdata servers through the MetadataManager
coordinator.persistDropTable(metadataWorkflow.getTableName)
// Send action to the connector
val actorRef = context.actorSelection(metadataWorkflow.getActorRef)
actorRef.asInstanceOf[ActorSelection] ! metadataWorkflow.createMetadataOperationMessage()
// Prepare data for the reply of the connector
executionInfo.setQueryStatus(QueryStatus.IN_PROGRESS)
executionInfo.setPersistOnSuccess(false)
executionInfo.setRemoveOnSuccess(true)
executionInfo.setUpdateOnSuccess(true)
executionInfo.setSender(StringUtils.getAkkaActorRefUri(explicitSender.getOrElse(sender), true))
executionInfo.setWorkflow(metadataWorkflow)
ExecutionManager.MANAGER.createEntry(queryId, executionInfo, true)
} else if(metadataWorkflow.getExecutionType == ExecutionType.UNREGISTER_TABLE) {
// Drop table in the Crossdata servers through the MetadataManager
coordinator.persistDropTable(metadataWorkflow.getTableName)
var result:MetadataResult = null
val tableMetadata = metadataWorkflow.getTableMetadata
updateMetadata(tableMetadata, tableMetadata.getClusterRef, toRemove = true)
executionInfo.setQueryStatus(QueryStatus.PLANNED)
result = MetadataResult.createSuccessMetadataResult(MetadataResult.OPERATION_UNREGISTER_TABLE)
result.setQueryId(queryId)
explicitSender.fold{
sender ! result
}{
explSender =>
context.actorSelection(explSender) ! result
}
} else if (metadataWorkflow.getExecutionType == ExecutionType.ALTER_TABLE) {
executionInfo.setQueryStatus(QueryStatus.IN_PROGRESS)
executionInfo.setPersistOnSuccess(true)
executionInfo.setRemoveOnSuccess(true)
executionInfo.setUpdateOnSuccess(true)
executionInfo.setSender(StringUtils.getAkkaActorRefUri(explicitSender.getOrElse(sender), true))
executionInfo.setWorkflow(metadataWorkflow)
ExecutionManager.MANAGER.createEntry(queryId, executionInfo, true)
val actorRef = context.actorSelection(metadataWorkflow.getActorRef)
log.info("ActorRef: " + actorRef.toString())
actorRef.asInstanceOf[ActorSelection] ! metadataWorkflow.createMetadataOperationMessage()
}else if (metadataWorkflow.getExecutionType == ExecutionType.CREATE_INDEX
|| metadataWorkflow.getExecutionType == ExecutionType.CREATE_GLOBAL_INDEX) {
if (metadataWorkflow.isIfNotExists && MetadataManager.MANAGER.exists(metadataWorkflow.getIndexName)) {
val result: MetadataResult = MetadataResult.createSuccessMetadataResult(
MetadataResult.OPERATION_CREATE_INDEX, metadataWorkflow.isIfNotExists)
result.setQueryId(queryId)
explicitSender.fold{
sender ! result
}{
explSender =>
context.actorSelection(explSender) ! result
}
} else {
executionInfo.setQueryStatus(QueryStatus.IN_PROGRESS)
executionInfo.setPersistOnSuccess(true)
executionInfo.setRemoveOnSuccess(true)
executionInfo.setUpdateOnSuccess(true)
executionInfo.setSender(StringUtils.getAkkaActorRefUri(explicitSender.getOrElse(sender), true))
executionInfo.setWorkflow(metadataWorkflow)
ExecutionManager.MANAGER.createEntry(queryId, executionInfo, true)
val actorRef = context.actorSelection(metadataWorkflow.getActorRef)
log.info("ActorRef: " + actorRef.toString())
actorRef.asInstanceOf[ActorSelection] ! metadataWorkflow.createMetadataOperationMessage()
}
} else if (metadataWorkflow.getExecutionType == ExecutionType.DROP_INDEX) {
if (metadataWorkflow.isIfExists && !MetadataManager.MANAGER.exists(metadataWorkflow.getIndexName)) {
val result: MetadataResult = MetadataResult.createSuccessMetadataResult(
MetadataResult.OPERATION_DROP_INDEX, metadataWorkflow.isIfExists)
result.setQueryId(queryId)
explicitSender.fold{
sender ! result
}{
explSender =>
context.actorSelection(explSender) ! result
}
} else {
executionInfo.setQueryStatus(QueryStatus.IN_PROGRESS)
executionInfo.setPersistOnSuccess(true)
executionInfo.setRemoveOnSuccess(true)
executionInfo.setUpdateOnSuccess(true)
executionInfo.setSender(StringUtils.getAkkaActorRefUri(explicitSender.getOrElse(sender), true))
executionInfo.setWorkflow(metadataWorkflow)
ExecutionManager.MANAGER.createEntry(queryId, executionInfo, true)
val actorRef = context.actorSelection(metadataWorkflow.getActorRef)
log.info("ActorRef: " + actorRef.toString())
actorRef.asInstanceOf[ActorSelection] ! metadataWorkflow.createMetadataOperationMessage()
}
} else if (metadataWorkflow.getExecutionType == ExecutionType.DISCOVER_METADATA) {
executionInfo.setQueryStatus(QueryStatus.IN_PROGRESS)
executionInfo.setPersistOnSuccess(false)
executionInfo.setRemoveOnSuccess(true)
executionInfo.setUpdateOnSuccess(true)
executionInfo.setSender(StringUtils.getAkkaActorRefUri(explicitSender.getOrElse(sender), true))
executionInfo.setWorkflow(metadataWorkflow)
ExecutionManager.MANAGER.createEntry(queryId, executionInfo, true)
val actorRef = context.actorSelection(metadataWorkflow.getActorRef)
log.info("ActorRef: " + actorRef.toString())
actorRef.asInstanceOf[ActorSelection] ! metadataWorkflow.createMetadataOperationMessage()
} else if (metadataWorkflow.getExecutionType == ExecutionType.IMPORT_CATALOGS
|| metadataWorkflow.getExecutionType == ExecutionType.IMPORT_CATALOG
|| metadataWorkflow.getExecutionType == ExecutionType.IMPORT_TABLE) {
executionInfo.setQueryStatus(QueryStatus.IN_PROGRESS)
executionInfo.setPersistOnSuccess(true)
executionInfo.setRemoveOnSuccess(true)
executionInfo.setUpdateOnSuccess(true)
executionInfo.setSender(StringUtils.getAkkaActorRefUri(explicitSender.getOrElse(sender), true))
executionInfo.setWorkflow(metadataWorkflow)
ExecutionManager.MANAGER.createEntry(queryId, executionInfo, true)
val actorRef = context.actorSelection(metadataWorkflow.getActorRef)
log.info("ActorRef: " + actorRef.toString())
actorRef.asInstanceOf[ActorSelection] ! metadataWorkflow.createMetadataOperationMessage()
} else if (metadataWorkflow.getExecutionType == ExecutionType.CREATE_CATALOG){
coordinator.persistCreateCatalog(metadataWorkflow.getCatalogMetadata, metadataWorkflow.isIfNotExists)
executionInfo.setQueryStatus(QueryStatus.EXECUTED)
val result = MetadataResult.createSuccessMetadataResult(MetadataResult.OPERATION_CREATE_CATALOG)
result.setQueryId(queryId)
explicitSender.fold{
sender ! result
}{
explSender =>
context.actorSelection(explSender) ! result
}
}else if (metadataWorkflow.getExecutionType == ExecutionType.CREATE_TABLE_AND_CATALOG ||
metadataWorkflow.getExecutionType == ExecutionType.CREATE_TABLE) {
if (metadataWorkflow.isIfNotExists && MetadataManager.MANAGER.exists(metadataWorkflow.getTableName)) {
val result: MetadataResult = MetadataResult.createSuccessMetadataResult(
MetadataResult.OPERATION_CREATE_TABLE, metadataWorkflow.isIfNotExists)
result.setQueryId(queryId)
explicitSender.fold{
sender ! result
}{
explSender =>
context.actorSelection(explSender) ! result
}
} else {
if (metadataWorkflow.getActorRef != null && metadataWorkflow.getActorRef.length() > 0) {
val actorRef = context.actorSelection(metadataWorkflow.getActorRef)
executionInfo.setQueryStatus(QueryStatus.IN_PROGRESS)
executionInfo.setPersistOnSuccess(true)
executionInfo.setUpdateOnSuccess(true)
ExecutionManager.MANAGER.createEntry(queryId, executionInfo, true)
log.info("ActorRef: " + actorRef.toString())
actorRef.asInstanceOf[ActorSelection] ! metadataWorkflow.createMetadataOperationMessage()
} else {
throw new CoordinationException("Actor ref URI is null");
}
}
} else if (metadataWorkflow.getExecutionType == ExecutionType.CREATE_TABLE_REGISTER_CATALOG) {
//Connector is able to create the catalog of the registered table
if (metadataWorkflow.getActorRef != null && metadataWorkflow.getActorRef.length() > 0) {
val actorRef = context.actorSelection(metadataWorkflow.getActorRef)
executionInfo.setQueryStatus(QueryStatus.IN_PROGRESS)
executionInfo.setPersistOnSuccess(true)
executionInfo.setUpdateOnSuccess(true)
ExecutionManager.MANAGER.createEntry(queryId, executionInfo, true)
log.info("ActorRef: " + actorRef.toString())
actorRef.asInstanceOf[ActorSelection] ! metadataWorkflow.createMetadataOperationMessage()
} else {
throw new CoordinationException("Actor ref URI is null");
}
}else if (metadataWorkflow.getExecutionType == ExecutionType.REGISTER_TABLE
|| metadataWorkflow.getExecutionType == ExecutionType.REGISTER_TABLE_AND_CATALOG) {
if (metadataWorkflow.isIfNotExists && MetadataManager.MANAGER.exists(metadataWorkflow.getTableName)) {
val result: MetadataResult = MetadataResult.createSuccessMetadataResult(MetadataResult.OPERATION_REGISTER_TABLE, metadataWorkflow.isIfNotExists)
result.setQueryId(queryId)
explicitSender.fold{
sender ! result
}{
explSender =>
context.actorSelection(explSender) ! result
}
} else {
val result = MetadataResult.createSuccessMetadataResult(MetadataResult.OPERATION_REGISTER_TABLE)
if (metadataWorkflow.getExecutionType == ExecutionType.REGISTER_TABLE_AND_CATALOG) {
coordinator.persistCreateCatalogInCluster(metadataWorkflow.getCatalogName, metadataWorkflow.getClusterName)
}
//Connector is not able to create the catalog of the registered table
coordinator.persistCreateTable(metadataWorkflow.getTableMetadata)
val tableMetadata = metadataWorkflow.getTableMetadata
updateMetadata(tableMetadata, tableMetadata.getClusterRef, toRemove = false)
executionInfo.setQueryStatus(QueryStatus.EXECUTED)
result.setQueryId(queryId)
explicitSender.fold{
sender ! result
}{
explSender =>
context.actorSelection(explSender) ! result
}
}
} else {
throw new CoordinationException("Operation not supported yet");
}
}
case storageWorkflow: StorageWorkflow => {
log.debug("CoordinatorActor: StorageWorkflow received")
val executionInfo = new ExecutionInfo
executionInfo.setSender(StringUtils.getAkkaActorRefUri(explicitSender.getOrElse(sender), false))
executionInfo.setWorkflow(storageWorkflow)
executionInfo.setQueryStatus(QueryStatus.IN_PROGRESS)
log.info("\\nCoordinate workflow: " + storageWorkflow.toString)
if ((storageWorkflow.getPreviousExecutionWorkflow == null)
&& (ResultType.RESULTS.equals(storageWorkflow.getResultType))) {
executionInfo.setRemoveOnSuccess(true)
ExecutionManager.MANAGER.createEntry(queryId, executionInfo)
//Getting the connector name to tell sender
        val connectorsMetadata = MetadataManager.MANAGER.getConnectors(Status.ONLINE)
        val connector = connectorsMetadata.filter(connectorMetadata => connectorMetadata.getActorRefs.contains(storageWorkflow.getActorRef))
sender ! InfoResult(connector.apply(0).getName.getName, storageWorkflow.getQueryId)
val actorRef = context.actorSelection(storageWorkflow.getActorRef())
actorRef ! storageWorkflow.getStorageOperation()
} else if ((storageWorkflow.getPreviousExecutionWorkflow != null)
&& (ResultType.TRIGGER_EXECUTION.equals(storageWorkflow.getPreviousExecutionWorkflow.getResultType))) {
val storedExInfo = new ExecutionInfo
storedExInfo.setSender(StringUtils.getAkkaActorRefUri(explicitSender.getOrElse(sender), false))
val previousExecutionWorkflow = storageWorkflow.getPreviousExecutionWorkflow
storedExInfo.setWorkflow(previousExecutionWorkflow)
storedExInfo.setQueryStatus(QueryStatus.IN_PROGRESS)
if (previousExecutionWorkflow.isInstanceOf[QueryWorkflow]){
//TODO make it beautiful
storedExInfo.setRemoveOnSuccess(Execute.getClass.isInstance(previousExecutionWorkflow.asInstanceOf[QueryWorkflow].getExecuteOperation("")))
}else{
storedExInfo.setRemoveOnSuccess(false);
}
storedExInfo.setTriggeredByStreaming(true)
ExecutionManager.MANAGER.createEntry(queryId, storedExInfo)
val actorRef = StringUtils.getAkkaActorRefUri(previousExecutionWorkflow.getActorRef, false)
val firstConnectorRef = context.actorSelection(actorRef)
log.info(s"Sending init trigger operation: ${queryId} to $firstConnectorRef")
firstConnectorRef ! TriggerExecution(previousExecutionWorkflow, executionInfo)
}
}
case managementWorkflow: ManagementWorkflow => {
log.info("ManagementWorkflow received")
var sendResultToClient = true
if (managementWorkflow.getExecutionType == ExecutionType.ATTACH_CONNECTOR) {
val credentials = null
val managementOperation = managementWorkflow.createManagementOperationMessage()
val attachConnectorOperation = managementOperation.asInstanceOf[AttachConnector]
val clusterName = attachConnectorOperation.targetCluster
val clusterMetadata = MetadataManager.MANAGER.getCluster(clusterName)
val datastoreName = clusterMetadata.getDataStoreRef
val datastoreMetadata = MetadataManager.MANAGER.getDataStore(datastoreName)
val clusterAttachedOpts = datastoreMetadata.getClusterAttachedRefs.get(clusterName)
val clusterOptions = SelectorHelper.convertSelectorMapToStringMap(clusterAttachedOpts.getProperties)
val connectorOptions = SelectorHelper.convertSelectorMapToStringMap(attachConnectorOperation.options)
val connectorClusterConfig = new ConnectorClusterConfig(
clusterName, connectorOptions, clusterOptions)
connectorClusterConfig.setDataStoreName(datastoreName)
val actorRefs = managementWorkflow.getActorRefs
var count = 1
for(actorRef <- actorRefs){
val connectorSelection = context.actorSelection(StringUtils.getAkkaActorRefUri(actorRef, false))
connectorSelection ! new Connect(queryId+"#"+count, credentials, connectorClusterConfig)
count+=1
}
log.info("connectorOptions: " + connectorClusterConfig.getConnectorOptions.toString + " clusterOptions: " +
connectorClusterConfig.getClusterOptions.toString)
val executionInfo = new ExecutionInfo()
executionInfo.setQueryStatus(QueryStatus.IN_PROGRESS)
executionInfo.setSender(StringUtils.getAkkaActorRefUri(explicitSender.getOrElse(sender), false))
executionInfo.setWorkflow(managementWorkflow)
executionInfo.setPersistOnSuccess(true)
executionInfo.setRemoveOnSuccess(true)
count = 1
for(actorRef <- actorRefs){
ExecutionManager.MANAGER.createEntry(queryId+"#"+count, executionInfo, true)
count+=1
}
sendResultToClient = false
} else if (managementWorkflow.getExecutionType == ExecutionType.DETACH_CONNECTOR) {
val managementOperation = managementWorkflow.createManagementOperationMessage()
val detachConnectorOperation = managementOperation.asInstanceOf[DetachConnector]
val clusterName = detachConnectorOperation.targetCluster
val connectorClusterConfig = new ConnectorClusterConfig(
clusterName,
SelectorHelper.convertSelectorMapToStringMap(
MetadataManager.MANAGER.getConnector(
new ConnectorName(detachConnectorOperation.connectorName.getName)).getClusterProperties.get(clusterName)),
SelectorHelper.convertSelectorMapToStringMap(
MetadataManager.MANAGER.getCluster(clusterName).getOptions)
)
val clusterMetadata = MetadataManager.MANAGER.getCluster(clusterName)
connectorClusterConfig.setDataStoreName(clusterMetadata.getDataStoreRef)
val actorRefs = managementWorkflow.getActorRefs
for(actorRef <- actorRefs){
val connectorSelection = context.actorSelection(StringUtils.getAkkaActorRefUri(actorRef, false))
connectorSelection ! new DisconnectFromCluster(queryId, connectorClusterConfig.getName.getName)
}
createExecutionInfoForDetach(managementWorkflow, queryId)
} else if (managementWorkflow.getExecutionType == ExecutionType.FORCE_DETACH_CONNECTOR) {
val actorRefs = managementWorkflow.getActorRefs
for(actorRef <- actorRefs){
if(actorRef != null){
val connectorSelection = context.actorSelection(StringUtils.getAkkaActorRefUri(actorRef, false))
connectorSelection ! new DisconnectFromCluster(queryId, managementWorkflow.getConnectorClusterConfig.getName.getName)
}
}
createExecutionInfoForDetach(managementWorkflow, queryId)
}
if(sendResultToClient){
explicitSender.fold{
sender ! coordinator.executeManagementOperation(managementWorkflow.createManagementOperationMessage())
}{
explSender =>
context.actorSelection(explSender) ! coordinator.executeManagementOperation(managementWorkflow.createManagementOperationMessage)
}
}
}
case queryWorkflow: QueryWorkflow => {
log.info("\\nCoordinatorActor: QueryWorkflow received")
val executionInfo = new ExecutionInfo
executionInfo.setSender(StringUtils.getAkkaActorRefUri(explicitSender.getOrElse(sender), true))
executionInfo.setWorkflow(queryWorkflow)
log.info("\\nCoordinate workflow: " + queryWorkflow.toString)
executionInfo.setQueryStatus(QueryStatus.IN_PROGRESS)
//Getting the connector name to tell sender
      val connectorsMetadata = MetadataManager.MANAGER.getConnectors(Status.ONLINE)
      val connector = connectorsMetadata.filter(connectorMetadata => connectorMetadata.getActorRefs.contains(executionInfo.getWorkflow.getActorRef))
if (ResultType.RESULTS.equals(queryWorkflow.getResultType)) {
//TODO AkkaRefURI should be stored. Indeed, it is likely to be stored instead of toString.
val actorRef = StringUtils.getAkkaActorRefUri(queryWorkflow.getActorRef, false)
val actorSelection = context.actorSelection(actorRef)
val operation = queryWorkflow.getExecuteOperation(queryId)
executionInfo.setRemoveOnSuccess(operation.isInstanceOf[Execute])
ExecutionManager.MANAGER.createEntry(queryId, executionInfo, true)
//Send to sender in which connector will be executed the query
sender ! InfoResult(connector.apply(0).getName.getName , executionInfo.getWorkflow.getQueryId)
actorSelection.asInstanceOf[ActorSelection] ! operation
log.info("\\nMessage sent to " + actorRef.toString())
} else if (ResultType.TRIGGER_EXECUTION.equals(queryWorkflow.getResultType) || ResultType.GLOBAL_INDEX_RESULTS.equals(queryWorkflow.getResultType)) {
val actorRef = StringUtils.getAkkaActorRefUri(queryWorkflow.getActorRef, false)
val firstConnectorActorSelection = context.actorSelection(actorRef)
val isWindowFound = QueryWorkflow.checkStreaming(queryWorkflow.getWorkflow.getLastStep)
executionInfo.setTriggeredByStreaming(isWindowFound)
executionInfo.setRemoveOnSuccess(!isWindowFound)
ExecutionManager.MANAGER.createEntry(queryId, executionInfo, true)
val nextExecutionInfo = new ExecutionInfo
nextExecutionInfo.setSender(StringUtils.getAkkaActorRefUri(explicitSender.getOrElse(sender), true))
nextExecutionInfo.setWorkflow(queryWorkflow.getNextExecutionWorkflow)
nextExecutionInfo.getWorkflow.setTriggerStep(executionInfo.getWorkflow.getTriggerStep)
nextExecutionInfo.setRemoveOnSuccess(!isWindowFound)
nextExecutionInfo.setTriggeredByStreaming(isWindowFound)
//Send to sender in which connector will be executed the query
sender ! InfoResult(connector.apply(0).getName.getName , executionInfo.getWorkflow.getQueryId)
firstConnectorActorSelection ! TriggerExecution(queryWorkflow, nextExecutionInfo)
log.info(s"Sending init trigger operation: ${queryId} to $firstConnectorActorSelection")
}
}
case _ => {
log.error("Non recognized workflow")
}
}
def createExecutionInfoForDetach(managementWorkflow: ManagementWorkflow, queryId: String): Unit = {
val executionInfo = new ExecutionInfo()
executionInfo.setQueryStatus(QueryStatus.IN_PROGRESS)
executionInfo.setSender(StringUtils.getAkkaActorRefUri(sender, false))
executionInfo.setWorkflow(managementWorkflow)
executionInfo.setUpdateOnSuccess(true)
ExecutionManager.MANAGER.createEntry(queryId, executionInfo, true)
}
private def processUpdateMetadataWorkflow(mw: MetadataWorkflow, result: Result) = mw.getExecutionType match {
case ExecutionType.CREATE_TABLE | ExecutionType.ALTER_TABLE => {
val tableMetadata = mw.getTableMetadata
updateMetadata(tableMetadata, tableMetadata.getClusterRef, toRemove = false)
}
case ExecutionType.CREATE_TABLE_AND_CATALOG | ExecutionType.CREATE_TABLE_REGISTER_CATALOG => {
val tableMetadata = mw.getTableMetadata
//TODO updateMetadata(mw.getCatalogMetadata, ...)?
updateMetadata(tableMetadata, tableMetadata.getClusterRef, toRemove = false)
}
case ExecutionType.CREATE_INDEX | ExecutionType.DROP_INDEX | ExecutionType.CREATE_GLOBAL_INDEX=> ()
case ExecutionType.DROP_TABLE => {
val tableMetadata = mw.getTableMetadata
updateMetadata(tableMetadata, tableMetadata.getClusterRef, toRemove = true)
}
case ExecutionType.ALTER_CATALOG | ExecutionType.CREATE_CATALOG => {
val catalogMetadata = mw.getCatalogMetadata
for (tableMetadata <- mw.getCatalogMetadata.getTables.values.asScala.toList){
updateMetadata(catalogMetadata, tableMetadata.asInstanceOf[TableMetadata].getClusterRef, toRemove = false)
}
}
case ExecutionType.DROP_CATALOG => {
val catalogMetadata = mw.getCatalogMetadata
for (tableMetadata <- mw.getCatalogMetadata.getTables.values.asScala.toList)
yield updateMetadata(catalogMetadata, tableMetadata.asInstanceOf[TableMetadata].getClusterRef, toRemove = true)
}
case ExecutionType.IMPORT_TABLE => {
for {
tableMetadata <- result.asInstanceOf[MetadataResult].getTableList.asScala.toList
} yield updateMetadata(tableMetadata, tableMetadata.asInstanceOf[TableMetadata].getClusterRef, toRemove = false)
}
case ExecutionType.IMPORT_CATALOGS | ExecutionType.IMPORT_CATALOG => {
for {
catalogMetadata <- result.asInstanceOf[MetadataResult].getCatalogMetadataList.asScala.toList
} yield updateMetadata(catalogMetadata, toRemove = false)
}
    case message => log.warning("Sending metadata updates cannot be performed for the ExecutionType: " + message.toString)
}
/**
   * Send a message to the connectors attached to a cluster so that they update their metadata. It is called whenever an update operation that persists data finishes.
   * @param uMetadata the new metadata to be propagated
   * @param clusterInvolved the cluster which contains the metadata to update
   * @param toRemove true if the metadata has been deleted, false if it has been created or updated
*/
private def updateMetadata(uMetadata: UpdatableMetadata, clusterInvolved: ClusterName, toRemove: Boolean): Unit = {
val listConnectorMetadata = MetadataManager.MANAGER.getAttachedConnectors(Status.ONLINE, clusterInvolved)
listConnectorMetadata.asScala.toList.flatMap(actorToBroadcast).foreach(actor => broadcastMetadata(actor, uMetadata))
def actorToBroadcast(cMetadata: ConnectorMetadata): List[ActorSelection] =
StringUtils.getAkkaActorRefUri(cMetadata.getActorRef(host), false) match {
case null => List()
case strActorRefUri => List(context.actorSelection(strActorRefUri))
}
def broadcastMetadata(bcConnectorActor: ActorSelection, uMetadata: UpdatableMetadata) = {
log.debug("Updating metadata in " + bcConnectorActor.toString)
bcConnectorActor ! UpdateMetadata(uMetadata, toRemove)
}
}
/**
   * Send a message to the connectors attached to a cluster so that they update their metadata. It is called whenever an update operation that persists data finishes.
   * @param cMetadata the new catalog metadata to be propagated
   * @param toRemove true if the metadata has been deleted, false if it has been created or updated
*/
private def updateMetadata(cMetadata: CatalogMetadata, toRemove: Boolean): Unit = {
val setConnectorMetadata = cMetadata.getTables.values().asScala.toList.flatMap( tableMetadata => MetadataManager.MANAGER.getAttachedConnectors(Status.ONLINE, tableMetadata.getClusterRef).asScala.toList).toSet
setConnectorMetadata.flatMap(actorToBroadcast).foreach(actor => broadcastMetadata(actor, cMetadata))
def actorToBroadcast(cMetadata: ConnectorMetadata): List[ActorSelection] =
StringUtils.getAkkaActorRefUri(cMetadata.getActorRef(host), false) match {
case null => List()
case strActorRefUri => List(context.actorSelection(strActorRefUri))
}
def broadcastMetadata(bcConnectorActor: ActorSelection, uMetadata: UpdatableMetadata) = {
log.debug("Updating metadata in " + bcConnectorActor.toString)
bcConnectorActor ! UpdateMetadata(uMetadata, toRemove)
}
}
}
| pfcoperez/crossdata | crossdata-server/src/main/scala/com/stratio/crossdata/server/actors/CoordinatorActor.scala | Scala | apache-2.0 | 39,954 |
package sharry.common
import java.util.concurrent.ForkJoinPool
import java.util.concurrent.ForkJoinPool.ForkJoinWorkerThreadFactory
import java.util.concurrent.ForkJoinWorkerThread
import java.util.concurrent.atomic.AtomicLong
import java.util.concurrent.{Executors, ThreadFactory}
import scala.concurrent._
import cats.effect._
object ThreadFactories {
def ofName(prefix: String): ThreadFactory =
new ThreadFactory {
val counter = new AtomicLong(0)
override def newThread(r: Runnable): Thread = {
val t = Executors.defaultThreadFactory().newThread(r)
t.setName(s"$prefix-${counter.getAndIncrement()}")
t
}
}
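  /** Same naming scheme as [[ofName]], but for the worker threads of a ForkJoinPool. */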
def ofNameFJ(prefix: String): ForkJoinWorkerThreadFactory =
new ForkJoinWorkerThreadFactory {
val tf = ForkJoinPool.defaultForkJoinWorkerThreadFactory
val counter = new AtomicLong(0)
def newThread(pool: ForkJoinPool): ForkJoinWorkerThread = {
val t = tf.newThread(pool)
t.setName(s"$prefix-${counter.getAndIncrement()}")
t
}
}
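  /** Wraps a lazily built executor-backed ExecutionContext in a Resource that shuts the pool down on release. */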
def executorResource[F[_]: Sync](
c: => ExecutionContextExecutorService
): Resource[F, ExecutionContextExecutorService] =
Resource.make(Sync[F].delay(c))(ec => Sync[F].delay(ec.shutdown))
def cached[F[_]: Sync](
tf: ThreadFactory
): Resource[F, ExecutionContextExecutorService] =
executorResource(
ExecutionContext.fromExecutorService(Executors.newCachedThreadPool(tf))
)
def fixed[F[_]: Sync](
n: Int,
tf: ThreadFactory
): Resource[F, ExecutionContextExecutorService] =
executorResource(
ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(n, tf))
)
def workSteal[F[_]: Sync](
n: Int,
tf: ForkJoinWorkerThreadFactory
): Resource[F, ExecutionContextExecutorService] =
executorResource(
ExecutionContext.fromExecutorService(
new ForkJoinPool(n, tf, null, true)
)
)
def workSteal[F[_]: Sync](
tf: ForkJoinWorkerThreadFactory
): Resource[F, ExecutionContextExecutorService] =
workSteal[F](Runtime.getRuntime().availableProcessors() + 1, tf)
}
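// A minimal usage sketch, not part of the original source: it shows how a named cached pool is
// acquired as a Resource and released when the enclosing scope finishes. It assumes cats-effect's
// IO (already brought in by the cats.effect._ import above); the object and prefix names are
// illustrative only.
object ThreadFactoriesUsageSketch {
  // Threads created by this pool are named "sketch-0", "sketch-1", ...
  def demo: IO[Unit] =
    ThreadFactories
      .cached[IO](ThreadFactories.ofName("sketch"))
      .use(ec => IO(println(s"running on $ec")))
}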
| eikek/sharry | modules/common/src/main/scala/sharry/common/ThreadFactories.scala | Scala | gpl-3.0 | 2,140 |
/**
* Copyright (C) 2009-2011 the original author or authors.
* See the notice.md file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.fusesource.scalate.rest
import java.lang.annotation.Annotation
import java.lang.reflect.Type
import java.io.OutputStream
import javax.servlet.ServletContext
import javax.servlet.http.{ HttpServletResponse, HttpServletRequest }
import javax.ws.rs.ext.{ Provider, MessageBodyWriter }
import javax.ws.rs.core.{ UriInfo, MultivaluedMap, MediaType, Context }
import org.fusesource.scalate.servlet.{ ServletRenderContext, ServletTemplateEngine, ServletHelper, TemplateEngineServlet }
import org.fusesource.scalate.TemplateException
import org.fusesource.scalate.util.{ Log, ResourceNotFoundException }
import javax.ws.rs.WebApplicationException
object ScalateTemplateProvider extends Log
/**
* A template provider for <a href="https://jersey.dev.java.net/">Jersey</a> using Scalate templates
* to produce HTML of an object.
*
* @version $Revision : 1.1 $
*/
@Provider
class ScalateTemplateProvider extends MessageBodyWriter[AnyRef] {
import ScalateTemplateProvider._
@Context
var servletContext: ServletContext = _
@Context
var request: HttpServletRequest = _
@Context
var response: HttpServletResponse = _
@Context
var uriInfo: UriInfo = _
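  /**
   * Looks up a template for the given type by probing "/<fully/qualified/ClassName>.<ext>" for each
   * extension known to the engine. Returns the first path that loads (or that fails with a template
   * error, so the error can surface when rendering), or null when no template exists.
   */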
def resolve(engine: ServletTemplateEngine, argType: Class[_]): String = {
val argBase = argType.getName.replace('.', '/')
engine.extensions.foreach { ext =>
val path = "/" + argBase + "." + ext
try {
engine.load(path)
return path
} catch {
case x: ResourceNotFoundException =>
case x: TemplateException =>
return path
}
}
null
}
def getSize(arg: AnyRef, argType: Class[_], genericType: Type, annotations: Array[Annotation], mediaType: MediaType) = -1L
def isWriteable(argType: Class[_], genericType: Type, annotations: Array[Annotation], mediaType: MediaType) = {
var answer = false
if (mediaType.getType == "text" && mediaType.getSubtype == "html") {
val engine = ServletTemplateEngine(servletContext)
if (engine != null && engine.resourceLoader != null) {
val path = resolve(engine, argType)
answer = path != null
}
}
answer
}
def writeTo(arg: AnyRef, argType: Class[_], genericType: Type, annotations: Array[Annotation], media: MediaType, headers: MultivaluedMap[String, AnyRef], out: OutputStream) = {
// Ensure headers are committed
out.flush()
val engine = ServletTemplateEngine(servletContext)
val path = resolve(engine, argType)
try {
assert(path != null)
request.setAttribute("uri_info", uriInfo)
request.setAttribute("it", arg)
val context = new ServletRenderContext(engine, request, response, servletContext)
context.include(path, true)
} catch {
case e: Exception =>
// lets forward to the error handler
var notFound = true
for (uri <- ServletHelper.errorUris() if notFound) {
try {
val template = engine.load(uri)
request.setAttribute("javax.servlet.error.exception", e)
request.setAttribute("javax.servlet.error.exception_type", e.getClass)
request.setAttribute("javax.servlet.error.message", e.getMessage)
request.setAttribute("javax.servlet.error.request_uri", request.getRequestURI)
request.setAttribute("javax.servlet.error.servlet_name", request.getServerName)
// TODO how to get the status code???
val status = 500
request.setAttribute("javax.servlet.error.status_code", status)
request.setAttribute("it", e)
TemplateEngineServlet.render(uri, engine, servletContext, request, response)
notFound = false
} catch {
case _: Exception =>
}
}
if (notFound) {
throw createContainerException(e)
}
}
}
protected def createContainerException(e: Exception) = {
new WebApplicationException(e)
}
}
| maslovalex/scalate | scalate-jaxrs/src/main/scala/org/fusesource/scalate/rest/ScalateTemplateProvider.scala | Scala | apache-2.0 | 4,697
package net.sansa_stack.query.spark.graph.jena.expression
import net.sansa_stack.query.spark.graph.jena.util.Result
import org.apache.jena.graph.Node
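/**
 * Expression node for the SPARQL LANG function. Its single argument is resolved against a
 * solution mapping and may be either a bound variable or a constant value.
 */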
class Lang(expr: Expression) extends FunctionOne(expr) {
private val tag = "Lang"
override def getValue(result: Map[Node, Node]): Node = {
    // Evaluating against a raw binding map is not supported for this expression.
throw new UnsupportedOperationException
}
override def getValue(result: Result[Node]): Node = {
var lang: Node = null
expr match {
case e: NodeVar => lang = result.getValue(e.getNode)
case e: NodeVal => lang = e.getNode
case _ => throw new TypeNotPresentException("Variable or Value", new Throwable)
}
lang
}
override def getTag: String = { tag }
}
| SANSA-Stack/SANSA-RDF | sansa-query/sansa-query-spark/src/main/scala/net/sansa_stack/query/spark/graph/jena/expression/Lang.scala | Scala | apache-2.0 | 715 |
package org.broadinstitute.clio.util.model
import scala.util.Random
/**
* A unique identifier for each operation that modifies the database.
*/
final class UpsertId private (val id: String) extends AnyVal {
/**
* The filename used for persisting a document's upsert data.
*
* @return the filename where the upsert with the given ID's data is stored
*/
def persistenceFilename: String = s"$id.json"
}
/**
* Generate a lexically-ordered, unique ID based on the Firebase Push ID.
*
 * Each ID is a 20-character string: 8 characters of timestamp
 * followed by 12 characters of random (yet sequential) data.
*
* A recent one looks like this: -KuzbQJIFBhwvtkvrBHF
*
* @see [[https://firebase.googleblog.com/2015/02/the-2120-ways-to-ensure-unique_68.html]]
*/
object UpsertId {
implicit val upsertIdOrdering: Ordering[UpsertId] = Ordering.by(_.id)
private val source = new Random()
/**
   * Map from 6-bit values to their character encoding (64 characters).
*/
private val Encoding =
"-0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz"
/**
* There are this many characters of timestamp data.
*/
private val TimestampCount = 8
/**
* There are this many characters of randomized data.
*/
private val RandomCount = 12
/**
* The length of a valid upsert ID.
*/
val IdLength: Int = TimestampCount + RandomCount
/**
* The latest time used to construct an ID.
*/
private var was = 0L
/**
* 8 characters encoded from `was`.
*/
private var timestamp = "timestam"
/**
* 12 characters of monotonic increment seeded with a random value.
*/
private var randomCharacters = Array.fill[Int](12)(Encoding.length)
/**
* Return a random index into `Encoding`.
*/
private val next6bitIndex = (_: Int) => source.nextInt(Encoding.length)
/**
* Return with new values for was, timestamp, and randomCharacters.
*
* @param now is the current system time
*/
private def refreshRandomState(now: Long): Unit = {
was = now
timestamp = Stream
.iterate(now, TimestampCount)(_ / Encoding.length)
.map(n => Encoding((n % Encoding.length).toInt))
.reverse
.mkString
randomCharacters =
Stream.iterate(next6bitIndex(0), RandomCount)(next6bitIndex(_)).toArray
}
/**
* Increment `randomCharacters` or wait until `now` changes when
* `randomCharacters` would roll over to 0.
*/
private def incrementRandomBytes(now: Long): Unit = {
val where = randomCharacters.lastIndexWhere(_ != Encoding.length - 1)
val randomRollover = where == -1
if (randomRollover) {
var still = now
while (still == was) {
still = System.currentTimeMillis()
refreshRandomState(now)
}
} else {
randomCharacters(where) = randomCharacters(where) + 1
for (n <- where + 1 until RandomCount) randomCharacters(n) = 0
}
}
/**
* Check if `idText` is a valid upsert ID.
*
* @param idText the text to validate
* @return true if `idText` is a valid ID
*/
def isValidId(idText: String): Boolean = {
idText.length == IdLength && idText.forall(Encoding.contains(_))
}
/**
* Create an ID from `idText`. The text must be a valid ID.
*
* @param idText the text to construct an ID from
* @return the created ID or `None`
*/
def fromString(idText: String): Option[UpsertId] = {
    Some(idText).filter(isValidId).map(new UpsertId(_))
}
/**
* Return the next unique ID.
*
* @return a 20-character unique ID
*/
val nextId: () => UpsertId = () =>
synchronized {
val now = System.currentTimeMillis()
if (now == was) {
incrementRandomBytes(now)
} else {
refreshRandomState(now)
}
new UpsertId(timestamp + randomCharacters.map(Encoding(_)).mkString)
}
}
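// A minimal usage sketch, not part of the original source; the object name is hypothetical.
// It only exercises the public API above: ID generation, validation and round-tripping.
object UpsertIdUsageSketch {
  def demo(): Unit = {
    val first = UpsertId.nextId()
    val second = UpsertId.nextId()
    // IDs are lexically ordered, so an ID generated later never sorts before an earlier one.
    println(UpsertId.upsertIdOrdering.lteq(first, second)) // true
    println(UpsertId.isValidId(first.id)) // true
    println(UpsertId.fromString(first.id).contains(first)) // true: the string form round-trips
    println(first.persistenceFilename) // e.g. "-KuzbQJIFBhwvtkvrBHF.json"
  }
}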
| broadinstitute/clio | clio-util/src/main/scala/org/broadinstitute/clio/util/model/UpsertId.scala | Scala | bsd-3-clause | 3,894 |
/**
* BasePlate API
* An API for BasePlate to connect with Loanapp.
*
* OpenAPI spec version: 1.0.0
* Contact: [email protected]
*
* NOTE: This class is auto generated by the swagger code generator program.
* https://github.com/swagger-api/swagger-codegen.git
* Do not edit the class manually.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.eclipse.jetty.server._
import org.eclipse.jetty.webapp.WebAppContext
import org.scalatra.servlet.ScalatraListener
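// Embedded Jetty launcher for the generated Scalatra application. Configuration is read from the
// environment (PORT, STOP_TIMEOUT, CONNECTOR_IDLE_TIMEOUT, PUBLIC, CONTEXT_PATH), falling back to
// the defaults defined in `conf` below.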
object JettyMain {
object conf {
val port = sys.env.get("PORT") map (_.toInt) getOrElse (8080)
val stopTimeout = sys.env.get("STOP_TIMEOUT") map (_.toInt) getOrElse (5000)
val connectorIdleTimeout = sys.env.get("CONNECTOR_IDLE_TIMEOUT") map (_.toInt) getOrElse (90000)
val webapp = sys.env.get("PUBLIC") getOrElse "webapp"
val contextPath = sys.env.get("CONTEXT_PATH") getOrElse "/"
}
def main(args: Array[String]) = {
val server: Server = new Server
println("starting jetty")
server setStopTimeout conf.stopTimeout
//server setDumpAfterStart true
server setStopAtShutdown true
val httpConfig = new HttpConfiguration()
httpConfig setSendDateHeader true
httpConfig setSendServerVersion false
val connector = new NetworkTrafficServerConnector(server, new HttpConnectionFactory(httpConfig))
connector setPort conf.port
connector setSoLingerTime 0
connector setIdleTimeout conf.connectorIdleTimeout
server addConnector connector
    val webApp = new WebAppContext
    webApp setContextPath conf.contextPath
    webApp setResourceBase conf.webapp
webApp setEventListeners Array(new ScalatraListener)
server setHandler webApp
server.start()
}
}
| garywong89/PetStoreAPI | scalatra/src/main/scala/JettyMain.scala | Scala | apache-2.0 | 2,257 |
/**
* Copyright (c) 2014-2016 Tim Bruijnzeels
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of this software, nor the names of its contributors, nor
* the names of the contributors' employers may be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package nl.bruijnzeels.tim.rpki.common.cqrs
import java.util.UUID
object EventStore {
  // TODO: Use persistent, thread-safe storage; sign and verify stored events; consider logging.
var storedEventList: List[StoredEvent] = List.empty
var listeners: List[EventListener] = List.empty
def subscribe(listener: EventListener) = listeners = listeners :+ listener
  def retrieve(aggregateType: AggregateRootType, aggregateId: UUID): List[Event] =
    storedEventList
      .filter(e => e.aggregateType == aggregateType && e.versionedId.id == aggregateId)
      .map(_.event)
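  /** Appends the aggregate's pending events, stamped with its next version, and notifies all subscribed listeners. */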
def store(aggregate: AggregateRoot): Unit = {
val aggregateType = aggregate.aggregateType
val newVersionedId = aggregate.versionedId.next
val newStoredEvents = aggregate.events.map(StoredEvent(aggregateType, newVersionedId, _))
storedEventList = storedEventList ++ newStoredEvents
listeners.foreach(l => l.handle(newStoredEvents))
}
def clear(): Unit = {
storedEventList = List.empty
listeners = List.empty
}
}
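// A minimal usage sketch, not part of the original source; the object name is hypothetical and the
// aggregate instance is assumed to come from elsewhere. It only shows the subscribe/store/retrieve flow.
object EventStoreUsageSketch {
  def demo(aggregate: AggregateRoot): Unit = {
    // Print every batch of stored events as it is persisted.
    EventStore.subscribe(new EventListener {
      override def handle(events: List[StoredEvent]): Unit =
        events.foreach(e => println(s"stored ${e.event} at version ${e.versionedId}"))
    })
    // Persist the aggregate's pending events and notify listeners.
    EventStore.store(aggregate)
    // Replay everything recorded for this aggregate from the in-memory store.
    val replayed = EventStore.retrieve(aggregate.aggregateType, aggregate.versionedId.id)
    println(s"replayed ${replayed.size} events")
  }
}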
trait EventListener {
def handle(events: List[StoredEvent]): Unit
}
case class StoredEvent(aggregateType: AggregateRootType, versionedId: VersionedId, event: Event)
| timbru/rpki-ca | src/main/scala/nl/bruijnzeels/tim/rpki/common/cqrs/EventStore.scala | Scala | bsd-3-clause | 2,816
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import java.io.IOException
import java.lang.reflect.InvocationTargetException
import java.util
import java.util.Locale
import scala.collection.mutable
import scala.util.control.NonFatal
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.DDL_TIME
import org.apache.hadoop.hive.ql.metadata.HiveException
import org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT
import org.apache.thrift.TException
import org.apache.spark.{SparkConf, SparkException}
import org.apache.spark.internal.Logging
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.catalog.ExternalCatalogUtils._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.logical.ColumnStat
import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap
import org.apache.spark.sql.execution.command.DDLUtils
import org.apache.spark.sql.execution.datasources.{PartitioningUtils, SourceOptions}
import org.apache.spark.sql.hive.client.HiveClient
import org.apache.spark.sql.internal.HiveSerDe
import org.apache.spark.sql.internal.StaticSQLConf._
import org.apache.spark.sql.types.{DataType, StructType}
/**
* A persistent implementation of the system catalog using Hive.
* All public methods must be synchronized for thread-safety.
*/
private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configuration)
extends ExternalCatalog with Logging {
import CatalogTypes.TablePartitionSpec
import HiveExternalCatalog._
import CatalogTableType._
/**
* A Hive client used to interact with the metastore.
*/
lazy val client: HiveClient = {
HiveUtils.newClientForMetadata(conf, hadoopConf)
}
// Exceptions thrown by the hive client that we would like to wrap
private val clientExceptions = Set(
classOf[HiveException].getCanonicalName,
classOf[TException].getCanonicalName,
classOf[InvocationTargetException].getCanonicalName)
/**
* Whether this is an exception thrown by the hive client that should be wrapped.
*
* Due to classloader isolation issues, pattern matching won't work here so we need
* to compare the canonical names of the exceptions, which we assume to be stable.
*/
private def isClientException(e: Throwable): Boolean = {
var temp: Class[_] = e.getClass
var found = false
while (temp != null && !found) {
found = clientExceptions.contains(temp.getCanonicalName)
temp = temp.getSuperclass
}
found
}
/**
* Run some code involving `client` in a [[synchronized]] block and wrap certain
* exceptions thrown in the process in [[AnalysisException]].
*/
private def withClient[T](body: => T): T = synchronized {
try {
body
} catch {
case NonFatal(exception) if isClientException(exception) =>
val e = exception match {
// Since we are using shim, the exceptions thrown by the underlying method of
// Method.invoke() are wrapped by InvocationTargetException
case i: InvocationTargetException => i.getCause
case o => o
}
throw new AnalysisException(
e.getClass.getCanonicalName + ": " + e.getMessage, cause = Some(e))
}
}
/**
* Get the raw table metadata from hive metastore directly. The raw table metadata may contain
* special data source properties that should not be exposed outside of `HiveExternalCatalog`. We
* should interpret these special data source properties and restore the original table metadata
* before returning it.
*/
private[hive] def getRawTable(db: String, table: String): CatalogTable = {
client.getTable(db, table)
}
/**
* If the given table properties contains datasource properties, throw an exception. We will do
* this check when create or alter a table, i.e. when we try to write table metadata to Hive
* metastore.
*/
private def verifyTableProperties(table: CatalogTable): Unit = {
val invalidKeys = table.properties.keys.filter(_.startsWith(SPARK_SQL_PREFIX))
if (invalidKeys.nonEmpty) {
throw new AnalysisException(s"Cannot persist ${table.qualifiedName} into Hive metastore " +
s"as table property keys may not start with '$SPARK_SQL_PREFIX': " +
invalidKeys.mkString("[", ", ", "]"))
}
// External users are not allowed to set/switch the table type. In Hive metastore, the table
// type can be switched by changing the value of a case-sensitive table property `EXTERNAL`.
if (table.properties.contains("EXTERNAL")) {
throw new AnalysisException("Cannot set or change the preserved property key: 'EXTERNAL'")
}
}
/**
   * Checks the validity of data column names. Hive metastore does not allow tables to use some
* special characters (',', ':', and ';') in data column names, including nested column names.
* Partition columns do not have such a restriction. Views do not have such a restriction.
*/
private def verifyDataSchema(
tableName: TableIdentifier, tableType: CatalogTableType, dataSchema: StructType): Unit = {
if (tableType != VIEW) {
val invalidChars = Seq(",", ":", ";")
def verifyNestedColumnNames(schema: StructType): Unit = schema.foreach { f =>
f.dataType match {
case st: StructType => verifyNestedColumnNames(st)
case _ if invalidChars.exists(f.name.contains) =>
val invalidCharsString = invalidChars.map(c => s"'$c'").mkString(", ")
val errMsg = "Cannot create a table having a nested column whose name contains " +
s"invalid characters ($invalidCharsString) in Hive metastore. Table: $tableName; " +
s"Column: ${f.name}"
throw new AnalysisException(errMsg)
case _ =>
}
}
dataSchema.foreach { f =>
f.dataType match {
// Checks top-level column names
case _ if f.name.contains(",") =>
throw new AnalysisException("Cannot create a table having a column whose name " +
s"contains commas in Hive metastore. Table: $tableName; Column: ${f.name}")
// Checks nested column names
case st: StructType =>
verifyNestedColumnNames(st)
case _ =>
}
}
}
}
// --------------------------------------------------------------------------
// Databases
// --------------------------------------------------------------------------
override def createDatabase(
dbDefinition: CatalogDatabase,
ignoreIfExists: Boolean): Unit = withClient {
client.createDatabase(dbDefinition, ignoreIfExists)
}
override def dropDatabase(
db: String,
ignoreIfNotExists: Boolean,
cascade: Boolean): Unit = withClient {
client.dropDatabase(db, ignoreIfNotExists, cascade)
}
/**
* Alter a database whose name matches the one specified in `dbDefinition`,
* assuming the database exists.
*
* Note: As of now, this only supports altering database properties!
*/
override def alterDatabase(dbDefinition: CatalogDatabase): Unit = withClient {
val existingDb = getDatabase(dbDefinition.name)
if (existingDb.properties == dbDefinition.properties) {
logWarning(s"Request to alter database ${dbDefinition.name} is a no-op because " +
s"the provided database properties are the same as the old ones. Hive does not " +
s"currently support altering other database fields.")
}
client.alterDatabase(dbDefinition)
}
override def getDatabase(db: String): CatalogDatabase = withClient {
client.getDatabase(db)
}
override def databaseExists(db: String): Boolean = withClient {
client.databaseExists(db)
}
override def listDatabases(): Seq[String] = withClient {
client.listDatabases("*")
}
override def listDatabases(pattern: String): Seq[String] = withClient {
client.listDatabases(pattern)
}
override def setCurrentDatabase(db: String): Unit = withClient {
client.setCurrentDatabase(db)
}
// --------------------------------------------------------------------------
// Tables
// --------------------------------------------------------------------------
override def createTable(
tableDefinition: CatalogTable,
ignoreIfExists: Boolean): Unit = withClient {
assert(tableDefinition.identifier.database.isDefined)
val db = tableDefinition.identifier.database.get
val table = tableDefinition.identifier.table
requireDbExists(db)
verifyTableProperties(tableDefinition)
verifyDataSchema(
tableDefinition.identifier, tableDefinition.tableType, tableDefinition.dataSchema)
if (tableExists(db, table) && !ignoreIfExists) {
throw new TableAlreadyExistsException(db = db, table = table)
}
// Ideally we should not create a managed table with location, but Hive serde table can
// specify location for managed table. And in [[CreateDataSourceTableAsSelectCommand]] we have
// to create the table directory and write out data before we create this table, to avoid
    // exposing a partially written table.
val needDefaultTableLocation = tableDefinition.tableType == MANAGED &&
tableDefinition.storage.locationUri.isEmpty
val tableLocation = if (needDefaultTableLocation) {
Some(CatalogUtils.stringToURI(defaultTablePath(tableDefinition.identifier)))
} else {
tableDefinition.storage.locationUri
}
if (DDLUtils.isDatasourceTable(tableDefinition)) {
createDataSourceTable(
tableDefinition.withNewStorage(locationUri = tableLocation),
ignoreIfExists)
} else {
val tableWithDataSourceProps = tableDefinition.copy(
// We can't leave `locationUri` empty and count on Hive metastore to set a default table
// location, because Hive metastore uses hive.metastore.warehouse.dir to generate default
// table location for tables in default database, while we expect to use the location of
// default database.
storage = tableDefinition.storage.copy(locationUri = tableLocation),
// Here we follow data source tables and put table metadata like table schema, partition
// columns etc. in table properties, so that we can work around the Hive metastore issue
// about not case preserving and make Hive serde table and view support mixed-case column
// names.
properties = tableDefinition.properties ++ tableMetaToTableProps(tableDefinition))
client.createTable(tableWithDataSourceProps, ignoreIfExists)
}
}
private def createDataSourceTable(table: CatalogTable, ignoreIfExists: Boolean): Unit = {
    // A data source table always has a provider; this is guaranteed by `DDLUtils.isDatasourceTable`.
val provider = table.provider.get
val options = new SourceOptions(table.storage.properties)
// To work around some hive metastore issues, e.g. not case-preserving, bad decimal type
    // support, no column nullability, etc., we should do some extra work before saving table
// metadata into Hive metastore:
// 1. Put table metadata like table schema, partition columns, etc. in table properties.
// 2. Check if this table is hive compatible.
// 2.1 If it's not hive compatible, set location URI, schema, partition columns and bucket
// spec to empty and save table metadata to Hive.
// 2.2 If it's hive compatible, set serde information in table metadata and try to save
// it to Hive. If it fails, treat it as not hive compatible and go back to 2.1
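    // Illustrative sketch of the resulting layout (the exact keys come from the constants in the
    // companion object): a partitioned parquet table typically ends up with table properties like
    //   spark.sql.sources.provider        -> parquet
    //   spark.sql.sources.schema.numParts -> 1
    //   spark.sql.sources.schema.part.0   -> {"type":"struct","fields":[...]}
    // plus partition-provider, partition-column and bucket entries when applicable.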
val tableProperties = tableMetaToTableProps(table)
// put table provider and partition provider in table properties.
tableProperties.put(DATASOURCE_PROVIDER, provider)
if (table.tracksPartitionsInCatalog) {
tableProperties.put(TABLE_PARTITION_PROVIDER, TABLE_PARTITION_PROVIDER_CATALOG)
}
// Ideally we should also put `locationUri` in table properties like provider, schema, etc.
// However, in older version of Spark we already store table location in storage properties
// with key "path". Here we keep this behaviour for backward compatibility.
val storagePropsWithLocation = table.storage.properties ++
table.storage.locationUri.map("path" -> CatalogUtils.URIToString(_))
// converts the table metadata to Spark SQL specific format, i.e. set data schema, names and
// bucket specification to empty. Note that partition columns are retained, so that we can
// call partition-related Hive API later.
def newSparkSQLSpecificMetastoreTable(): CatalogTable = {
table.copy(
// Hive only allows directory paths as location URIs while Spark SQL data source tables
// also allow file paths. For non-hive-compatible format, we should not set location URI
        // to prevent the Hive metastore from throwing an exception.
storage = table.storage.copy(
locationUri = None,
properties = storagePropsWithLocation),
schema = StructType(EMPTY_DATA_SCHEMA ++ table.partitionSchema),
bucketSpec = None,
properties = table.properties ++ tableProperties)
}
// converts the table metadata to Hive compatible format, i.e. set the serde information.
def newHiveCompatibleMetastoreTable(serde: HiveSerDe): CatalogTable = {
val location = if (table.tableType == EXTERNAL) {
// When we hit this branch, we are saving an external data source table with hive
// compatible format, which means the data source is file-based and must have a `path`.
require(table.storage.locationUri.isDefined,
"External file-based data source table must have a `path` entry in storage properties.")
Some(table.location)
} else {
None
}
table.copy(
storage = table.storage.copy(
locationUri = location,
inputFormat = serde.inputFormat,
outputFormat = serde.outputFormat,
serde = serde.serde,
properties = storagePropsWithLocation
),
properties = table.properties ++ tableProperties)
}
val qualifiedTableName = table.identifier.quotedString
val maybeSerde = HiveSerDe.sourceToSerDe(provider)
val (hiveCompatibleTable, logMessage) = maybeSerde match {
case _ if options.skipHiveMetadata =>
        val message =
          s"Persisting data source table $qualifiedTableName into Hive metastore in " +
"Spark SQL specific format, which is NOT compatible with Hive."
(None, message)
      // Our bucketing is incompatible with Hive's (it uses a different hash function).
case _ if table.bucketSpec.nonEmpty =>
val message =
s"Persisting bucketed data source table $qualifiedTableName into " +
"Hive metastore in Spark SQL specific format, which is NOT compatible with Hive. "
(None, message)
case Some(serde) =>
val message =
s"Persisting file based data source table $qualifiedTableName into " +
s"Hive metastore in Hive compatible format."
(Some(newHiveCompatibleMetastoreTable(serde)), message)
case _ =>
val message =
s"Couldn't find corresponding Hive SerDe for data source provider $provider. " +
s"Persisting data source table $qualifiedTableName into Hive metastore in " +
s"Spark SQL specific format, which is NOT compatible with Hive."
(None, message)
}
(hiveCompatibleTable, logMessage) match {
case (Some(table), message) =>
// We first try to save the metadata of the table in a Hive compatible way.
// If Hive throws an error, we fall back to save its metadata in the Spark SQL
// specific way.
try {
logInfo(message)
saveTableIntoHive(table, ignoreIfExists)
} catch {
case NonFatal(e) =>
val warningMessage =
s"Could not persist ${table.identifier.quotedString} in a Hive " +
"compatible way. Persisting it into Hive metastore in Spark SQL specific format."
logWarning(warningMessage, e)
saveTableIntoHive(newSparkSQLSpecificMetastoreTable(), ignoreIfExists)
}
case (None, message) =>
logWarning(message)
saveTableIntoHive(newSparkSQLSpecificMetastoreTable(), ignoreIfExists)
}
}
/**
* Data source tables may be non Hive compatible and we need to store table metadata in table
   * properties to work around some Hive metastore limitations.
* This method puts table schema, partition column names, bucket specification into a map, which
* can be used as table properties later.
*/
private def tableMetaToTableProps(table: CatalogTable): mutable.Map[String, String] = {
tableMetaToTableProps(table, table.schema)
}
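  // For example, with SCHEMA_STRING_LENGTH_THRESHOLD = 4000, a 10 KB schema JSON is stored as three
  // table properties: `numParts` = 3 plus `part.0` .. `part.2` (under the DATASOURCE_SCHEMA_* keys
  // used below), which getSchemaFromTableProperties reassembles when the table is read back.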
private def tableMetaToTableProps(
table: CatalogTable,
schema: StructType): mutable.Map[String, String] = {
val partitionColumns = table.partitionColumnNames
val bucketSpec = table.bucketSpec
val properties = new mutable.HashMap[String, String]
properties.put(CREATED_SPARK_VERSION, table.createVersion)
// Serialized JSON schema string may be too long to be stored into a single metastore table
// property. In this case, we split the JSON string and store each part as a separate table
// property.
val threshold = conf.get(SCHEMA_STRING_LENGTH_THRESHOLD)
val schemaJsonString = schema.json
// Split the JSON string.
val parts = schemaJsonString.grouped(threshold).toSeq
properties.put(DATASOURCE_SCHEMA_NUMPARTS, parts.size.toString)
parts.zipWithIndex.foreach { case (part, index) =>
properties.put(s"$DATASOURCE_SCHEMA_PART_PREFIX$index", part)
}
if (partitionColumns.nonEmpty) {
properties.put(DATASOURCE_SCHEMA_NUMPARTCOLS, partitionColumns.length.toString)
partitionColumns.zipWithIndex.foreach { case (partCol, index) =>
properties.put(s"$DATASOURCE_SCHEMA_PARTCOL_PREFIX$index", partCol)
}
}
if (bucketSpec.isDefined) {
val BucketSpec(numBuckets, bucketColumnNames, sortColumnNames) = bucketSpec.get
properties.put(DATASOURCE_SCHEMA_NUMBUCKETS, numBuckets.toString)
properties.put(DATASOURCE_SCHEMA_NUMBUCKETCOLS, bucketColumnNames.length.toString)
bucketColumnNames.zipWithIndex.foreach { case (bucketCol, index) =>
properties.put(s"$DATASOURCE_SCHEMA_BUCKETCOL_PREFIX$index", bucketCol)
}
if (sortColumnNames.nonEmpty) {
properties.put(DATASOURCE_SCHEMA_NUMSORTCOLS, sortColumnNames.length.toString)
sortColumnNames.zipWithIndex.foreach { case (sortCol, index) =>
properties.put(s"$DATASOURCE_SCHEMA_SORTCOL_PREFIX$index", sortCol)
}
}
}
properties
}
private def defaultTablePath(tableIdent: TableIdentifier): String = {
val dbLocation = getDatabase(tableIdent.database.get).locationUri
new Path(new Path(dbLocation), tableIdent.table).toString
}
private def saveTableIntoHive(tableDefinition: CatalogTable, ignoreIfExists: Boolean): Unit = {
assert(DDLUtils.isDatasourceTable(tableDefinition),
"saveTableIntoHive only takes data source table.")
// If this is an external data source table...
if (tableDefinition.tableType == EXTERNAL &&
// ... that is not persisted as Hive compatible format (external tables in Hive compatible
// format always set `locationUri` to the actual data location and should NOT be hacked as
// following.)
tableDefinition.storage.locationUri.isEmpty) {
// !! HACK ALERT !!
//
// Due to a restriction of Hive metastore, here we have to set `locationUri` to a temporary
// directory that doesn't exist yet but can definitely be successfully created, and then
// delete it right after creating the external data source table. This location will be
// persisted to Hive metastore as standard Hive table location URI, but Spark SQL doesn't
// really use it. Also, since we only do this workaround for external tables, deleting the
// directory after the fact doesn't do any harm.
//
// Please refer to https://issues.apache.org/jira/browse/SPARK-15269 for more details.
val tempPath = {
val dbLocation = new Path(getDatabase(tableDefinition.database).locationUri)
new Path(dbLocation, tableDefinition.identifier.table + "-__PLACEHOLDER__")
}
try {
client.createTable(
tableDefinition.withNewStorage(locationUri = Some(tempPath.toUri)),
ignoreIfExists)
} finally {
FileSystem.get(tempPath.toUri, hadoopConf).delete(tempPath, true)
}
} else {
client.createTable(tableDefinition, ignoreIfExists)
}
}
override def dropTable(
db: String,
table: String,
ignoreIfNotExists: Boolean,
purge: Boolean): Unit = withClient {
requireDbExists(db)
client.dropTable(db, table, ignoreIfNotExists, purge)
}
override def renameTable(
db: String,
oldName: String,
newName: String): Unit = withClient {
val rawTable = getRawTable(db, oldName)
// Note that Hive serde tables don't use path option in storage properties to store the value
// of table location, but use `locationUri` field to store it directly. And `locationUri` field
// will be updated automatically in Hive metastore by the `alterTable` call at the end of this
// method. Here we only update the path option if the path option already exists in storage
    // properties, to avoid adding an unnecessary path option for Hive serde tables.
val hasPathOption = CaseInsensitiveMap(rawTable.storage.properties).contains("path")
val storageWithNewPath = if (rawTable.tableType == MANAGED && hasPathOption) {
// If it's a managed table with path option and we are renaming it, then the path option
// becomes inaccurate and we need to update it according to the new table name.
val newTablePath = defaultTablePath(TableIdentifier(newName, Some(db)))
updateLocationInStorageProps(rawTable, Some(newTablePath))
} else {
rawTable.storage
}
val newTable = rawTable.copy(
identifier = TableIdentifier(newName, Some(db)),
storage = storageWithNewPath)
client.alterTable(db, oldName, newTable)
}
private def getLocationFromStorageProps(table: CatalogTable): Option[String] = {
CaseInsensitiveMap(table.storage.properties).get("path")
}
private def updateLocationInStorageProps(
table: CatalogTable,
newPath: Option[String]): CatalogStorageFormat = {
// We can't use `filterKeys` here, as the map returned by `filterKeys` is not serializable,
// while `CatalogTable` should be serializable.
val propsWithoutPath = table.storage.properties.filter {
case (k, v) => k.toLowerCase(Locale.ROOT) != "path"
}
table.storage.copy(properties = propsWithoutPath ++ newPath.map("path" -> _))
}
/**
   * Alter a table whose name matches the one specified in `tableDefinition`, assuming the table
   * exists. This method does not change the data source properties or statistics.
*
* Note: As of now, this doesn't support altering table schema, partition column names and bucket
* specification. We will ignore them even if users do specify different values for these fields.
*/
override def alterTable(tableDefinition: CatalogTable): Unit = withClient {
assert(tableDefinition.identifier.database.isDefined)
val db = tableDefinition.identifier.database.get
requireTableExists(db, tableDefinition.identifier.table)
verifyTableProperties(tableDefinition)
if (tableDefinition.tableType == VIEW) {
client.alterTable(tableDefinition)
} else {
val oldTableDef = getRawTable(db, tableDefinition.identifier.table)
val newStorage = if (DDLUtils.isHiveTable(tableDefinition)) {
tableDefinition.storage
} else {
// We can't alter the table storage of data source table directly for 2 reasons:
// 1. internally we use path option in storage properties to store the value of table
// location, but the given `tableDefinition` is from outside and doesn't have the path
// option, we need to add it manually.
// 2. this data source table may be created on a file, not a directory, then we can't set
// the `locationUri` field and save it to Hive metastore, because Hive only allows
// directory as table location.
//
// For example, an external data source table is created with a single file '/path/to/file'.
// Internally, we will add a path option with value '/path/to/file' to storage properties,
// and set the `locationUri` to a special value due to SPARK-15269(please see
// `saveTableIntoHive` for more details). When users try to get the table metadata back, we
// will restore the `locationUri` field from the path option and remove the path option from
// storage properties. When users try to alter the table storage, the given
// `tableDefinition` will have `locationUri` field with value `/path/to/file` and the path
// option is not set.
//
// Here we need 2 extra steps:
// 1. add path option to storage properties, to match the internal format, i.e. using path
// option to store the value of table location.
// 2. set the `locationUri` field back to the old one from the existing table metadata,
// if users don't want to alter the table location. This step is necessary as the
// `locationUri` is not always same with the path option, e.g. in the above example
// `locationUri` is a special value and we should respect it. Note that, if users
// want to alter the table location to a file path, we will fail. This should be fixed
// in the future.
val newLocation = tableDefinition.storage.locationUri.map(CatalogUtils.URIToString(_))
val storageWithPathOption = tableDefinition.storage.copy(
properties = tableDefinition.storage.properties ++ newLocation.map("path" -> _))
val oldLocation = getLocationFromStorageProps(oldTableDef)
if (oldLocation == newLocation) {
storageWithPathOption.copy(locationUri = oldTableDef.storage.locationUri)
} else {
storageWithPathOption
}
}
val partitionProviderProp = if (tableDefinition.tracksPartitionsInCatalog) {
TABLE_PARTITION_PROVIDER -> TABLE_PARTITION_PROVIDER_CATALOG
} else {
TABLE_PARTITION_PROVIDER -> TABLE_PARTITION_PROVIDER_FILESYSTEM
}
// Add old data source properties to table properties, to retain the data source table format.
// Add old stats properties to table properties, to retain spark's stats.
// Set the `schema`, `partitionColumnNames` and `bucketSpec` from the old table definition,
      // to retain the Spark-specific format if the table uses it.
val propsFromOldTable = oldTableDef.properties.filter { case (k, v) =>
k.startsWith(DATASOURCE_PREFIX) || k.startsWith(STATISTICS_PREFIX) ||
k.startsWith(CREATED_SPARK_VERSION)
}
val newTableProps = propsFromOldTable ++ tableDefinition.properties + partitionProviderProp
val newDef = tableDefinition.copy(
storage = newStorage,
schema = oldTableDef.schema,
partitionColumnNames = oldTableDef.partitionColumnNames,
bucketSpec = oldTableDef.bucketSpec,
properties = newTableProps)
client.alterTable(newDef)
}
}
/**
* Alter the data schema of a table identified by the provided database and table name. The new
* data schema should not have conflict column names with the existing partition columns, and
* should still contain all the existing data columns.
*/
override def alterTableDataSchema(
db: String,
table: String,
newDataSchema: StructType): Unit = withClient {
requireTableExists(db, table)
val oldTable = getTable(db, table)
verifyDataSchema(oldTable.identifier, oldTable.tableType, newDataSchema)
val schemaProps =
tableMetaToTableProps(oldTable, StructType(newDataSchema ++ oldTable.partitionSchema)).toMap
if (isDatasourceTable(oldTable)) {
// For data source tables, first try to write it with the schema set; if that does not work,
// try again with updated properties and the partition schema. This is a simplified version of
// what createDataSourceTable() does, and may leave the table in a state unreadable by Hive
// (for example, the schema does not match the data source schema, or does not match the
// storage descriptor).
try {
client.alterTableDataSchema(db, table, newDataSchema, schemaProps)
} catch {
case NonFatal(e) =>
val warningMessage =
s"Could not alter schema of table ${oldTable.identifier.quotedString} in a Hive " +
"compatible way. Updating Hive metastore in Spark SQL specific format."
logWarning(warningMessage, e)
client.alterTableDataSchema(db, table, EMPTY_DATA_SCHEMA, schemaProps)
}
} else {
client.alterTableDataSchema(db, table, newDataSchema, schemaProps)
}
}
/** Alter the statistics of a table. If `stats` is None, then remove all existing statistics. */
override def alterTableStats(
db: String,
table: String,
stats: Option[CatalogStatistics]): Unit = withClient {
requireTableExists(db, table)
val rawTable = getRawTable(db, table)
// convert table statistics to properties so that we can persist them through hive client
val statsProperties =
if (stats.isDefined) {
statsToProperties(stats.get)
} else {
new mutable.HashMap[String, String]()
}
val oldTableNonStatsProps = rawTable.properties.filterNot(_._1.startsWith(STATISTICS_PREFIX))
val updatedTable = rawTable.copy(properties = oldTableNonStatsProps ++ statsProperties)
client.alterTable(updatedTable)
}
override def getTable(db: String, table: String): CatalogTable = withClient {
restoreTableMetadata(getRawTable(db, table))
}
/**
   * Restores table metadata from the table properties. This method is effectively the inverse
   * of [[createTable]].
*
* It reads table schema, provider, partition column names and bucket specification from table
* properties, and filter out these special entries from table properties.
*/
private def restoreTableMetadata(inputTable: CatalogTable): CatalogTable = {
if (conf.get(DEBUG_MODE)) {
return inputTable
}
var table = inputTable
table.properties.get(DATASOURCE_PROVIDER) match {
case None if table.tableType == VIEW =>
// If this is a view created by Spark 2.2 or higher versions, we should restore its schema
// from table properties.
if (table.properties.contains(DATASOURCE_SCHEMA_NUMPARTS)) {
table = table.copy(schema = getSchemaFromTableProperties(table))
}
// No provider in table properties, which means this is a Hive serde table.
case None =>
table = restoreHiveSerdeTable(table)
// This is a regular data source table.
case Some(provider) =>
table = restoreDataSourceTable(table, provider)
}
// Restore version info
val version: String = table.properties.getOrElse(CREATED_SPARK_VERSION, "2.2 or prior")
// Restore Spark's statistics from information in Metastore.
val restoredStats =
statsFromProperties(table.properties, table.identifier.table, table.schema)
if (restoredStats.isDefined) {
table = table.copy(stats = restoredStats)
}
// Get the original table properties as defined by the user.
table.copy(
createVersion = version,
properties = table.properties.filterNot { case (key, _) => key.startsWith(SPARK_SQL_PREFIX) })
}
// Reorder table schema to put partition columns at the end. Before Spark 2.2, the partition
// columns are not put at the end of schema. We need to reorder it when reading the schema
// from the table properties.
private def reorderSchema(schema: StructType, partColumnNames: Seq[String]): StructType = {
val partitionFields = partColumnNames.map { partCol =>
schema.find(_.name == partCol).getOrElse {
throw new AnalysisException("The metadata is corrupted. Unable to find the " +
s"partition column names from the schema. schema: ${schema.catalogString}. " +
s"Partition columns: ${partColumnNames.mkString("[", ", ", "]")}")
}
}
StructType(schema.filterNot(partitionFields.contains) ++ partitionFields)
}
private def restoreHiveSerdeTable(table: CatalogTable): CatalogTable = {
val options = new SourceOptions(table.storage.properties)
val hiveTable = table.copy(
provider = Some(DDLUtils.HIVE_PROVIDER),
tracksPartitionsInCatalog = true)
// If this is a Hive serde table created by Spark 2.1 or higher versions, we should restore its
// schema from table properties.
if (table.properties.contains(DATASOURCE_SCHEMA_NUMPARTS)) {
val schemaFromTableProps = getSchemaFromTableProperties(table)
val partColumnNames = getPartitionColumnsFromTableProperties(table)
val reorderedSchema = reorderSchema(schema = schemaFromTableProps, partColumnNames)
if (DataType.equalsIgnoreCaseAndNullability(reorderedSchema, table.schema) ||
options.respectSparkSchema) {
hiveTable.copy(
schema = reorderedSchema,
partitionColumnNames = partColumnNames,
bucketSpec = getBucketSpecFromTableProperties(table))
} else {
// Hive metastore may change the table schema, e.g. schema inference. If the table
        // schema we read back is different (ignoring case and nullability) from the one in table
// properties which was written when creating table, we should respect the table schema
// from hive.
        logWarning(s"The table schema given by Hive metastore (${table.schema.catalogString}) is " +
          "different from the schema when this table was created by Spark SQL " +
          s"(${schemaFromTableProps.catalogString}). We have to fall back to the table schema " +
"from Hive metastore which is not case preserving.")
hiveTable.copy(schemaPreservesCase = false)
}
} else {
hiveTable.copy(schemaPreservesCase = false)
}
}
private def restoreDataSourceTable(table: CatalogTable, provider: String): CatalogTable = {
// Internally we store the table location in storage properties with key "path" for data
// source tables. Here we set the table location to `locationUri` field and filter out the
// path option in storage properties, to avoid exposing this concept externally.
val storageWithLocation = {
val tableLocation = getLocationFromStorageProps(table)
// We pass None as `newPath` here, to remove the path option in storage properties.
updateLocationInStorageProps(table, newPath = None).copy(
locationUri = tableLocation.map(CatalogUtils.stringToURI(_)))
}
val storageWithoutHiveGeneratedProperties = storageWithLocation.copy(
properties = storageWithLocation.properties.filterKeys(!HIVE_GENERATED_STORAGE_PROPERTIES(_)))
val partitionProvider = table.properties.get(TABLE_PARTITION_PROVIDER)
val schemaFromTableProps = getSchemaFromTableProperties(table)
val partColumnNames = getPartitionColumnsFromTableProperties(table)
val reorderedSchema = reorderSchema(schema = schemaFromTableProps, partColumnNames)
table.copy(
provider = Some(provider),
storage = storageWithoutHiveGeneratedProperties,
schema = reorderedSchema,
partitionColumnNames = partColumnNames,
bucketSpec = getBucketSpecFromTableProperties(table),
tracksPartitionsInCatalog = partitionProvider == Some(TABLE_PARTITION_PROVIDER_CATALOG),
properties = table.properties.filterKeys(!HIVE_GENERATED_TABLE_PROPERTIES(_)))
}
override def tableExists(db: String, table: String): Boolean = withClient {
client.tableExists(db, table)
}
override def listTables(db: String): Seq[String] = withClient {
requireDbExists(db)
client.listTables(db)
}
override def listTables(db: String, pattern: String): Seq[String] = withClient {
requireDbExists(db)
client.listTables(db, pattern)
}
override def loadTable(
db: String,
table: String,
loadPath: String,
isOverwrite: Boolean,
isSrcLocal: Boolean): Unit = withClient {
requireTableExists(db, table)
client.loadTable(
loadPath,
s"$db.$table",
isOverwrite,
isSrcLocal)
}
override def loadPartition(
db: String,
table: String,
loadPath: String,
partition: TablePartitionSpec,
isOverwrite: Boolean,
inheritTableSpecs: Boolean,
isSrcLocal: Boolean): Unit = withClient {
requireTableExists(db, table)
val orderedPartitionSpec = new util.LinkedHashMap[String, String]()
getTable(db, table).partitionColumnNames.foreach { colName =>
// Hive metastore is not case preserving and keeps partition columns with lower cased names,
// and Hive will validate the column names in partition spec to make sure they are partition
      // columns. Here we lower-case the column names before passing the partition spec to Hive
// client, to satisfy Hive.
// scalastyle:off caselocale
orderedPartitionSpec.put(colName.toLowerCase, partition(colName))
// scalastyle:on caselocale
}
client.loadPartition(
loadPath,
db,
table,
orderedPartitionSpec,
isOverwrite,
inheritTableSpecs,
isSrcLocal)
}
override def loadDynamicPartitions(
db: String,
table: String,
loadPath: String,
partition: TablePartitionSpec,
replace: Boolean,
numDP: Int): Unit = withClient {
requireTableExists(db, table)
val orderedPartitionSpec = new util.LinkedHashMap[String, String]()
getTable(db, table).partitionColumnNames.foreach { colName =>
// Hive metastore is not case preserving and keeps partition columns with lower cased names,
// and Hive will validate the column names in partition spec to make sure they are partition
      // columns. Here we lower-case the column names before passing the partition spec to Hive
// client, to satisfy Hive.
// scalastyle:off caselocale
orderedPartitionSpec.put(colName.toLowerCase, partition(colName))
// scalastyle:on caselocale
}
client.loadDynamicPartitions(
loadPath,
db,
table,
orderedPartitionSpec,
replace,
numDP)
}
// --------------------------------------------------------------------------
// Partitions
// --------------------------------------------------------------------------
// Hive metastore is not case preserving and the partition columns are always lower cased. We need
// to lower case the column names in partition specification before calling partition related Hive
// APIs, to match this behaviour.
private def lowerCasePartitionSpec(spec: TablePartitionSpec): TablePartitionSpec = {
// scalastyle:off caselocale
spec.map { case (k, v) => k.toLowerCase -> v }
// scalastyle:on caselocale
}
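  // For example, a user-facing spec Map("partCol" -> "1") is sent to Hive as Map("partcol" -> "1");
  // restorePartitionSpec below maps the lower-cased keys back to the table's actual column names.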
// Build a map from lower-cased partition column names to exact column names for a given table
private def buildLowerCasePartColNameMap(table: CatalogTable): Map[String, String] = {
val actualPartColNames = table.partitionColumnNames
// scalastyle:off caselocale
actualPartColNames.map(colName => (colName.toLowerCase, colName)).toMap
// scalastyle:on caselocale
}
// Hive metastore is not case preserving and the column names of the partition specification we
// get from the metastore are always lower cased. We should restore them w.r.t. the actual table
// partition columns.
private def restorePartitionSpec(
spec: TablePartitionSpec,
partColMap: Map[String, String]): TablePartitionSpec = {
// scalastyle:off caselocale
spec.map { case (k, v) => partColMap(k.toLowerCase) -> v }
// scalastyle:on caselocale
}
private def restorePartitionSpec(
spec: TablePartitionSpec,
partCols: Seq[String]): TablePartitionSpec = {
spec.map { case (k, v) => partCols.find(_.equalsIgnoreCase(k)).get -> v }
}
override def createPartitions(
db: String,
table: String,
parts: Seq[CatalogTablePartition],
ignoreIfExists: Boolean): Unit = withClient {
requireTableExists(db, table)
val tableMeta = getTable(db, table)
val partitionColumnNames = tableMeta.partitionColumnNames
val tablePath = new Path(tableMeta.location)
val partsWithLocation = parts.map { p =>
      // Ideally we could leave the partition location empty and let the Hive metastore set it.
// However, Hive metastore is not case preserving and will generate wrong partition location
// with lower cased partition column names. Here we set the default partition location
// manually to avoid this problem.
val partitionPath = p.storage.locationUri.map(uri => new Path(uri)).getOrElse {
ExternalCatalogUtils.generatePartitionPath(p.spec, partitionColumnNames, tablePath)
}
p.copy(storage = p.storage.copy(locationUri = Some(partitionPath.toUri)))
}
val lowerCasedParts = partsWithLocation.map(p => p.copy(spec = lowerCasePartitionSpec(p.spec)))
client.createPartitions(db, table, lowerCasedParts, ignoreIfExists)
}
override def dropPartitions(
db: String,
table: String,
parts: Seq[TablePartitionSpec],
ignoreIfNotExists: Boolean,
purge: Boolean,
retainData: Boolean): Unit = withClient {
requireTableExists(db, table)
client.dropPartitions(
db, table, parts.map(lowerCasePartitionSpec), ignoreIfNotExists, purge, retainData)
}
override def renamePartitions(
db: String,
table: String,
specs: Seq[TablePartitionSpec],
newSpecs: Seq[TablePartitionSpec]): Unit = withClient {
client.renamePartitions(
db, table, specs.map(lowerCasePartitionSpec), newSpecs.map(lowerCasePartitionSpec))
val tableMeta = getTable(db, table)
val partitionColumnNames = tableMeta.partitionColumnNames
// Hive metastore is not case preserving and keeps partition columns with lower cased names.
// When Hive rename partition for managed tables, it will create the partition location with
    // a default path generated by the new spec with lower cased partition column names. This is
// unexpected and we need to rename them manually and alter the partition location.
// scalastyle:off caselocale
val hasUpperCasePartitionColumn = partitionColumnNames.exists(col => col.toLowerCase != col)
// scalastyle:on caselocale
if (tableMeta.tableType == MANAGED && hasUpperCasePartitionColumn) {
val tablePath = new Path(tableMeta.location)
val fs = tablePath.getFileSystem(hadoopConf)
val newParts = newSpecs.map { spec =>
val rightPath = renamePartitionDirectory(fs, tablePath, partitionColumnNames, spec)
val partition = client.getPartition(db, table, lowerCasePartitionSpec(spec))
partition.copy(storage = partition.storage.copy(locationUri = Some(rightPath.toUri)))
}
alterPartitions(db, table, newParts)
}
}
/**
* Rename the partition directory w.r.t. the actual partition columns.
*
* It will recursively rename the partition directory from the first partition column, to be most
* compatible with different file systems. e.g. in some file systems, renaming `a=1/b=2` to
* `A=1/B=2` will result to `a=1/B=2`, while in some other file systems, the renaming works, but
* will leave an empty directory `a=1`.
*/
private def renamePartitionDirectory(
fs: FileSystem,
tablePath: Path,
partCols: Seq[String],
newSpec: TablePartitionSpec): Path = {
import ExternalCatalogUtils.getPartitionPathString
var currentFullPath = tablePath
partCols.foreach { col =>
val partValue = newSpec(col)
val expectedPartitionString = getPartitionPathString(col, partValue)
val expectedPartitionPath = new Path(currentFullPath, expectedPartitionString)
if (fs.exists(expectedPartitionPath)) {
        // It is possible that some parent partition directories already exist or don't need to
// be renamed. e.g. the partition columns are `a` and `B`, then we don't need to rename
// `/table_path/a=1`. Or we already have a partition directory `A=1/B=2`, and we rename
// another partition to `A=1/B=3`, then we will have `A=1/B=2` and `a=1/b=3`, and we should
// just move `a=1/b=3` into `A=1` with new name `B=3`.
} else {
// scalastyle:off caselocale
val actualPartitionString = getPartitionPathString(col.toLowerCase, partValue)
// scalastyle:on caselocale
val actualPartitionPath = new Path(currentFullPath, actualPartitionString)
try {
fs.rename(actualPartitionPath, expectedPartitionPath)
} catch {
case e: IOException =>
throw new SparkException("Unable to rename partition path from " +
s"$actualPartitionPath to $expectedPartitionPath", e)
}
}
currentFullPath = expectedPartitionPath
}
currentFullPath
}
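  // Illustrative walk-through of renamePartitionDirectory above: with partition columns
  // Seq("a", "B") and newSpec Map("a" -> "1", "B" -> "2"), Hive will have created the
  // all-lower-case directory a=1/b=2. The loop first checks <tablePath>/a=1, finds it and moves
  // on; it then checks <tablePath>/a=1/B=2, does not find it, renames <tablePath>/a=1/b=2 to
  // <tablePath>/a=1/B=2, and returns that path.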
private def statsToProperties(stats: CatalogStatistics): Map[String, String] = {
val statsProperties = new mutable.HashMap[String, String]()
statsProperties += STATISTICS_TOTAL_SIZE -> stats.sizeInBytes.toString()
if (stats.rowCount.isDefined) {
statsProperties += STATISTICS_NUM_ROWS -> stats.rowCount.get.toString()
}
stats.colStats.foreach { case (colName, colStat) =>
colStat.toMap(colName).foreach { case (k, v) =>
// Fully qualified name used in table properties for a particular column stat.
// For example, for column "mycol", and "min" stat, this should return
// "spark.sql.statistics.colStats.mycol.min".
statsProperties += (STATISTICS_COL_STATS_PREFIX + k -> v)
}
}
statsProperties.toMap
}
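  // Example of the flattening performed by statsToProperties above: CatalogStatistics with
  // sizeInBytes = 1024, rowCount = Some(10) and a "min" stat for column "mycol" becomes
  // properties like
  //   spark.sql.statistics.totalSize -> 1024
  //   spark.sql.statistics.numRows -> 10
  //   spark.sql.statistics.colStats.mycol.min -> <min value>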
private def statsFromProperties(
properties: Map[String, String],
table: String,
schema: StructType): Option[CatalogStatistics] = {
val statsProps = properties.filterKeys(_.startsWith(STATISTICS_PREFIX))
if (statsProps.isEmpty) {
None
} else {
val colStats = new mutable.HashMap[String, CatalogColumnStat]
val colStatsProps = properties.filterKeys(_.startsWith(STATISTICS_COL_STATS_PREFIX)).map {
case (k, v) => k.drop(STATISTICS_COL_STATS_PREFIX.length) -> v
}
// Find all the column names by matching the KEY_VERSION properties for them.
colStatsProps.keys.filter {
k => k.endsWith(CatalogColumnStat.KEY_VERSION)
}.map { k =>
k.dropRight(CatalogColumnStat.KEY_VERSION.length + 1)
}.foreach { fieldName =>
// and for each, create a column stat.
CatalogColumnStat.fromMap(table, fieldName, colStatsProps).foreach { cs =>
colStats += fieldName -> cs
}
}
Some(CatalogStatistics(
sizeInBytes = BigInt(statsProps(STATISTICS_TOTAL_SIZE)),
rowCount = statsProps.get(STATISTICS_NUM_ROWS).map(BigInt(_)),
colStats = colStats.toMap))
}
}
override def alterPartitions(
db: String,
table: String,
newParts: Seq[CatalogTablePartition]): Unit = withClient {
val lowerCasedParts = newParts.map(p => p.copy(spec = lowerCasePartitionSpec(p.spec)))
val rawTable = getRawTable(db, table)
// convert partition statistics to properties so that we can persist them through hive api
val withStatsProps = lowerCasedParts.map { p =>
if (p.stats.isDefined) {
val statsProperties = statsToProperties(p.stats.get)
p.copy(parameters = p.parameters ++ statsProperties)
} else {
p
}
}
client.alterPartitions(db, table, withStatsProps)
}
override def getPartition(
db: String,
table: String,
spec: TablePartitionSpec): CatalogTablePartition = withClient {
val part = client.getPartition(db, table, lowerCasePartitionSpec(spec))
restorePartitionMetadata(part, getTable(db, table))
}
/**
* Restores partition metadata from the partition properties.
*
* Reads partition-level statistics from partition properties, puts these
* into [[CatalogTablePartition#stats]] and removes these special entries
* from the partition properties.
*/
private def restorePartitionMetadata(
partition: CatalogTablePartition,
table: CatalogTable): CatalogTablePartition = {
val restoredSpec = restorePartitionSpec(partition.spec, table.partitionColumnNames)
// Restore Spark's statistics from information in Metastore.
// Note: partition-level statistics were introduced in 2.3.
val restoredStats =
statsFromProperties(partition.parameters, table.identifier.table, table.schema)
if (restoredStats.isDefined) {
partition.copy(
spec = restoredSpec,
stats = restoredStats,
parameters = partition.parameters.filterNot {
case (key, _) => key.startsWith(SPARK_SQL_PREFIX) })
} else {
partition.copy(spec = restoredSpec)
}
}
/**
* Returns the specified partition or None if it does not exist.
*/
override def getPartitionOption(
db: String,
table: String,
spec: TablePartitionSpec): Option[CatalogTablePartition] = withClient {
client.getPartitionOption(db, table, lowerCasePartitionSpec(spec)).map { part =>
restorePartitionMetadata(part, getTable(db, table))
}
}
/**
* Returns the partition names from hive metastore for a given table in a database.
*/
override def listPartitionNames(
db: String,
table: String,
partialSpec: Option[TablePartitionSpec] = None): Seq[String] = withClient {
val catalogTable = getTable(db, table)
val partColNameMap = buildLowerCasePartColNameMap(catalogTable).mapValues(escapePathName)
val clientPartitionNames =
client.getPartitionNames(catalogTable, partialSpec.map(lowerCasePartitionSpec))
clientPartitionNames.map { partitionPath =>
val partSpec = PartitioningUtils.parsePathFragmentAsSeq(partitionPath)
partSpec.map { case (partName, partValue) =>
// scalastyle:off caselocale
partColNameMap(partName.toLowerCase) + "=" + escapePathName(partValue)
// scalastyle:on caselocale
}.mkString("/")
}
}
/**
* Returns the partitions from hive metastore for a given table in a database.
*/
override def listPartitions(
db: String,
table: String,
partialSpec: Option[TablePartitionSpec] = None): Seq[CatalogTablePartition] = withClient {
val partColNameMap = buildLowerCasePartColNameMap(getTable(db, table))
val res = client.getPartitions(db, table, partialSpec.map(lowerCasePartitionSpec)).map { part =>
part.copy(spec = restorePartitionSpec(part.spec, partColNameMap))
}
partialSpec match {
      // This might be a Hive bug: when the partition value inside the partial partition spec
      // contains a dot and we ask Hive to list partitions w.r.t. that partial spec, Hive treats
      // the dot as matching any single character and may return more partitions than expected.
      // Here we do an extra filter to drop the unexpected partitions.
case Some(spec) if spec.exists(_._2.contains(".")) =>
res.filter(p => isPartialPartitionSpec(spec, p.spec))
case _ => res
}
}
override def listPartitionsByFilter(
db: String,
table: String,
predicates: Seq[Expression],
defaultTimeZoneId: String): Seq[CatalogTablePartition] = withClient {
val rawTable = getRawTable(db, table)
val catalogTable = restoreTableMetadata(rawTable)
val partColNameMap = buildLowerCasePartColNameMap(catalogTable)
val clientPrunedPartitions =
client.getPartitionsByFilter(rawTable, predicates).map { part =>
part.copy(spec = restorePartitionSpec(part.spec, partColNameMap))
}
prunePartitionsByFilter(catalogTable, clientPrunedPartitions, predicates, defaultTimeZoneId)
}
// --------------------------------------------------------------------------
// Functions
// --------------------------------------------------------------------------
override def createFunction(
db: String,
funcDefinition: CatalogFunction): Unit = withClient {
requireDbExists(db)
// Hive's metastore is case insensitive. However, Hive's createFunction does
// not normalize the function name (unlike the getFunction part). So,
// we are normalizing the function name.
val functionName = funcDefinition.identifier.funcName.toLowerCase(Locale.ROOT)
requireFunctionNotExists(db, functionName)
val functionIdentifier = funcDefinition.identifier.copy(funcName = functionName)
client.createFunction(db, funcDefinition.copy(identifier = functionIdentifier))
}
override def dropFunction(db: String, name: String): Unit = withClient {
requireFunctionExists(db, name)
client.dropFunction(db, name)
}
override def alterFunction(
db: String, funcDefinition: CatalogFunction): Unit = withClient {
requireDbExists(db)
val functionName = funcDefinition.identifier.funcName.toLowerCase(Locale.ROOT)
requireFunctionExists(db, functionName)
val functionIdentifier = funcDefinition.identifier.copy(funcName = functionName)
client.alterFunction(db, funcDefinition.copy(identifier = functionIdentifier))
}
override def renameFunction(
db: String,
oldName: String,
newName: String): Unit = withClient {
requireFunctionExists(db, oldName)
requireFunctionNotExists(db, newName)
client.renameFunction(db, oldName, newName)
}
override def getFunction(db: String, funcName: String): CatalogFunction = withClient {
requireFunctionExists(db, funcName)
client.getFunction(db, funcName)
}
override def functionExists(db: String, funcName: String): Boolean = withClient {
requireDbExists(db)
client.functionExists(db, funcName)
}
override def listFunctions(db: String, pattern: String): Seq[String] = withClient {
requireDbExists(db)
client.listFunctions(db, pattern)
}
}
object HiveExternalCatalog {
val SPARK_SQL_PREFIX = "spark.sql."
val DATASOURCE_PREFIX = SPARK_SQL_PREFIX + "sources."
val DATASOURCE_PROVIDER = DATASOURCE_PREFIX + "provider"
val DATASOURCE_SCHEMA = DATASOURCE_PREFIX + "schema"
val DATASOURCE_SCHEMA_PREFIX = DATASOURCE_SCHEMA + "."
val DATASOURCE_SCHEMA_NUMPARTS = DATASOURCE_SCHEMA_PREFIX + "numParts"
val DATASOURCE_SCHEMA_NUMPARTCOLS = DATASOURCE_SCHEMA_PREFIX + "numPartCols"
val DATASOURCE_SCHEMA_NUMSORTCOLS = DATASOURCE_SCHEMA_PREFIX + "numSortCols"
val DATASOURCE_SCHEMA_NUMBUCKETS = DATASOURCE_SCHEMA_PREFIX + "numBuckets"
val DATASOURCE_SCHEMA_NUMBUCKETCOLS = DATASOURCE_SCHEMA_PREFIX + "numBucketCols"
val DATASOURCE_SCHEMA_PART_PREFIX = DATASOURCE_SCHEMA_PREFIX + "part."
val DATASOURCE_SCHEMA_PARTCOL_PREFIX = DATASOURCE_SCHEMA_PREFIX + "partCol."
val DATASOURCE_SCHEMA_BUCKETCOL_PREFIX = DATASOURCE_SCHEMA_PREFIX + "bucketCol."
val DATASOURCE_SCHEMA_SORTCOL_PREFIX = DATASOURCE_SCHEMA_PREFIX + "sortCol."
val STATISTICS_PREFIX = SPARK_SQL_PREFIX + "statistics."
val STATISTICS_TOTAL_SIZE = STATISTICS_PREFIX + "totalSize"
val STATISTICS_NUM_ROWS = STATISTICS_PREFIX + "numRows"
val STATISTICS_COL_STATS_PREFIX = STATISTICS_PREFIX + "colStats."
val TABLE_PARTITION_PROVIDER = SPARK_SQL_PREFIX + "partitionProvider"
val TABLE_PARTITION_PROVIDER_CATALOG = "catalog"
val TABLE_PARTITION_PROVIDER_FILESYSTEM = "filesystem"
val CREATED_SPARK_VERSION = SPARK_SQL_PREFIX + "create.version"
val HIVE_GENERATED_TABLE_PROPERTIES = Set(DDL_TIME)
val HIVE_GENERATED_STORAGE_PROPERTIES = Set(SERIALIZATION_FORMAT)
// When storing data source tables in hive metastore, we need to set data schema to empty if the
// schema is hive-incompatible. However we need a hack to preserve existing behavior. Before
  // Spark 2.0, we did not set a default serde here (that was done in Hive), so if the user
  // provided an empty schema Hive would automatically populate it with a single field
// "col". However, after SPARK-14388, we set the default serde to LazySimpleSerde so this
// implicit behavior no longer happens. Therefore, we need to do it in Spark ourselves.
val EMPTY_DATA_SCHEMA = new StructType()
.add("col", "array<string>", nullable = true, comment = "from deserializer")
  // A persisted data source table always stores its schema in the catalog.
private def getSchemaFromTableProperties(metadata: CatalogTable): StructType = {
val errorMessage = "Could not read schema from the hive metastore because it is corrupted."
val props = metadata.properties
val schema = props.get(DATASOURCE_SCHEMA)
if (schema.isDefined) {
// Originally, we used `spark.sql.sources.schema` to store the schema of a data source table.
// After SPARK-6024, we removed this flag.
      // Although we are not using `spark.sql.sources.schema` any more, we still need to support it.
DataType.fromJson(schema.get).asInstanceOf[StructType]
} else if (props.filterKeys(_.startsWith(DATASOURCE_SCHEMA_PREFIX)).isEmpty) {
// If there is no schema information in table properties, it means the schema of this table
      // was empty when it was saved into the metastore, which is possible in older versions (prior to 2.1) of
// Spark. We should respect it.
new StructType()
} else {
val numSchemaParts = props.get(DATASOURCE_SCHEMA_NUMPARTS)
if (numSchemaParts.isDefined) {
val parts = (0 until numSchemaParts.get.toInt).map { index =>
val part = metadata.properties.get(s"$DATASOURCE_SCHEMA_PART_PREFIX$index").orNull
if (part == null) {
throw new AnalysisException(errorMessage +
s" (missing part $index of the schema, ${numSchemaParts.get} parts are expected).")
}
part
}
// Stick all parts back to a single schema string.
DataType.fromJson(parts.mkString).asInstanceOf[StructType]
} else {
throw new AnalysisException(errorMessage)
}
}
}
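  // Example of the property layout read by getSchemaFromTableProperties above when the schema
  // JSON was split into parts:
  //   spark.sql.sources.schema.numParts -> 2
  //   spark.sql.sources.schema.part.0 -> first half of the schema JSON
  //   spark.sql.sources.schema.part.1 -> second half of the schema JSON
  // The parts are concatenated back into a single JSON string before parsing.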
private def getColumnNamesByType(
props: Map[String, String],
colType: String,
typeName: String): Seq[String] = {
for {
numCols <- props.get(s"spark.sql.sources.schema.num${colType.capitalize}Cols").toSeq
index <- 0 until numCols.toInt
} yield props.getOrElse(
s"$DATASOURCE_SCHEMA_PREFIX${colType}Col.$index",
throw new AnalysisException(
s"Corrupted $typeName in catalog: $numCols parts expected, but part $index is missing."
)
)
}
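  // Example for getColumnNamesByType above: properties such as
  //   spark.sql.sources.schema.numPartCols -> 2
  //   spark.sql.sources.schema.partCol.0 -> year
  //   spark.sql.sources.schema.partCol.1 -> month
  // are read back by getColumnNamesByType(props, "part", "partitioning columns") as
  // Seq("year", "month").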
private def getPartitionColumnsFromTableProperties(metadata: CatalogTable): Seq[String] = {
getColumnNamesByType(metadata.properties, "part", "partitioning columns")
}
private def getBucketSpecFromTableProperties(metadata: CatalogTable): Option[BucketSpec] = {
metadata.properties.get(DATASOURCE_SCHEMA_NUMBUCKETS).map { numBuckets =>
BucketSpec(
numBuckets.toInt,
getColumnNamesByType(metadata.properties, "bucket", "bucketing columns"),
getColumnNamesByType(metadata.properties, "sort", "sorting columns"))
}
}
/**
* Detects a data source table. This checks both the table provider and the table properties,
* unlike DDLUtils which just checks the former.
*/
private[spark] def isDatasourceTable(table: CatalogTable): Boolean = {
val provider = table.provider.orElse(table.properties.get(DATASOURCE_PROVIDER))
provider.isDefined && provider != Some(DDLUtils.HIVE_PROVIDER)
}
}
| yanboliang/spark | sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala | Scala | apache-2.0 | 61,366 |
import scala.language.implicitConversions
trait Fooable[T]
object Fooable {
implicit def conjure[T]: Fooable[T] = {
println("conjure")
new Fooable[T]{}
}
}
object Test {
implicit def traversable[T, Coll[_] <: Iterable[_]](implicit
elem: Fooable[T]): Fooable[Coll[T]] = {
println("traversable")
new Fooable[Coll[T]]{}
}
def main(args: Array[String]): Unit = {
implicitly[Fooable[List[Any]]]
}
}
| scala/scala | test/files/run/t7291.scala | Scala | apache-2.0 | 430 |
package mesosphere.marathon
package core.base
/**
  * Simple value container which is used to help things know if Marathon is on its way down
*/
trait LifecycleState {
def isRunning: Boolean
}
object LifecycleState {
object WatchingJVM extends LifecycleState {
private[this] var running = true
    /* Note - each shutdown hook is run in its own thread, so this won't have to wait until some other shutdown hook
* finishes before the boolean can be set */
sys.addShutdownHook {
running = false
}
override def isRunning: Boolean = running
}
object Ignore extends LifecycleState {
override val isRunning: Boolean = true
}
}
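// Illustrative usage sketch (the helper below is hypothetical, not part of Marathon): a
// long-running component can poll the flag to decide when to stop doing work, e.g.
//
//   def runUntilShutdown(lifecycle: LifecycleState)(work: () => Unit): Unit =
//     while (lifecycle.isRunning) work()
//
// with `LifecycleState.WatchingJVM` wired in for production and `LifecycleState.Ignore` in tests.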
| guenter/marathon | src/main/scala/mesosphere/marathon/core/base/LifecycleState.scala | Scala | apache-2.0 | 670 |
package mypipe.kafka
import mypipe.api.event.Mutation
import scala.reflect.runtime.universe._
import org.slf4j.LoggerFactory
import mypipe.avro.schema.GenericSchemaRepository
import org.apache.avro.Schema
import org.apache.avro.specific.SpecificRecord
import mypipe.avro.AvroVersionedRecordDeserializer
abstract class KafkaMutationAvroConsumer[InsertMutationType <: SpecificRecord, UpdateMutationType <: SpecificRecord, DeleteMutationType <: SpecificRecord, SchemaId](
topic: String,
zkConnect: String,
groupId: String,
schemaIdSizeInBytes: Int)(insertCallback: (InsertMutationType) ⇒ Boolean,
updateCallback: (UpdateMutationType) ⇒ Boolean,
deleteCallback: (DeleteMutationType) ⇒ Boolean,
implicit val insertTag: TypeTag[InsertMutationType],
implicit val updateTag: TypeTag[UpdateMutationType],
implicit val deletetag: TypeTag[DeleteMutationType])
extends KafkaConsumer(topic, zkConnect, groupId) {
// abstract fields and methods
protected val schemaRepoClient: GenericSchemaRepository[SchemaId, Schema]
protected def bytesToSchemaId(bytes: Array[Byte], offset: Int): SchemaId
protected def avroSchemaSubjectForMutationByte(byte: Byte): String
protected val logger = LoggerFactory.getLogger(getClass.getName)
protected val headerLength = PROTO_HEADER_LEN_V0 + schemaIdSizeInBytes
val insertDeserializer: AvroVersionedRecordDeserializer[InsertMutationType]
val updateDeserializer: AvroVersionedRecordDeserializer[UpdateMutationType]
val deleteDeserializer: AvroVersionedRecordDeserializer[DeleteMutationType]
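  // Assumed wire format handled by onEvent below (PROTO_HEADER_LEN_V0 is presumed to cover the
  // two leading bytes): bytes(0) is the magic byte PROTO_MAGIC_V0, bytes(1) is the mutation-type
  // byte (insert/update/delete), the following schemaIdSizeInBytes bytes carry the schema id used
  // to look up the writer schema in the schema repository, and the remaining bytes (from
  // headerLength onwards) are the Avro-encoded mutation record.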
override def onEvent(bytes: Array[Byte]): Boolean = try {
val magicByte = bytes(0)
if (magicByte != PROTO_MAGIC_V0) {
logger.error(s"We have encountered an unknown magic byte! Magic Byte: $magicByte")
false
} else {
val mutationType = bytes(1)
val schemaId = bytesToSchemaId(bytes, PROTO_HEADER_LEN_V0)
val continue = mutationType match {
case Mutation.InsertByte ⇒ schemaRepoClient
.getSchema(avroSchemaSubjectForMutationByte(Mutation.InsertByte), schemaId)
.map(insertDeserializer.deserialize(_, bytes, headerLength).map(m ⇒ insertCallback(m)))
.getOrElse(None)
case Mutation.UpdateByte ⇒ schemaRepoClient
.getSchema(avroSchemaSubjectForMutationByte(Mutation.UpdateByte), schemaId)
.map(updateDeserializer.deserialize(_, bytes, headerLength).map(m ⇒ updateCallback(m)))
.getOrElse(None)
case Mutation.DeleteByte ⇒ schemaRepoClient
.getSchema(avroSchemaSubjectForMutationByte(Mutation.DeleteByte), schemaId)
.map(deleteDeserializer.deserialize(_, bytes, headerLength).map(m ⇒ deleteCallback(m)))
.getOrElse(None)
}
continue.getOrElse(false)
}
} catch {
case e: Exception ⇒
log.error("Could not run callback on " + bytes.mkString + " => " + e.getMessage + "\\n" + e.getStackTraceString)
false
}
}
| Asana/mypipe | mypipe-kafka/src/main/scala/mypipe/kafka/KafkaMutationAvroConsumer.scala | Scala | apache-2.0 | 3,103 |
import org.scalatest._
import java.util.Date
class Test1 extends FlatSpec {
it should "work" in {
val start = Counter.add(13)
println(s"Starting test 1 ($start)...")
Thread.sleep(2000L)
val end = Counter.get
println(s"Test 1 done ($end)")
assert(end == start, s"Expected Counter to stay at $start, but it changed to $end")
}
}
 | pdalpra/sbt | sbt/src/sbt-test/tests/serial/sub1/src/test/scala/Test1.scala | Scala | bsd-3-clause | 346 |
package fpinscala.answers.streamingio
/*
* `Eq[A,B]` provides evidence that types `A` and `B` are equal.
* There is just one public constructor, `Eq.refl`, ensuring that
* we cannot construct an `Eq` instance in which the `A` and `B`
* differ.
*
* There is a version of this in the scala standard library,
* called =:=[A,B] (and usually written infix as `A =:= B`) but
* we include a version here just to show that it is not magic.
*/
case class Eq[A,B] private(to: A => B, from: B => A)
object Eq { def refl[A]: Eq[A,A] = Eq(identity, identity) }
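// Illustrative sketch (the `cast` helper is hypothetical, mirroring how `=:=` is used): because
// `Eq.refl` is the only way to obtain evidence, `A` and `B` must be the same type, so a coercion
// such as
//
//   def cast[A, B](eq: Eq[A, B])(a: A): B = eq.to(a)
//
// is safe for any `Eq[A, B]` value a caller can actually construct.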
| fpinscala/fpinscala | src/main/scala/fpinscala/answers/streamingio/Eq.scala | Scala | mit | 565 |
package com.stackstate.actor
import akka.actor.{ActorRef, ActorSystem, Props}
import com.stackstate.event._
import com.stackstate.graph.Node
import com.typesafe.config.ConfigFactory
class NodeActorTest extends ActorTestBase(ActorSystem("test-tasker", ConfigFactory.load("application.conf"))) {
private val node: Node = Node("A")
describe("A Node Actor") {
it("should propagate failure to parent if thresold is breached") {
val nodeActor: ActorRef = system.actorOf(Props(classOf[NodeActor], node))
nodeActor ! Major
expectNoMsg
nodeActor ! Critical
expectNoMsg
nodeActor ! Fatal
expectMsg(PropagationAlert(node, Warning))
}
it("should acknowledge its state when health check event is received"){
val nodeActor: ActorRef = system.actorOf(Props(classOf[NodeActor], node))
nodeActor ! Major
expectNoMsg
nodeActor ! HealthCheck
expectMsg(Ok)
}
it("should send a failure alert if severity is more than threshold when health check event is received"){
val nodeActor: ActorRef = system.actorOf(Props(classOf[NodeActor], node))
nodeActor ! Major
expectNoMsg
nodeActor ! Critical
expectNoMsg
nodeActor ! Fatal
expectMsg(PropagationAlert(node, Warning))
nodeActor ! HealthCheck
expectMsg(Alert(5,Some("Node is in critical condition, please take remedial actions")))
}
}
}
| sameerarora/dependency-graph | src/test/scala/com/stackstate/actor/NodeActorTest.scala | Scala | apache-2.0 | 1,425 |
package avrohugger
package input
package parsers
import reflectivecompilation._
import reflectivecompilation.schemagen._
import org.apache.avro.Protocol
import org.apache.avro.Schema
import org.apache.avro.Schema.Parser
import org.apache.avro.SchemaParseException
import org.apache.avro.compiler.idl.Idl
import org.apache.avro.compiler.idl.ParseException
import scala.collection.JavaConverters._
import java.nio.charset.Charset
import scala.reflect.runtime.universe._
import scala.reflect.runtime.currentMirror
import scala.tools.reflect.ToolBox
// tries schema first, then protocol, then idl, then for case class defs
class StringInputParser {
lazy val schemaParser = new Parser()
def getSchemas(inputString: String): List[Schema] = {
def trySchema(str: String) = {
try {
List(schemaParser.parse(inputString))}
catch {
case notSchema: SchemaParseException => tryProtocol(inputString)
case unknown: Throwable => sys.error("Unexpected exception: " + unknown)
}
}
def tryProtocol(protocolStr: String): List[Schema] = {
try {
Protocol.parse(protocolStr).getTypes().asScala.toList}
catch {
case notProtocol: SchemaParseException => tryIDL(inputString)
case unknown: Throwable => sys.error("Unexpected exception: " + unknown)
}
}
def tryIDL(idlString: String): List[Schema] = {
try {
val bytes = idlString.getBytes(Charset.forName("UTF-8"))
val inStream = new java.io.ByteArrayInputStream(bytes)
val idlParser = new Idl(inStream)
val protocol = idlParser.CompilationUnit()
val types = protocol.getTypes
types.asScala.toList}
catch {
case notIDL: ParseException => tryCaseClass(inputString)
case unknown: Throwable => sys.error("Unexpected exception: " + unknown)
}
}
def tryCaseClass(codeStr: String): List[Schema] = {
val compilationUnits = PackageSplitter.getCompilationUnits(codeStr)
val trees = compilationUnits.map(compUnit => Toolbox.toolBox.parse(compUnit))
val schemas = trees.flatMap(tree => TreeInputParser.parse(tree))
TypecheckDependencyStore.knownClasses.clear
schemas
}
// tries schema first, then protocol, then idl, then for case class defs
val schemas: List[Schema] = trySchema(inputString)
schemas
}
}
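// Illustrative usage sketch (the inputs below are hypothetical): thanks to the fallback chain,
// both an Avro schema literal and plain case-class source are expected to yield schemas, e.g.
//
//   val parser = new StringInputParser
//   parser.getSchemas("""{"type":"record","name":"A","fields":[{"name":"x","type":"int"}]}""")
//   parser.getSchemas("case class A(x: Int)")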
| ppearcy/avrohugger | avrohugger-core/src/main/scala/input/parsers/StringInputParser.scala | Scala | apache-2.0 | 2,378 |
package unfiltered.request
import org.specs._
object BasicAuthSpecJetty extends unfiltered.spec.jetty.Served with BasicAuthSpec {
def setup = { _.filter(unfiltered.filter.Planify(intent)) }
}
object BasicAuthSpecNetty extends unfiltered.spec.netty.Served with BasicAuthSpec {
def setup = { p =>
unfiltered.netty.Http(p).handler(unfiltered.netty.cycle.Planify(intent))
}
}
trait BasicAuthSpec extends unfiltered.spec.Hosted {
import unfiltered.response._
import unfiltered.request._
import unfiltered.request.{Path => UFPath}
import dispatch._
def intent[A,B]: unfiltered.Cycle.Intent[A,B] = {
case GET(UFPath("/secret") & BasicAuth(name, pass)) => (name, pass) match {
case ("test", "secret") => ResponseString("pass")
case _ => ResponseString("fail")
}
}
"Basic Auth" should {
shareVariables()
"authenticate a valid user" in {
val resp = Http(host / "secret" as_!("test", "secret") as_str)
resp must_=="pass"
}
"not authenticate an invalid user" in {
val resp = Http(host / "secret" as_!("joe", "shmo") as_str)
resp must_=="fail"
}
}
}
| softprops/Unfiltered | library/src/test/scala/BasicAuthSpec.scala | Scala | mit | 1,139 |
/*
* Copyright (C) 2009-2018 Lightbend Inc. <https://www.lightbend.com>
*/
package play.core.utils
import util.control.Exception._
import org.specs2.mutable.Specification
import play.utils.Threads
class ThreadsSpec extends Specification {
"Threads" should {
"restore the correct class loader" in {
"if the block returns successfully" in {
val currentCl = Thread.currentThread.getContextClassLoader
Threads.withContextClassLoader(testClassLoader) {
Thread.currentThread.getContextClassLoader must be equalTo testClassLoader
"a string"
} must be equalTo "a string"
Thread.currentThread.getContextClassLoader must be equalTo currentCl
}
"if the block throws an exception" in {
val currentCl = Thread.currentThread.getContextClassLoader
(catching(classOf[RuntimeException]) opt Threads.withContextClassLoader(testClassLoader) {
Thread.currentThread.getContextClassLoader must be equalTo testClassLoader
throw new RuntimeException("Uh oh")
}) must beNone
Thread.currentThread.getContextClassLoader must be equalTo currentCl
}
}
}
val testClassLoader = new ClassLoader() {}
}
| Shenker93/playframework | framework/src/play/src/test/scala/play/core/utils/ThreadsSpec.scala | Scala | apache-2.0 | 1,218 |
/*
Copyright (c) 2017, Qvantel
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Qvantel nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL Qvantel BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.qvantel.jsonapi
import scala.annotation.compileTimeOnly
import scala.reflect.macros.whitebox.{Context => WhiteboxContext}
import mojolly.inflector.Inflector
import com.qvantel.jsonapi.JsonApiResourceMacro.{Mode, NoId, Normal}
import com.qvantel.jsonapi.macrosupport.JsonApiCommon
@compileTimeOnly("‘jsonApiResource’ is a compile-time annotation")
final class JsonApiResourceMacro(val c: WhiteboxContext) extends JsonApiCommon {
import c.universe._
private[this] def createIdentifiable(name: TypeName): c.Tree =
q"""implicit final val ${TermName(s"${name}Identifiable")}: _root_.com.qvantel.jsonapi.Identifiable[$name] = _root_.com.qvantel.jsonapi.Identifiable.by(_.id.toString)"""
private[this] def createResourceType(name: TypeName, resourceTypeName: String): c.Tree =
q"""implicit final val ${TermName(s"${name}ResourceType")}: _root_.com.qvantel.jsonapi.ResourceType[$name] = _root_.com.qvantel.jsonapi.ResourceType($resourceTypeName)"""
private[this] def createPathToId(name: TypeName, resourceTypeName: String): c.Tree = {
val path = s"/$resourceTypeName"
q"""implicit final val ${TermName(s"${name}PathTo")}: _root_.com.qvantel.jsonapi.PathToId[$name] = new _root_.com.qvantel.jsonapi.PathToId[$name] {
import _root_.io.lemonlabs.uri.typesafe.dsl._
override final val root: _root_.io.lemonlabs.uri.Url =
_root_.scala.Predef.implicitly[_root_.com.qvantel.jsonapi.ApiRoot].apiRoot match {
case _root_.scala.Some(root) => root.addPathPart($resourceTypeName)
case _root_.scala.None => $path
}
}"""
}
private[this] def createPathTo(name: TypeName, resourceTypeName: String): c.Tree = {
val path = s"/$resourceTypeName"
q"""implicit final val ${TermName(s"${name}PathTo")}: _root_.com.qvantel.jsonapi.PathTo[$name] = new _root_.com.qvantel.jsonapi.PathTo[$name] {
import _root_.io.lemonlabs.uri.typesafe.dsl._
override final val root: _root_.io.lemonlabs.uri.Url =
_root_.scala.Predef.implicitly[_root_.com.qvantel.jsonapi.ApiRoot].apiRoot match {
case _root_.scala.Some(root) => root.addPathPart($resourceTypeName)
case _root_.scala.None => $path
}
}"""
}
private[this] def createIncludes(name: TypeName): c.Tree =
q"""implicit final val ${TermName(s"${name}Includes")}: _root_.com.qvantel.jsonapi.Includes[$name] = _root_.com.qvantel.jsonapi.includes[$name]"""
private[this] def createJsonApiFormat(name: TypeName): c.Tree =
q"""implicit final val ${TermName(s"${name}JsonApiFormat")}: _root_.com.qvantel.jsonapi.JsonApiFormat[$name] = _root_.com.qvantel.jsonapi.jsonApiFormat[$name]"""
private[this] def modifiedDeclaration(classDecl: ClassDef, maybeCompanionDecl: Option[ModuleDef]): Tree = {
val className = classDecl.name
val (maybeResourceTypeName, mode) = getResourceTypeAndMode(c.prefix.tree)
val noId = mode == NoId
val resourceTypeName =
maybeResourceTypeName.getOrElse(Inflector.pluralize(NameManglers.dasherized(className.toString)))
val companionDecl = maybeCompanionDecl getOrElse q"""object ${className.toTermName} {}""" match {
case m @ q"$mods object $name extends { ..$earlydefns } with ..$parents { $self => ..$body }" =>
val defaultParts = List(createResourceType(className, resourceTypeName),
createIncludes(className),
createJsonApiFormat(className))
val withIdRequiringParts = if (noId) {
defaultParts ++ List(createPathTo(className, resourceTypeName))
} else {
defaultParts ++ List(createIdentifiable(className), createPathToId(className, resourceTypeName))
}
q"""object $name {
..$withIdRequiringParts
..$body }"""
}
val result = q"""
$classDecl
$companionDecl
"""
result
}
private[this] def getResourceTypeAndMode(tree: Tree): (Option[String], Mode) = {
def resourceTypeToOpt(res: String) =
res match {
case "normal" => None
case _ => Some(res)
}
tree match {
case q"new $name( ..$params )" =>
params match {
case resourceType :: mode :: Nil =>
(resourceTypeToOpt(c.eval[String](c.Expr(resourceType))), Mode(c.eval[String](c.Expr(mode))))
case resourceType :: Nil =>
(resourceTypeToOpt(c.eval[String](c.Expr(resourceType))), Normal)
case _ => (None, Normal)
}
case _ => (None, Normal)
}
}
def impl(annottees: c.Tree*): c.Tree = {
import c.universe._
annottees match {
case (classDecl: ClassDef) :: Nil =>
modifiedDeclaration(classDecl, None)
case (classDecl: ClassDef) :: (compDecl: ModuleDef) :: Nil =>
modifiedDeclaration(classDecl, Some(compDecl))
case _ =>
c.abort(c.enclosingPosition, "Invalid annottee")
}
}
}
object JsonApiResourceMacro {
sealed trait Mode
case object NoId extends Mode
case object Normal extends Mode
object Mode {
def apply(str: String): Mode = str.toLowerCase match {
case "normal" => Normal
case "no-id" => NoId
case _ => Normal
}
}
}
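// Illustrative sketch (the Person class below is hypothetical): annotating a resource such as
//
//   @jsonApiResource final case class Person(id: String, name: String)
//
// is expected to expand the companion with ResourceType("people") (the dasherized, pluralized
// class name), Includes, JsonApiFormat, Identifiable and PathToId instances, while
// @jsonApiResource("people", "no-id") skips the id-requiring instances and generates PathTo
// instead.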
| qvantel/jsonapi-scala | core/src/main/scala/com/qvantel/jsonapi/JsonApiResourceMacro.scala | Scala | bsd-3-clause | 6,768 |
package scodec.protocols
package ip
package v6
import scodec._
import scodec.bits._
import scodec.codecs._
import scodec.codecs.literals._
import scodec.stream._
/** Simple version of an IPv6 header. Does not support extension headers. */
case class SimpleHeader(
trafficClass: Int,
flowLabel: Int,
payloadLength: Int,
protocol: Int,
hopLimit: Int,
sourceIp: Address,
destinationIp: Address
)
object SimpleHeader {
implicit val codec: Codec[SimpleHeader] = {
("version" | bin"0110") :~>:
("traffic_class" | uint8) ::
("flow_label" | uint(20)) ::
("payload_length" | uint(16)) ::
("next_header" | uint8) ::
("hop_limit" | uint8) ::
("source_address" | Codec[Address]) ::
("destination_address" | Codec[Address])
}.as[SimpleHeader]
def sdecoder(ethernetHeader: pcap.EthernetFrameHeader): StreamDecoder[SimpleHeader] =
if (ethernetHeader.ethertype == Some(pcap.EtherType.IPv6)) decode.once[SimpleHeader]
else decode.empty
}
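// Layout note: the fixed fields above add up to 4 + 8 + 20 + 16 + 8 + 8 = 64 bits; assuming
// Codec[Address] encodes the usual 128-bit IPv6 address, a SimpleHeader occupies the standard
// 40-byte fixed IPv6 header.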
| scodec/scodec-protocols | src/main/scala/scodec/protocols/ip/v6/SimpleHeader.scala | Scala | bsd-3-clause | 1,040 |
package net.fwbrasil.radon.transaction
import scala.collection.JavaConversions.collectionAsScalaIterable
import scala.collection.mutable.ListBuffer
import scala.concurrent.Await
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.concurrent.duration.Duration
import scala.util.Failure
import scala.util.Success
import net.fwbrasil.radon.ref.Ref
import net.fwbrasil.radon.ref.RefContent
import net.fwbrasil.radon.util.ExclusiveThreadLocalItem
import net.fwbrasil.radon.util.Lockable.lockall
import java.util.concurrent.atomic.AtomicLong
class Transaction private[fwbrasil] (val transient: Boolean = false, transactionType: TransactionType = ReadWrite(), val shadow: Boolean = false)(implicit val context: TransactionContext)
extends TransactionValidator
with ExclusiveThreadLocalItem {
def this()(implicit context: TransactionContext) = this(false, ReadWrite())
import context._
private var refsRead: ListBuffer[Ref[Any]] = _
private var refsReadOnly: ListBuffer[Ref[Any]] = _
private var refsWrite: ListBuffer[Ref[Any]] = _
private var snapshots: List[RefSnapshot] = _
private var readLocks: List[Ref[Any]] = _
private var writeLocks: List[Ref[Any]] = _
var attachments = new ListBuffer[Any]()
def reads =
refsReadOnly
def assignments =
if (snapshots != null)
for (snapshot <- snapshots if (snapshot.isWrite == true)) yield (snapshot.ref, snapshot.value, snapshot.destroyedFlag)
else
List()
private[radon] def put[T](ref: Ref[T], value: Option[T]) = {
if (transactionType == readOnly) throw new IllegalStateException("Trying to write on a read only transaction. Ref: " + ref + " Value: " + value)
val anyRef = ref.asInstanceOf[Ref[Any]]
snapshotWrite(anyRef, value)
}
private[radon] def get[T](ref: Ref[T]): Option[T] =
snapshotRead(ref.asInstanceOf[Ref[Any]]).asInstanceOf[Option[T]]
private[radon] def getOriginalValue[T](ref: Ref[T]): Option[T] =
snapshotReadOriginalValue(ref.asInstanceOf[Ref[Any]]).asInstanceOf[Option[T]]
private[radon] def destroy[T](ref: Ref[T]): Unit = {
val anyRef = ref.asInstanceOf[Ref[Any]]
snapshotDestroy(anyRef)
}
private[radon] def isDestroyed[T](ref: Ref[T]): Boolean =
snapshotIsDestroyed(ref.asInstanceOf[Ref[Any]])
private[radon] def isDirty[T](ref: Ref[T]): Boolean = {
val snap = refsSnapshot.get(ref)
snap != null && snap.isWrite
}
def commit(): Unit =
commit(rollback = false)
def asyncCommit()(implicit ectx: ExecutionContext): Future[Unit] =
asyncCommit(rollback = false)
private def updateReadsAndWrites = {
import scala.collection.JavaConversions._
val refsRead = new ListBuffer[Ref[Any]]()
val refsReadOnly = new ListBuffer[Ref[Any]]()
val refsWrite = new ListBuffer[Ref[Any]]()
val snapshots = refsSnapshot.values.toList
for (snapshot <- snapshots) {
val ref = snapshot.ref
if (snapshot.isRead) {
refsRead += ref
if (!snapshot.isWrite)
refsReadOnly += ref
}
if (snapshot.isWrite)
refsWrite += ref
}
this.refsRead = refsRead
this.refsReadOnly = refsReadOnly
this.refsWrite = refsWrite
this.snapshots = snapshots
}
private def acquireLocks = {
val (readLockeds, readUnlockeds) = lockall(refsReadOnly, _.tryReadLock)
val (writeLockeds, writeUnlockeds) = lockall(refsWrite, _.tryWriteLock)
readLocks = readLockeds
writeLocks = writeLockeds
retryIfTrue(readUnlockeds.nonEmpty, readUnlockeds)
retryIfTrue(writeUnlockeds.nonEmpty, writeUnlockeds)
}
private def freeLocks = {
if (writeLocks != null)
writeLocks.foreach(_.writeUnlock)
if (readLocks != null)
readLocks.foreach(_.readUnlock)
writeLocks = null
readLocks = null
}
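    // Commit outline (commit below): run the beforeCommit callbacks inside this transaction,
    // snapshot the read/write sets and, when there is anything to validate or write, acquire the
    // read/write locks (retrying on contention), validate reads and writes against concurrent
    // transactions, make the changes durable unless the transaction is transient, flush the new
    // values into the refs, then release the locks and clear the transaction. Any failure
    // triggers prepareRollback followed by the same flush/unlock sequence.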
private def commit(rollback: Boolean): Unit = {
if (!rollback) transactionManager.runInTransaction(this)(beforeCommit(this))
updateReadsAndWrites
if (transactionType.validateCommit || !refsWrite.isEmpty) {
startIfNotStarted
try {
acquireLocks
if (!shadow)
validateTransaction
if (!transient && !rollback)
context.makeDurable(this)
flushTransaction
if (!rollback) afterCommit(this)
attachments.clear
} catch {
case e: Throwable =>
prepareRollback
flushTransaction
attachments.clear
throw e
}
}
}
private def asyncCommit(rollback: Boolean)(implicit ectx: ExecutionContext): Future[Unit] = {
if (!rollback) transactionManager.runInTransaction(this)(beforeCommit(this))
updateReadsAndWrites
startIfNotStarted
Future {
acquireLocks
validateTransaction
}.flatMap { _ =>
if (!transient && !rollback)
context.makeDurableAsync(this)
else
Future.successful()
}.transform(
{
_ =>
flushTransaction
if (!rollback) afterCommit(this)
attachments.clear
}, {
e =>
prepareRollback
flushTransaction
attachments.clear
throw e
})
}
private def flushTransaction = {
startIfNotStarted
stop
flushToMemory
freeLocks
clear
}
private def flushToMemory = {
val snapshotsIterator = snapshots.iterator
snapshots = List()
snapshotsIterator.foreach(setRefContent)
}
private def validateTransaction =
if (transactionType.validateCommit) {
refsReadOnly.foreach(e => {
validateContext(e)
validateConcurrentRefCreation(e)
})
if (transactionType.validateReads)
refsRead.foreach { e =>
validateRead(e)
validateDestroyed(e)
}
refsWrite.foreach(e => {
validateContext(e)
validateConcurrentRefCreation(e)
validateWrite(e)
validateDestroyed(e)
})
}
private def setRefContent(snapshot: RefSnapshot) = {
val ref = snapshot.ref
val refContent = ref.refContent
var value: Option[Any] = None
var destroyedFlag = false
if (snapshot.isWrite) {
value = snapshot.value
destroyedFlag = snapshot.destroyedFlag
} else {
value = refContent.value
destroyedFlag = refContent.destroyedFlag
}
val read =
readTimestamp(snapshot.isRead, refContent)
val write =
writeTimestamp(snapshot.isWrite, refContent)
require(((ref.creationTransactionId != transactionId || write != 0) &&
write != Long.MaxValue) || transient)
ref.setRefContent(value, read, write, destroyedFlag)
}
private def readTimestamp(isRefRead: Boolean, refContent: RefContent[_]) =
if (isRefRead && refContent.readTimestamp < startTimestamp && !shadow)
startTimestamp
else
refContent.readTimestamp
private def writeTimestamp(isRefWrite: Boolean, refContent: RefContent[_]) =
if (isRefWrite && refContent.writeTimestamp < startTimestamp && !shadow)
endTimestamp
else
refContent.writeTimestamp
def prepareRollback = {
val refsWrote =
if (refsWrite != null)
refsWrite
else
new ListBuffer[Ref[Any]]()
val refsCreated =
refsWrote.filter(_.creationTransactionId == transactionId)
clear
for (ref <- refsCreated)
destroy(ref)
updateReadsAndWrites
}
def rollback() = {
updateReadsAndWrites
prepareRollback
commit(rollback = true)
}
private[transaction] def clear = {
refsRead = null
refsWrite = null
clearSnapshots
clearStopWatch
}
}
object Transaction {
private val lastId = new AtomicLong(0)
def nextId = lastId.incrementAndGet
}
 | fwbrasil/radon-stm | src/main/scala/net/fwbrasil/radon/transaction/Transaction.scala | Scala | lgpl-2.1 | 8,697 |
import com.weez.mercury.common._
import com.weez.mercury.product._
import com.github.nscala_time.time.Imports._
 | weeztech/weez-mercury | main/src/main/scala/com/weez/mercury/stock/Stocks.scala | Scala | apache-2.0 | 111 |
package ch.bsisa.hyperbird.dao.xqs
import ch.bsisa.hyperbird.dao.{Queries,DbConfig}
import play.api.Logger
import ch.bsisa.hyperbird.CollectionsConfig
/**
* Implements Queries trait accessing database using XQS/XQJ drivers.
*
* @author Patrick Refondini
*/
object XQSQueries extends Queries {
/**
* Implements Queries
*/
def allHbCollectionsQuery(implicit conf: DbConfig): String = {
//TODO: The response produced by xmldb:get-child-collections('${conf.databaseName}')
// does not produce valid XML but a list of strings.
// Review this query.
// val query = s"""xmldb:get-child-collections('${conf.databaseName}')"""
    // TODO: The Queries trait could enforce Option[String] as the return type,
    // allowing for clean "not implemented" messages from the API.
val query = s""""NOT IMPLEMENTED""""
Logger.debug("allHbCollectionsQuery: " + query)
query
}
/**
* Implements Queries
*/
def filteredCollectionQuery(collectionId: String, xpath: String = "//ELFIN")(implicit conf: DbConfig): String = {
val query = s"""collection('${conf.databaseName}/${collectionId}')${xpath}"""
Logger.debug("fileteredCollectionQuery: " + query)
query
}
/**
* Implements Queries
*/
def elfinQuery(collectionId: String, elfinId: String)(implicit conf: DbConfig): String = {
val query = s"""collection('${conf.databaseName}/${collectionId}')//ELFIN[@Id='${elfinId}']"""
Logger.debug("elfin: " + query)
query
}
/**
* Implements Queries
*/
def elfinQuery(elfinId: String)(implicit conf: DbConfig): String = {
val query = s"""collection('${conf.databaseName}')//ELFIN[@Id='${elfinId}']"""
Logger.debug("elfin: " + query)
query
}
/**
* Returns a query to access a single ELFIN of CLASSE USER identified by the given email.
* TODO: Not implemented yet.
*/
def elfinUserPerEmailQuery(email: String)(implicit dbConf: DbConfig, collectionsConf: CollectionsConfig): String = {
val query = s""""NOT IMPLEMENTED""""
Logger.debug("elfinUserPerEmailQuery: " + query)
query
}
} | bsisa/hb-api | app/ch/bsisa/hyperbird/dao/xqs/XQSQueries.scala | Scala | gpl-2.0 | 2,122 |
///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2014 Adobe Systems Incorporated. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////////
package com.adobe
package queries
// Spark.
import org.apache.spark.{SparkConf,SparkContext}
import org.apache.spark.SparkContext._
import org.apache.spark.storage.StorageLevel
import org.apache.spark.rdd.RDD
import scala.math
// Analytics Thrift objects.
import thrift.AnalyticsData
object TopReferringDomains extends Query {
def colsNeeded = Seq("visit_referrer", "post_visid_high", "post_visid_low",
"visit_num")
def run(c: QueryConf) = {
val allData = c.sc.union(c.data).cache()
val numAllRows = c.dailyRows.reduce(_+_)
val numPartitions = math.max((numAllRows/c.targetPartitionSize).toInt, 1)
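    // Pipeline sketch: keep rows that carry a visit referrer, pair the referrer's domain with a
    // per-visit identifier built from visid high, visid low and the visit number, deduplicate
    // those (visit, domain) pairs so repeated hits within a visit count once, then sum the
    // visits per domain and keep the five largest.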
val topReferrers = allData.collect{
case (root) if !root.visit_referrer.isEmpty =>
(BigInt(root.post_visid_high + root.post_visid_low + root.visit_num),
QueryMeta.getDomainFromReferrer(root.visit_referrer))
}
.distinct(numPartitions){
Ordering.by((entry: (BigInt,String)) => entry._1)
}
.collect{
case (visit_id, ref) if !ref.isEmpty() => (ref, 1)
}
.reduceByKey(_+_,numPartitions)
.top(5) {
Ordering.by((entry: ((String, Int))) => entry._2)
}.toSeq
allData.unpersist()
if (c.profile)
"[" + topReferrers.map("\\"" + _.toString + "\\"").mkString(", ") + "]"
else {
html.TopReferringDomains("TopReferringDomains",
c.daysInRange, topReferrers).toString
}
}
}
| alexanderfield/spindle | src/main/scala/queries/TopReferringDomains.scala | Scala | apache-2.0 | 2,204 |
/*
* Copyright (c) 2014 Contributor. All rights reserved.
*/
package org.scalaide.debug.internal.expression.proxies.primitives.operations.bitwise
import org.junit.Test
import org.scalaide.debug.internal.expression.Names.Java
import org.scalaide.debug.internal.expression.BaseIntegrationTest
import org.scalaide.debug.internal.expression.BaseIntegrationTestCompanion
import org.scalaide.debug.internal.expression.TestValues
class BitwiseOrTest extends BaseIntegrationTest(BitwiseOrTest) {
import TestValues.ValuesTestCase._
@Test
def `byte | sth`(): Unit = {
eval("byte | byte2", byte | byte2, Java.primitives.int)
eval("byte | short2", byte | short2, Java.primitives.int)
eval("byte | char2", byte | char2, Java.primitives.int)
eval("byte | int2", byte | int2, Java.primitives.int)
eval("byte | long2", byte | long2, Java.primitives.long)
expectReflectiveCompilationError("byte | float")
expectReflectiveCompilationError("byte | double")
}
@Test
def `short | sth`(): Unit = {
eval("short | byte2", short | byte2, Java.primitives.int)
eval("short | short2", short | short2, Java.primitives.int)
eval("short | char2", short | char2, Java.primitives.int)
eval("short | int2", short | int2, Java.primitives.int)
eval("short | long2", short | long2, Java.primitives.long)
expectReflectiveCompilationError("short | float")
expectReflectiveCompilationError("short | double")
}
@Test
def `char | sth`(): Unit = {
eval("char | byte2", char | byte2, Java.primitives.int)
eval("char | short2", char | short2, Java.primitives.int)
eval("char | char2", char | char2, Java.primitives.int)
eval("char | int2", char | int2, Java.primitives.int)
eval("char | long2", char | long2, Java.primitives.long)
expectReflectiveCompilationError("char | float")
expectReflectiveCompilationError("char | double")
}
@Test
def `int | sth`(): Unit = {
eval("int | byte2", int | byte2, Java.primitives.int)
eval("int | short2", int | short2, Java.primitives.int)
eval("int | char", int | char, Java.primitives.int)
eval("int | int2", int | int2, Java.primitives.int)
eval("int | long2", int | long2, Java.primitives.long)
expectReflectiveCompilationError("int | float")
expectReflectiveCompilationError("int | double")
}
@Test
def `long | sth`(): Unit = {
eval("long | byte2", long | byte2, Java.primitives.long)
eval("long | short2", long | short2, Java.primitives.long)
eval("long | char", long | char, Java.primitives.long)
eval("long | int2", long | int2, Java.primitives.long)
eval("long | long2", long | long2, Java.primitives.long)
expectReflectiveCompilationError("long | float")
expectReflectiveCompilationError("long | double")
}
@Test
def `float | byte`(): Unit = {
expectReflectiveCompilationError("float | byte2")
expectReflectiveCompilationError("float | short2")
expectReflectiveCompilationError("float | char")
expectReflectiveCompilationError("float | int2")
expectReflectiveCompilationError("float | long2")
expectReflectiveCompilationError("float | float2")
expectReflectiveCompilationError("float | double")
}
@Test
def `double | sth`(): Unit = {
expectReflectiveCompilationError("double | byte2")
expectReflectiveCompilationError("double | short2")
expectReflectiveCompilationError("double | char")
expectReflectiveCompilationError("double | int2")
expectReflectiveCompilationError("double | long2")
expectReflectiveCompilationError("double | float")
expectReflectiveCompilationError("double | double2")
}
}
object BitwiseOrTest extends BaseIntegrationTestCompanion
| Kwestor/scala-ide | org.scala-ide.sdt.debug.expression.tests/src/org/scalaide/debug/internal/expression/proxies/primitives/operations/bitwise/BitwiseOrTest.scala | Scala | bsd-3-clause | 3,708 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding
import cascading.tuple.Fields
import org.scalatest.WordSpec
import scala.collection.JavaConverters._
import org.apache.hadoop.mapred.JobConf
class ReplTest extends WordSpec {
import ReplImplicits._
import ReplImplicitContext._
val tutorialData = "../tutorial/data"
val helloPath = tutorialData + "/hello.txt"
def test() = {
val suffix = mode match {
case _: CascadingLocal => "local"
case _: HadoopMode => "hadoop"
}
val testPath = "/tmp/scalding-repl/test/" + suffix + "/"
val helloRef = List("Hello world", "Goodbye world")
"save -- TypedPipe[String]" in {
val hello = TypedPipe.from(TextLine(helloPath))
val out = TypedTsv[String](testPath + "output0.txt")
hello.save(out)
val output = out.toIterator.toList
assert(output === helloRef)
}
"snapshot" should {
"only -- TypedPipe[String]" in {
val hello = TypedPipe.from(TextLine(helloPath))
val s: TypedPipe[String] = hello.snapshot
// shallow verification that the snapshot was created correctly without
// actually running a new flow to check the contents (just check that
// it's a TypedPipe from a MemorySink or SequenceFile)
assert(s.toString.contains("IterablePipe") || s.toString.contains("TypedPipeFactory"))
val pipeName = mode match {
case m: HadoopMode => m.jobConf.get("hadoop.tmp.dir")
case _ => "IterableSource"
}
assert(s.toPipe(Fields.ALL).toString.contains(pipeName))
}
"can be mapped and saved -- TypedPipe[String]" in {
val s = TypedPipe.from(TextLine(helloPath))
        .flatMap(_.split("\s+"))
.snapshot
val out = TypedTsv[String](testPath + "output1.txt")
// can call 'map' and 'save' on snapshot
s.map(_.toLowerCase).save(out)
val output = out.toIterator.toList
      assert(output === helloRef.flatMap(_.split("\s+")).map(_.toLowerCase))
}
"tuples -- TypedPipe[(String,Int)]" in {
val s = TypedPipe.from(TextLine(helloPath))
        .flatMap(_.split("\s+"))
.map(w => (w.toLowerCase, w.length))
.snapshot
val output = s.toList
      assert(output === helloRef.flatMap(_.split("\s+")).map(w => (w.toLowerCase, w.length)))
}
"grouped -- Grouped[String,String]" which {
val grp = TypedPipe.from(TextLine(helloPath))
.groupBy(_.toLowerCase)
val correct = helloRef.map(l => (l.toLowerCase, l))
"is explicit" in {
(grp.snapshot.toList === correct)
}
      // Note: Must explicitly call toIterator because `grp.toList` resolves to `KeyedList.toList`
"is implicit" in {
assert(grp.toIterator.toList === correct)
}
}
"joined -- CoGrouped[String, Long]" which {
val linesByWord = TypedPipe.from(TextLine(helloPath))
        .flatMap(_.split("\s+"))
.groupBy(_.toLowerCase)
val wordScores = TypedPipe.from(TypedTsv[(String, Double)](tutorialData + "/word_scores.tsv")).group
val grp = linesByWord.join(wordScores)
.mapValues { case (text, score) => score }
.sum
val correct = Map("hello" -> 1.0, "goodbye" -> 3.0, "world" -> 4.0)
"is explicit" in {
val s = grp.snapshot
assert(s.toIterator.toMap === correct)
}
"is implicit" in {
assert(grp.toIterator.toMap === correct)
}
}
"support toOption on ValuePipe" in {
val hello = TypedPipe.from(TextLine(helloPath))
val res = hello.map(_.length).sum
val correct = helloRef.map(_.length).sum
assert(res.toOption === Some(correct))
}
}
"reset flow" in {
resetFlowDef()
assert(flowDef.getSources.asScala.isEmpty)
}
"run entire flow" in {
resetFlowDef()
val hello = TypedPipe.from(TextLine(helloPath))
      .flatMap(_.split("\s+"))
.map(_.toLowerCase)
.distinct
val out = TypedTsv[String](testPath + "words.tsv")
hello.write(out)
ReplImplicits.run
val words = out.toIterator.toSet
assert(words === Set("hello", "world", "goodbye"))
}
"TypedPipe of a TextLine" should {
val hello = TypedPipe.from(TextLine(helloPath))
"support toIterator" in {
hello.toIterator.foreach { line: String =>
assert(line.contains("Hello world") || line.contains("Goodbye world"))
}
}
"support toList" in {
assert(hello.toList === helloRef)
}
}
"toIterator should generate a snapshot for TypedPipe with" should {
val hello = TypedPipe.from(TextLine(helloPath))
"flatMap" in {
      val out = hello.flatMap(_.split("\s+")).toList
      assert(out === helloRef.flatMap(_.split("\s+")))
}
"tuple" in {
assert(hello.map(l => (l, l.length)).toList === helloRef.map(l => (l, l.length)))
}
}
}
"REPL in Local mode" should {
ReplImplicits.mode = Local(strictSources = true)
test()
}
"REPL in Hadoop mode" should {
ReplImplicits.mode = Hdfs(strict = true, new JobConf)
test()
}
}
| MansurAshraf/scalding | scalding-repl/src/test/scala/com/twitter/scalding/ReplTest.scala | Scala | apache-2.0 | 5,794 |
package com.blogspot.ramannanda.scala.algorithms.cp3.adhoc.anagrams
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
import scala.io.StdIn
//uva 00454
object FindAnagrams {
def anagramPairs(words: Seq[String]): Unit = {
val anaMap = mutable.LinkedHashMap[String, Seq[String]]()
for (word <- words) {
      val wordStripped = word.replaceAll("\s+", "").sorted
if (anaMap.contains(wordStripped)) {
anaMap.put(wordStripped, anaMap(wordStripped) :+ word)
}
else {
anaMap.put(wordStripped, Seq(word))
}
}
anaMap.foreach(kv => {
val kvSet = kv._2.toSet
for (i <- 0 until kvSet.size) {
for (j <- i + 1 until kvSet.size) {
println(s"${kv._2(i)} = ${kv._2(j)}")
}
}
})
}
def main(args: Array[String]): Unit = {
var lb = ListBuffer[String]()
val cases = StdIn.readLine().trim.toInt
var i = 0
while (i < cases) {
val word = StdIn.readLine()
if (word.isEmpty) {
anagramPairs(lb)
i += 1
}
lb += word
}
}
}
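// Worked example: for the input words "tide", "diet" and "edit" the whitespace-stripped, sorted
// key is "deit" for all three, so anagramPairs prints "tide = diet", "tide = edit" and
// "diet = edit" (in input order) once the terminating blank line is read.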
| ramannanda9/algorithms-in-scala | src/main/scala/com/blogspot/ramannanda/scala/algorithms/cp3/adhoc/anagrams/FindAnagrams.scala | Scala | gpl-3.0 | 1,096 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.datastream
import java.lang.{Byte => JByte}
import java.util.{List => JList}
import org.apache.calcite.plan.{RelOptCluster, RelTraitSet}
import org.apache.calcite.rel.RelFieldCollation.Direction.ASCENDING
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.core.Window.Group
import org.apache.calcite.rel.core.{AggregateCall, Window}
import org.apache.calcite.rel.{RelNode, RelWriter, SingleRel}
import org.apache.calcite.rex.RexLiteral
import org.apache.flink.api.java.functions.NullByteKeySelector
import org.apache.flink.streaming.api.datastream.DataStream
import org.apache.flink.streaming.api.functions.KeyedProcessFunction
import org.apache.flink.table.api.{StreamQueryConfig, StreamTableEnvironment, TableConfig, TableException}
import org.apache.flink.table.calcite.FlinkTypeFactory
import org.apache.flink.table.codegen.AggregationCodeGenerator
import org.apache.flink.table.plan.nodes.OverAggregate
import org.apache.flink.table.plan.rules.datastream.DataStreamRetractionRules
import org.apache.flink.table.plan.schema.RowSchema
import org.apache.flink.table.runtime.CRowKeySelector
import org.apache.flink.table.runtime.aggregate.AggregateUtil.CalcitePair
import org.apache.flink.table.runtime.aggregate._
import org.apache.flink.table.runtime.types.{CRow, CRowTypeInfo}
import org.apache.flink.table.util.Logging
import org.apache.flink.types.Row
import scala.collection.JavaConverters._
class DataStreamOverAggregate(
logicWindow: Window,
cluster: RelOptCluster,
traitSet: RelTraitSet,
inputNode: RelNode,
schema: RowSchema,
inputSchema: RowSchema)
extends SingleRel(cluster, traitSet, inputNode)
with OverAggregate
with DataStreamRel
with Logging {
override def deriveRowType(): RelDataType = schema.relDataType
override def needsUpdatesAsRetraction = true
override def consumesRetractions = true
override def copy(traitSet: RelTraitSet, inputs: JList[RelNode]): RelNode = {
new DataStreamOverAggregate(
logicWindow,
cluster,
traitSet,
inputs.get(0),
schema,
inputSchema)
}
override def toString: String = {
s"OverAggregate($aggOpName)"
}
override def explainTerms(pw: RelWriter): RelWriter = {
val overWindow: Group = logicWindow.groups.get(0)
val constants: Seq[RexLiteral] = logicWindow.constants.asScala
val partitionKeys: Array[Int] = overWindow.keys.toArray
val namedAggregates: Seq[CalcitePair[AggregateCall, String]] = generateNamedAggregates
super.explainTerms(pw)
.itemIf("partitionBy",
partitionToString(schema.relDataType, partitionKeys), partitionKeys.nonEmpty)
.item("orderBy",
orderingToString(schema.relDataType, overWindow.orderKeys.getFieldCollations))
.itemIf("rows", windowRange(logicWindow, overWindow, inputNode), overWindow.isRows)
.itemIf("range", windowRange(logicWindow, overWindow, inputNode), !overWindow.isRows)
.item(
"select", aggregationToString(
inputSchema.relDataType,
constants,
schema.relDataType,
namedAggregates))
}
override def translateToPlan(
tableEnv: StreamTableEnvironment,
queryConfig: StreamQueryConfig): DataStream[CRow] = {
if (logicWindow.groups.size > 1) {
throw new TableException(
"Unsupported use of OVER windows. All aggregates must be computed on the same window.")
}
val overWindow: org.apache.calcite.rel.core.Window.Group = logicWindow.groups.get(0)
val orderKeys = overWindow.orderKeys.getFieldCollations
if (orderKeys.size() != 1) {
throw new TableException(
"Unsupported use of OVER windows. The window can only be ordered by a single time column.")
}
val orderKey = orderKeys.get(0)
if (!orderKey.direction.equals(ASCENDING)) {
throw new TableException(
"Unsupported use of OVER windows. The window can only be ordered in ASCENDING mode.")
}
val inputDS = input.asInstanceOf[DataStreamRel].translateToPlan(tableEnv, queryConfig)
val inputIsAccRetract = DataStreamRetractionRules.isAccRetract(input)
if (inputIsAccRetract) {
throw new TableException(
"Retraction on Over window aggregation is not supported yet. " +
"Note: Over window aggregation should not follow a non-windowed GroupBy aggregation.")
}
if (!logicWindow.groups.get(0).keys.isEmpty && queryConfig.getMinIdleStateRetentionTime < 0) {
LOG.warn(
"No state retention interval configured for a query which accumulates state. " +
"Please provide a query configuration with valid retention interval to prevent " +
"excessive state size. You may specify a retention time of 0 to not clean up the state.")
}
val constants: Seq[RexLiteral] = logicWindow.constants.asScala
val generator = new AggregationCodeGenerator(
tableEnv.getConfig,
false,
inputSchema.typeInfo,
Some(constants))
val constantTypes = constants.map(_.getType)
val fieldTypes = input.getRowType.getFieldList.asScala.map(_.getType)
val aggInTypes = fieldTypes ++ constantTypes
val aggInNames = aggInTypes.indices.map("f" + _)
val aggregateInputType =
getCluster.getTypeFactory.createStructType(aggInTypes.asJava, aggInNames.asJava)
val timeType = schema.relDataType
.getFieldList
.get(orderKey.getFieldIndex)
.getType
// identify window rowtime attribute
val rowTimeIdx: Option[Int] = if (FlinkTypeFactory.isRowtimeIndicatorType(timeType)) {
Some(orderKey.getFieldIndex)
} else if (FlinkTypeFactory.isProctimeIndicatorType(timeType)) {
None
} else {
throw new TableException(s"OVER windows can only be applied on time attributes.")
}
if (overWindow.lowerBound.isPreceding && overWindow.lowerBound.isUnbounded &&
overWindow.upperBound.isCurrentRow) {
// unbounded OVER window
createUnboundedAndCurrentRowOverWindow(
queryConfig,
tableEnv.getConfig,
generator,
inputDS,
rowTimeIdx,
aggregateInputType,
isRowsClause = overWindow.isRows)
} else if (
overWindow.lowerBound.isPreceding && !overWindow.lowerBound.isUnbounded &&
overWindow.upperBound.isCurrentRow) {
// bounded OVER window
createBoundedAndCurrentRowOverWindow(
queryConfig,
generator,
inputDS,
rowTimeIdx,
aggregateInputType,
isRowsClause = overWindow.isRows,
tableEnv.getConfig)
} else {
throw new TableException("OVER RANGE FOLLOWING windows are not supported yet.")
}
}
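  /**
   * Translates an OVER window with an UNBOUNDED PRECEDING lower bound and CURRENT ROW upper
   * bound. The input is keyed by the partition columns, or by a constant null-byte key with
   * parallelism 1 when the window is not partitioned.
   */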
def createUnboundedAndCurrentRowOverWindow(
queryConfig: StreamQueryConfig,
tableConfig: TableConfig,
generator: AggregationCodeGenerator,
inputDS: DataStream[CRow],
rowTimeIdx: Option[Int],
aggregateInputType: RelDataType,
isRowsClause: Boolean): DataStream[CRow] = {
val overWindow: Group = logicWindow.groups.get(0)
val partitionKeys: Array[Int] = overWindow.keys.toArray
val namedAggregates: Seq[CalcitePair[AggregateCall, String]] = generateNamedAggregates
// get the output types
val returnTypeInfo = CRowTypeInfo(schema.typeInfo)
def createKeyedProcessFunction[K]: KeyedProcessFunction[K, CRow, CRow] = {
AggregateUtil.createUnboundedOverProcessFunction[K](
generator,
namedAggregates,
aggregateInputType,
inputSchema.relDataType,
inputSchema.typeInfo,
inputSchema.fieldTypeInfos,
queryConfig,
tableConfig,
rowTimeIdx,
partitionKeys.nonEmpty,
isRowsClause)
}
val result: DataStream[CRow] =
// partitioned aggregation
if (partitionKeys.nonEmpty) {
inputDS
.keyBy(new CRowKeySelector(partitionKeys, inputSchema.projectedTypeInfo(partitionKeys)))
.process(createKeyedProcessFunction[Row])
.returns(returnTypeInfo)
.name(aggOpName)
.asInstanceOf[DataStream[CRow]]
}
// non-partitioned aggregation
else {
inputDS.keyBy(new NullByteKeySelector[CRow])
.process(createKeyedProcessFunction[JByte]).setParallelism(1).setMaxParallelism(1)
.returns(returnTypeInfo)
.name(aggOpName)
}
result
}
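  /**
   * Translates an OVER window with a bounded PRECEDING lower bound and CURRENT ROW upper
   * bound. The preceding offset is taken from the window's lower boundary, plus one for ROWS
   * windows so that the current row is included in the count.
   */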
def createBoundedAndCurrentRowOverWindow(
queryConfig: StreamQueryConfig,
generator: AggregationCodeGenerator,
inputDS: DataStream[CRow],
rowTimeIdx: Option[Int],
aggregateInputType: RelDataType,
isRowsClause: Boolean,
tableConfig: TableConfig): DataStream[CRow] = {
val overWindow: Group = logicWindow.groups.get(0)
val partitionKeys: Array[Int] = overWindow.keys.toArray
val namedAggregates: Seq[CalcitePair[AggregateCall, String]] = generateNamedAggregates
val precedingOffset =
getLowerBoundary(logicWindow, overWindow, getInput()) + (if (isRowsClause) 1 else 0)
// get the output types
val returnTypeInfo = CRowTypeInfo(schema.typeInfo)
def createKeyedProcessFunction[K]: KeyedProcessFunction[K, CRow, CRow] = {
AggregateUtil.createBoundedOverProcessFunction[K](
generator,
namedAggregates,
aggregateInputType,
inputSchema.relDataType,
inputSchema.typeInfo,
inputSchema.fieldTypeInfos,
precedingOffset,
queryConfig,
tableConfig,
isRowsClause,
rowTimeIdx
)
}
val result: DataStream[CRow] =
// partitioned aggregation
if (partitionKeys.nonEmpty) {
inputDS
.keyBy(new CRowKeySelector(partitionKeys, inputSchema.projectedTypeInfo(partitionKeys)))
.process(createKeyedProcessFunction[Row])
.returns(returnTypeInfo)
.name(aggOpName)
}
// non-partitioned aggregation
else {
inputDS
.keyBy(new NullByteKeySelector[CRow])
.process(createKeyedProcessFunction[JByte]).setParallelism(1).setMaxParallelism(1)
.returns(returnTypeInfo)
.name(aggOpName)
}
result
}
private def generateNamedAggregates: Seq[CalcitePair[AggregateCall, String]] = {
val overWindow: Group = logicWindow.groups.get(0)
val aggregateCalls = overWindow.getAggregateCalls(logicWindow)
for (i <- 0 until aggregateCalls.size())
yield new CalcitePair[AggregateCall, String](aggregateCalls.get(i), "w0$o" + i)
}
private def aggOpName = {
val overWindow: Group = logicWindow.groups.get(0)
val constants: Seq[RexLiteral] = logicWindow.constants.asScala
val partitionKeys: Array[Int] = overWindow.keys.toArray
val namedAggregates: Seq[CalcitePair[AggregateCall, String]] = generateNamedAggregates
s"over: (${
if (!partitionKeys.isEmpty) {
s"PARTITION BY: ${partitionToString(inputSchema.relDataType, partitionKeys)}, "
} else {
""
}
}ORDER BY: ${orderingToString(inputSchema.relDataType,
overWindow.orderKeys.getFieldCollations)}, " +
s"${if (overWindow.isRows) "ROWS" else "RANGE"}" +
s"${windowRange(logicWindow, overWindow, inputNode)}, " +
s"select: (${
aggregationToString(
inputSchema.relDataType,
constants,
schema.relDataType,
namedAggregates)
}))"
}
}
| ueshin/apache-flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/plan/nodes/datastream/DataStreamOverAggregate.scala | Scala | apache-2.0 | 12,203 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.shuffle.remote
import org.apache.spark._
import org.apache.spark.internal.{Logging, config}
import org.apache.spark.io.CompressionCodec
import org.apache.spark.serializer.SerializerManager
import org.apache.spark.shuffle.{BaseShuffleHandle, ShuffleReadMetricsReporter, ShuffleReader}
import org.apache.spark.storage.{BlockId, BlockManagerId}
import org.apache.spark.util.CompletionIterator
import org.apache.spark.util.collection.RemoteSorter
/**
* Fetches and reads the partitions in range [startPartition, endPartition) from a shuffle by
* requesting them from other nodes' block stores.
*/
private[spark] class RemoteShuffleReader[K, C](
handle: BaseShuffleHandle[K, _, C],
resolver: RemoteShuffleBlockResolver,
blocksByAddress: Iterator[(BlockManagerId, Seq[(BlockId, Long, Int)])],
context: TaskContext,
readMetrics: ShuffleReadMetricsReporter,
serializerManager: SerializerManager = SparkEnv.get.serializerManager,
mapOutputTracker: MapOutputTracker = SparkEnv.get.mapOutputTracker,
shouldBatchFetch: Boolean = false)
extends ShuffleReader[K, C] with Logging {
private val dep = handle.dependency
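  /**
   * Decides whether continuous shuffle blocks may be fetched in one batched request: only when
   * the serializer supports relocation of serialized objects, compression is disabled or the
   * codec supports concatenation of serialized streams, and the old fetch protocol is not used.
   */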
private def fetchContinuousBlocksInBatch: Boolean = {
val conf = SparkEnv.get.conf
val serializerRelocatable = dep.serializer.supportsRelocationOfSerializedObjects
val compressed = conf.get(config.SHUFFLE_COMPRESS)
val codecConcatenation = if (compressed) {
CompressionCodec.supportsConcatenationOfSerializedStreams(CompressionCodec.createCodec(conf))
} else {
true
}
val useOldFetchProtocol = conf.get(config.SHUFFLE_USE_OLD_FETCH_PROTOCOL)
val doBatchFetch = shouldBatchFetch && serializerRelocatable &&
(!compressed || codecConcatenation) && !useOldFetchProtocol
if (shouldBatchFetch && !doBatchFetch) {
logDebug("The feature tag of continuous shuffle block fetching is set to true, but " +
"we can not enable the feature because other conditions are not satisfied. " +
s"Shuffle compress: $compressed, serializer relocatable: $serializerRelocatable, " +
s"codec concatenation: $codecConcatenation, use old shuffle fetch protocol: " +
s"$useOldFetchProtocol.")
}
doBatchFetch
}
/** Read the combined key-values for this reduce task */
override def read(): Iterator[Product2[K, C]] = {
val wrappedStreams = new RemoteShuffleBlockIterator(
context,
resolver.remoteShuffleTransferService,
resolver,
blocksByAddress,
serializerManager.wrapStream,
// Note: we use getSizeAsMb when no suffix is provided for backwards compatibility
SparkEnv.get.conf.get(config.REDUCER_MAX_SIZE_IN_FLIGHT) * 1024 * 1024,
SparkEnv.get.conf.get(config.REDUCER_MAX_REQS_IN_FLIGHT),
SparkEnv.get.conf.get(config.REDUCER_MAX_BLOCKS_IN_FLIGHT_PER_ADDRESS),
SparkEnv.get.conf.get(config.SHUFFLE_DETECT_CORRUPT),
SparkEnv.get.conf.get(config.SHUFFLE_DETECT_CORRUPT_MEMORY),
readMetrics,
fetchContinuousBlocksInBatch).toCompletionIterator
val serializerInstance = dep.serializer.newInstance()
// Create a key/value iterator for each stream
val recordIter = wrappedStreams.flatMap { case (blockId, wrappedStream) =>
// Note: the asKeyValueIterator below wraps a key/value iterator inside of a
// NextIterator. The NextIterator makes sure that close() is called on the
// underlying InputStream when all records have been read.
serializerInstance.deserializeStream(wrappedStream).asKeyValueIterator
}
// Update the context task metrics for each record read.
val metricIter = CompletionIterator[(Any, Any), Iterator[(Any, Any)]](
recordIter.map { record =>
readMetrics.incRecordsRead(1)
record
},
context.taskMetrics().mergeShuffleReadMetrics())
// An interruptible iterator must be used here in order to support task cancellation
val interruptibleIter = new InterruptibleIterator[(Any, Any)](context, metricIter)
val aggregatedIter: Iterator[Product2[K, C]] = if (dep.aggregator.isDefined) {
val remoteAggregator = dep.aggregator.map(new RemoteAggregator(_, resolver))
if (dep.mapSideCombine) {
// We are reading values that are already combined
val combinedKeyValuesIterator = interruptibleIter.asInstanceOf[Iterator[(K, C)]]
remoteAggregator.get.combineCombinersByKey(combinedKeyValuesIterator, context)
} else {
// We don't know the value type, but also don't care -- the dependency *should*
        // have made sure it's compatible with this aggregator, which will convert the value
// type to the combined type C
val keyValuesIterator = interruptibleIter.asInstanceOf[Iterator[(K, Nothing)]]
remoteAggregator.get.combineValuesByKey(keyValuesIterator, context)
}
} else {
interruptibleIter.asInstanceOf[Iterator[Product2[K, C]]]
}
// Sort the output if there is a sort ordering defined.
val resultIter = dep.keyOrdering match {
case Some(keyOrd: Ordering[K]) =>
        // Create a RemoteSorter to sort the data.
val sorter = new RemoteSorter[K, C, C](
context, resolver, ordering = Some(keyOrd), serializer = dep.serializer)
sorter.insertAll(aggregatedIter)
context.taskMetrics().incMemoryBytesSpilled(sorter.memoryBytesSpilled)
context.taskMetrics().incDiskBytesSpilled(sorter.diskBytesSpilled)
context.taskMetrics().incPeakExecutionMemory(sorter.peakMemoryUsedBytes)
// Use completion callback to stop sorter if task was finished/cancelled.
context.addTaskCompletionListener[Unit](_ => {
sorter.stop()
})
CompletionIterator[Product2[K, C], Iterator[Product2[K, C]]](sorter.iterator, sorter.stop())
case None =>
aggregatedIter
}
resultIter match {
case _: InterruptibleIterator[Product2[K, C]] => resultIter
case _ =>
        // Use another interruptible iterator here to support task cancellation, as the
        // aggregator and/or sorter may have consumed the previous interruptible iterator.
new InterruptibleIterator[Product2[K, C]](context, resultIter)
}
}
}
| Intel-bigdata/OAP | oap-shuffle/remote-shuffle/src/main/scala/org/apache/spark/shuffle/remote/RemoteShuffleReader.scala | Scala | apache-2.0 | 7,050 |
package models
import play.api.db.slick.Config.driver.simple._
case class TSVFileSampleLink(id: Option[Int], tsvFileId: Int, sampleId: Int, created: java.sql.Timestamp)
class TSVFileSampleLinkTable(tag: Tag) extends Table[TSVFileSampleLink](tag, "tsv_file_sample_link") {
val tsvFiles = TableQuery[TSVFileTable]
val samples = TableQuery[SampleTable]
def id = column[Int]("id", O.PrimaryKey, O.AutoInc)
def tsvFileId = column[Int]("tsv_file_id", O.NotNull)
def sampleId = column[Int]("sample_id", O.NotNull)
def created = column[java.sql.Timestamp]("created_tstmp", O.NotNull)
def * = (id.?, tsvFileId, sampleId, created) <> (TSVFileSampleLink.tupled, TSVFileSampleLink.unapply)
def tsvFile = foreignKey("TSV_FILE_FK", tsvFileId, tsvFiles)(_.id)
def sample = foreignKey("SAMPLE_FK", sampleId, samples)(_.id)
} | seqprodbio/restoule | app/models/TSVFileSampleLink.scala | Scala | gpl-3.0 | 844 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.matchers.dsl
import org.scalatest.matchers._
import org.scalactic._
import scala.util.matching.Regex
import org.scalatest.Resources
import org.scalatest.UnquotedString
import org.scalatest.matchers.MatchersHelper.startWithRegexWithGroups
/**
* This class is part of the ScalaTest matchers DSL. Please see the documentation for <a href="../Matchers.html"><code>Matchers</code></a> for an overview of
* the matchers DSL.
*
* @author Bill Venners
*/
final class StartWithWord {
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* "1.7b" should (startWith ("1.7") and startWith ("1.7b"))
* ^
* </pre>
*/
def apply(right: String): Matcher[String] =
new Matcher[String] {
def apply(left: String): MatchResult =
MatchResult(
left startsWith right,
Resources.rawDidNotStartWith,
Resources.rawStartedWith,
Vector(left, right)
)
override def toString: String = "startWith (" + Prettifier.default(right) + ")"
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* val decimal = """(-)?(\d+)(\.\d*)?"""
* "1.7b" should (startWith regex (decimal) and startWith regex (decimal))
* ^
* </pre>
*/
def regex[T <: String](right: T): Matcher[T] = regex(right.r)
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* string should not { startWith regex ("a(b*)c" withGroup "bb") }
* ^
* </pre>
*/
def regex(regexWithGroups: RegexWithGroups) =
new Matcher[String] {
def apply(left: String): MatchResult =
startWithRegexWithGroups(left, regexWithGroups.regex, regexWithGroups.groups)
override def toString: String = "startWith regex " + Prettifier.default(regexWithGroups)
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* val decimalRegex = """(-)?(\d+)(\.\d*)?""".r
* "1.7" should (startWith regex (decimalRegex) and startWith regex (decimalRegex))
* ^
* </pre>
*/
def regex(rightRegex: Regex): Matcher[String] =
new Matcher[String] {
def apply(left: String): MatchResult =
MatchResult(
rightRegex.pattern.matcher(left).lookingAt,
Resources.rawDidNotStartWithRegex,
Resources.rawStartedWithRegex,
Vector(left, UnquotedString(rightRegex.toString))
)
override def toString: String = "startWith regex " + Prettifier.default(rightRegex)
}
/**
* Overrides toString to return "startWith"
*/
override def toString: String = "startWith"
}
| dotty-staging/scalatest | scalatest/src/main/scala/org/scalatest/matchers/dsl/StartWithWord.scala | Scala | apache-2.0 | 3,368 |
package sttp.client3.httpclient.fs2
import cats.effect.IO
import cats.implicits._
import fs2.Pipe
import sttp.capabilities.fs2.Fs2Streams
import sttp.client3.impl.fs2.Fs2WebSockets
import sttp.client3.testing.websocket.{WebSocketConcurrentTest, WebSocketStreamingTest, WebSocketTest}
import sttp.ws.WebSocketFrame
class HttpClientFs2WebSocketTest
extends WebSocketTest[IO]
with WebSocketStreamingTest[IO, Fs2Streams[IO]]
with WebSocketConcurrentTest[IO]
with HttpClientFs2TestBase {
override val streams: Fs2Streams[IO] = new Fs2Streams[IO] {}
override def functionToPipe(
f: WebSocketFrame.Data[_] => Option[WebSocketFrame]
): fs2.Pipe[IO, WebSocketFrame.Data[_], WebSocketFrame] = in => in.mapFilter(f)
override def fromTextPipe(
function: String => WebSocketFrame
): Pipe[IO, WebSocketFrame.Data[_], WebSocketFrame] =
Fs2WebSockets.fromTextPipe[IO](function)
override def prepend(
item: WebSocketFrame.Text
)(to: Pipe[IO, WebSocketFrame.Data[_], WebSocketFrame]): Pipe[IO, WebSocketFrame.Data[_], WebSocketFrame] = {
to.andThen(rest => fs2.Stream.eval(item.pure[IO]) ++ rest)
}
override def concurrently[T](fs: List[() => IO[T]]): IO[List[T]] = fs.map(_()).parSequence
}
| softwaremill/sttp | httpclient-backend/fs2/src/test/scala/sttp/client3/httpclient/fs2/HttpClientFs2WebSocketTest.scala | Scala | apache-2.0 | 1,240 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.classification
import scala.util.Random
import breeze.linalg.{DenseMatrix => BDM, DenseVector => BDV, Vector => BV}
import breeze.stats.distributions.{Multinomial => BrzMultinomial}
import org.scalatest.exceptions.TestFailedException
import org.apache.spark.{SparkException, SparkFunSuite}
import org.apache.spark.mllib.linalg.{Vector, Vectors}
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.util.{LocalClusterSparkContext, MLlibTestSparkContext}
import org.apache.spark.mllib.util.TestingUtils._
import org.apache.spark.util.Utils
object NaiveBayesSuite {
import NaiveBayes.{Multinomial, Bernoulli}
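  // Samples a class label from the categorical distribution `pi`: walks the cumulative sum of
  // the class priors and returns the first index whose cumulative mass exceeds the draw `p`
  // (or -1 if `p` is not covered, which should not happen for a normalized `pi`).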
private def calcLabel(p: Double, pi: Array[Double]): Int = {
var sum = 0.0
for (j <- 0 until pi.length) {
sum += pi(j)
if (p < sum) return j
}
-1
}
// Generate input of the form Y = (theta * x).argmax()
def generateNaiveBayesInput(
pi: Array[Double], // 1XC
theta: Array[Array[Double]], // CXD
nPoints: Int,
seed: Int,
modelType: String = Multinomial,
sample: Int = 10): Seq[LabeledPoint] = {
val D = theta(0).length
val rnd = new Random(seed)
val _pi = pi.map(math.pow(math.E, _))
val _theta = theta.map(row => row.map(math.pow(math.E, _)))
for (i <- 0 until nPoints) yield {
val y = calcLabel(rnd.nextDouble(), _pi)
val xi = modelType match {
case Bernoulli => Array.tabulate[Double] (D) { j =>
          if (rnd.nextDouble() < _theta(y)(j)) 1 else 0
}
case Multinomial =>
val mult = BrzMultinomial(BDV(_theta(y)))
val emptyMap = (0 until D).map(x => (x, 0.0)).toMap
val counts = emptyMap ++ mult.sample(sample).groupBy(x => x).map {
case (index, reps) => (index, reps.size.toDouble)
}
counts.toArray.sortBy(_._1).map(_._2)
case _ =>
// This should never happen.
throw new UnknownError(s"Invalid modelType: $modelType.")
}
LabeledPoint(y, Vectors.dense(xi))
}
}
/** Bernoulli NaiveBayes with binary labels, 3 features */
private val binaryBernoulliModel = new NaiveBayesModel(labels = Array(0.0, 1.0),
pi = Array(0.2, 0.8), theta = Array(Array(0.1, 0.3, 0.6), Array(0.2, 0.4, 0.4)), Bernoulli)
/** Multinomial NaiveBayes with binary labels, 3 features */
private val binaryMultinomialModel = new NaiveBayesModel(labels = Array(0.0, 1.0),
pi = Array(0.2, 0.8), theta = Array(Array(0.1, 0.3, 0.6), Array(0.2, 0.4, 0.4)), Multinomial)
}
class NaiveBayesSuite extends SparkFunSuite with MLlibTestSparkContext {
import NaiveBayes.{Multinomial, Bernoulli}
def validatePrediction(predictions: Seq[Double], input: Seq[LabeledPoint]) {
val numOfPredictions = predictions.zip(input).count {
case (prediction, expected) =>
prediction != expected.label
}
    // At least 80% of the predictions should be correct.
assert(numOfPredictions < input.length / 5)
}
def validateModelFit(
piData: Array[Double],
thetaData: Array[Array[Double]],
model: NaiveBayesModel): Unit = {
val modelIndex = piData.indices.zip(model.labels.map(_.toInt))
try {
for (i <- modelIndex) {
assert(math.exp(piData(i._2)) ~== math.exp(model.pi(i._1)) absTol 0.05)
for (j <- thetaData(i._2).indices) {
assert(math.exp(thetaData(i._2)(j)) ~== math.exp(model.theta(i._1)(j)) absTol 0.05)
}
}
} catch {
case e: TestFailedException =>
def arr2str(a: Array[Double]): String = a.mkString("[", ", ", "]")
        def msg(orig: String): String = orig + "\nvalidateModelFit:\n" +
          "  piData: " + arr2str(piData) + "\n" +
          "  thetaData: " + thetaData.map(arr2str).mkString("\n") + "\n" +
          "  model.labels: " + arr2str(model.labels) + "\n" +
          "  model.pi: " + arr2str(model.pi) + "\n" +
          "  model.theta: " + model.theta.map(arr2str).mkString("\n")
throw e.modifyMessage(_.map(msg))
}
}
test("model types") {
assert(Multinomial === "multinomial")
assert(Bernoulli === "bernoulli")
}
test("get, set params") {
val nb = new NaiveBayes()
nb.setLambda(2.0)
assert(nb.getLambda === 2.0)
nb.setLambda(3.0)
assert(nb.getLambda === 3.0)
}
test("Naive Bayes Multinomial") {
val nPoints = 1000
val pi = Array(0.5, 0.1, 0.4).map(math.log)
val theta = Array(
Array(0.70, 0.10, 0.10, 0.10), // label 0
Array(0.10, 0.70, 0.10, 0.10), // label 1
Array(0.10, 0.10, 0.70, 0.10) // label 2
).map(_.map(math.log))
val testData = NaiveBayesSuite.generateNaiveBayesInput(pi, theta, nPoints, 42, Multinomial)
val testRDD = sc.parallelize(testData, 2)
testRDD.cache()
val model = NaiveBayes.train(testRDD, 1.0, Multinomial)
validateModelFit(pi, theta, model)
val validationData = NaiveBayesSuite.generateNaiveBayesInput(
pi, theta, nPoints, 17, Multinomial)
val validationRDD = sc.parallelize(validationData, 2)
// Test prediction on RDD.
validatePrediction(model.predict(validationRDD.map(_.features)).collect(), validationData)
// Test prediction on Array.
validatePrediction(validationData.map(row => model.predict(row.features)), validationData)
// Test posteriors
validationData.map(_.features).foreach { features =>
val predicted = model.predictProbabilities(features).toArray
assert(predicted.sum ~== 1.0 relTol 1.0e-10)
val expected = expectedMultinomialProbabilities(model, features)
expected.zip(predicted).foreach { case (e, p) => assert(e ~== p relTol 1.0e-10) }
}
}
/**
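   * Computes the reference posterior as softmax(log pi + theta * x), i.e.
   * P(c | x) = exp(log pi_c + theta_c . x) / sum_k exp(log pi_k + theta_k . x).
   *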
* @param model Multinomial Naive Bayes model
* @param testData input to compute posterior probabilities for
* @return posterior class probabilities (in order of labels) for input
*/
private def expectedMultinomialProbabilities(model: NaiveBayesModel, testData: Vector) = {
val piVector = new BDV(model.pi)
// model.theta is row-major; treat it as col-major representation of transpose, and transpose:
val thetaMatrix = new BDM(model.theta(0).length, model.theta.length, model.theta.flatten).t
val logClassProbs: BV[Double] = piVector + (thetaMatrix * testData.asBreeze)
val classProbs = logClassProbs.toArray.map(math.exp)
val classProbsSum = classProbs.sum
classProbs.map(_ / classProbsSum)
}
test("Naive Bayes Bernoulli") {
val nPoints = 10000
val pi = Array(0.5, 0.3, 0.2).map(math.log)
val theta = Array(
Array(0.50, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.40), // label 0
Array(0.02, 0.70, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02), // label 1
Array(0.02, 0.02, 0.60, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.30) // label 2
).map(_.map(math.log))
val testData = NaiveBayesSuite.generateNaiveBayesInput(
pi, theta, nPoints, 45, Bernoulli)
val testRDD = sc.parallelize(testData, 2)
testRDD.cache()
val model = NaiveBayes.train(testRDD, 1.0, Bernoulli)
validateModelFit(pi, theta, model)
val validationData = NaiveBayesSuite.generateNaiveBayesInput(
pi, theta, nPoints, 20, Bernoulli)
val validationRDD = sc.parallelize(validationData, 2)
// Test prediction on RDD.
validatePrediction(model.predict(validationRDD.map(_.features)).collect(), validationData)
// Test prediction on Array.
validatePrediction(validationData.map(row => model.predict(row.features)), validationData)
// Test posteriors
validationData.map(_.features).foreach { features =>
val predicted = model.predictProbabilities(features).toArray
assert(predicted.sum ~== 1.0 relTol 1.0e-10)
val expected = expectedBernoulliProbabilities(model, features)
expected.zip(predicted).foreach { case (e, p) => assert(e ~== p relTol 1.0e-10) }
}
}
/**
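   * Computes the reference posterior; with theta stored as log probabilities this is
   * P(c | x) proportional to exp(log pi_c + theta_c . x + log(1 - exp(theta_c)) . (1 - x)),
   * normalized over the classes c.
   *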
* @param model Bernoulli Naive Bayes model
* @param testData input to compute posterior probabilities for
* @return posterior class probabilities (in order of labels) for input
*/
private def expectedBernoulliProbabilities(model: NaiveBayesModel, testData: Vector) = {
val piVector = new BDV(model.pi)
val thetaMatrix = new BDM(model.theta(0).length, model.theta.length, model.theta.flatten).t
val negThetaMatrix = new BDM(model.theta(0).length, model.theta.length,
model.theta.flatten.map(v => math.log(1.0 - math.exp(v)))).t
val testBreeze = testData.asBreeze
val negTestBreeze = new BDV(Array.fill(testBreeze.size)(1.0)) - testBreeze
val piTheta: BV[Double] = piVector + (thetaMatrix * testBreeze)
val logClassProbs: BV[Double] = piTheta + (negThetaMatrix * negTestBreeze)
val classProbs = logClassProbs.toArray.map(math.exp)
val classProbsSum = classProbs.sum
classProbs.map(_ / classProbsSum)
}
test("detect negative values") {
val dense = Seq(
LabeledPoint(1.0, Vectors.dense(1.0)),
LabeledPoint(0.0, Vectors.dense(-1.0)),
LabeledPoint(1.0, Vectors.dense(1.0)),
LabeledPoint(1.0, Vectors.dense(0.0)))
intercept[SparkException] {
NaiveBayes.train(sc.makeRDD(dense, 2))
}
val sparse = Seq(
LabeledPoint(1.0, Vectors.sparse(1, Array(0), Array(1.0))),
LabeledPoint(0.0, Vectors.sparse(1, Array(0), Array(-1.0))),
LabeledPoint(1.0, Vectors.sparse(1, Array(0), Array(1.0))),
LabeledPoint(1.0, Vectors.sparse(1, Array.empty, Array.empty)))
intercept[SparkException] {
NaiveBayes.train(sc.makeRDD(sparse, 2))
}
val nan = Seq(
LabeledPoint(1.0, Vectors.sparse(1, Array(0), Array(1.0))),
LabeledPoint(0.0, Vectors.sparse(1, Array(0), Array(Double.NaN))),
LabeledPoint(1.0, Vectors.sparse(1, Array(0), Array(1.0))),
LabeledPoint(1.0, Vectors.sparse(1, Array.empty, Array.empty)))
intercept[SparkException] {
NaiveBayes.train(sc.makeRDD(nan, 2))
}
}
test("detect non zero or one values in Bernoulli") {
val badTrain = Seq(
LabeledPoint(1.0, Vectors.dense(1.0)),
LabeledPoint(0.0, Vectors.dense(2.0)),
LabeledPoint(1.0, Vectors.dense(1.0)),
LabeledPoint(1.0, Vectors.dense(0.0)))
intercept[SparkException] {
NaiveBayes.train(sc.makeRDD(badTrain, 2), 1.0, Bernoulli)
}
val okTrain = Seq(
LabeledPoint(1.0, Vectors.dense(1.0)),
LabeledPoint(0.0, Vectors.dense(0.0)),
LabeledPoint(1.0, Vectors.dense(1.0)),
LabeledPoint(1.0, Vectors.dense(1.0)),
LabeledPoint(0.0, Vectors.dense(0.0)),
LabeledPoint(1.0, Vectors.dense(1.0)),
LabeledPoint(1.0, Vectors.dense(1.0))
)
val badPredict = Seq(
Vectors.dense(1.0),
Vectors.dense(2.0),
Vectors.dense(1.0),
Vectors.dense(0.0))
val model = NaiveBayes.train(sc.makeRDD(okTrain, 2), 1.0, Bernoulli)
intercept[SparkException] {
model.predict(sc.makeRDD(badPredict, 2)).collect()
}
}
test("model save/load: 2.0 to 2.0") {
val tempDir = Utils.createTempDir()
val path = tempDir.toURI.toString
Seq(NaiveBayesSuite.binaryBernoulliModel, NaiveBayesSuite.binaryMultinomialModel).map {
model =>
// Save model, load it back, and compare.
try {
model.save(sc, path)
val sameModel = NaiveBayesModel.load(sc, path)
assert(model.labels === sameModel.labels)
assert(model.pi === sameModel.pi)
assert(model.theta === sameModel.theta)
assert(model.modelType === sameModel.modelType)
} finally {
Utils.deleteRecursively(tempDir)
}
}
}
test("model save/load: 1.0 to 2.0") {
val model = NaiveBayesSuite.binaryMultinomialModel
val tempDir = Utils.createTempDir()
val path = tempDir.toURI.toString
// Save model as version 1.0, load it back, and compare.
try {
val data = NaiveBayesModel.SaveLoadV1_0.Data(model.labels, model.pi, model.theta)
NaiveBayesModel.SaveLoadV1_0.save(sc, path, data)
val sameModel = NaiveBayesModel.load(sc, path)
assert(model.labels === sameModel.labels)
assert(model.pi === sameModel.pi)
assert(model.theta === sameModel.theta)
assert(model.modelType === Multinomial)
} finally {
Utils.deleteRecursively(tempDir)
}
}
}
class NaiveBayesClusterSuite extends SparkFunSuite with LocalClusterSparkContext {
test("task size should be small in both training and prediction") {
val m = 10
val n = 200000
val examples = sc.parallelize(0 until m, 2).mapPartitionsWithIndex { (idx, iter) =>
val random = new Random(idx)
iter.map { i =>
LabeledPoint(random.nextInt(2), Vectors.dense(Array.fill(n)(random.nextDouble())))
}
}
// If we serialize data directly in the task closure, the size of the serialized task
// would be greater than 1MB and hence Spark would throw an error.
val model = NaiveBayes.train(examples)
val predictions = model.predict(examples.map(_.features))
}
}
| gioenn/xSpark | mllib/src/test/scala/org/apache/spark/mllib/classification/NaiveBayesSuite.scala | Scala | apache-2.0 | 13,858 |
/*
* Copyright 2014–2017 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.std
trait StdLib extends Library {
val math = MathLib
val structural = StructuralLib
val agg = AggLib
val identity = IdentityLib
val relations = RelationsLib
val set = SetLib
val array = ArrayLib
val string = StringLib
val date = DateLib
}
object StdLib extends StdLib
| drostron/quasar | frontend/src/main/scala/quasar/std/std.scala | Scala | apache-2.0 | 916 |
package edu.rice.habanero.benchmarks.concsll
import edu.rice.habanero.actors.{ScalazActor, ScalazActorState, ScalazPool}
import edu.rice.habanero.benchmarks.concsll.SortedListConfig.{DoWorkMessage, EndWorkMessage}
import edu.rice.habanero.benchmarks.{Benchmark, BenchmarkRunner, PseudoRandom}
/**
*
* @author <a href="http://shams.web.rice.edu/">Shams Imam</a> ([email protected])
*/
object SortedListScalazActorBenchmark {
def main(args: Array[String]) {
BenchmarkRunner.runBenchmark(args, new SortedListScalazActorBenchmark)
}
private final class SortedListScalazActorBenchmark extends Benchmark {
def initialize(args: Array[String]) {
SortedListConfig.parseArgs(args)
}
def printArgInfo() {
SortedListConfig.printArgs()
}
def runIteration() {
val numWorkers: Int = SortedListConfig.NUM_ENTITIES
val numMessagesPerWorker: Int = SortedListConfig.NUM_MSGS_PER_WORKER
val master = new Master(numWorkers, numMessagesPerWorker)
master.start()
ScalazActorState.awaitTermination()
}
def cleanupIteration(lastIteration: Boolean, execTimeMillis: Double) {
if (lastIteration) {
ScalazPool.shutdown()
}
}
}
private class Master(numWorkers: Int, numMessagesPerWorker: Int) extends ScalazActor[AnyRef] {
private final val workers = new Array[ScalazActor[AnyRef]](numWorkers)
private final val sortedList = new SortedList()
private var numWorkersTerminated: Int = 0
override def onPostStart() {
sortedList.start()
var i: Int = 0
while (i < numWorkers) {
workers(i) = new Worker(this, sortedList, i, numMessagesPerWorker)
workers(i).start()
workers(i).send(DoWorkMessage.ONLY)
i += 1
}
}
override def process(msg: AnyRef) {
if (msg.isInstanceOf[SortedListConfig.EndWorkMessage]) {
numWorkersTerminated += 1
if (numWorkersTerminated == numWorkers) {
sortedList.send(EndWorkMessage.ONLY)
exit()
}
}
}
}
private class Worker(master: Master, sortedList: SortedList, id: Int, numMessagesPerWorker: Int) extends ScalazActor[AnyRef] {
private final val writePercent = SortedListConfig.WRITE_PERCENTAGE
private final val sizePercent = SortedListConfig.SIZE_PERCENTAGE
private var messageCount: Int = 0
private final val random = new PseudoRandom(id + numMessagesPerWorker + writePercent + sizePercent)
override def process(msg: AnyRef) {
messageCount += 1
if (messageCount <= numMessagesPerWorker) {
val anInt: Int = random.nextInt(100)
if (anInt < sizePercent) {
sortedList.send(new SortedListConfig.SizeMessage(this))
} else if (anInt < (sizePercent + writePercent)) {
sortedList.send(new SortedListConfig.WriteMessage(this, random.nextInt))
} else {
sortedList.send(new SortedListConfig.ContainsMessage(this, random.nextInt))
}
} else {
master.send(EndWorkMessage.ONLY)
exit()
}
}
}
private class SortedList extends ScalazActor[AnyRef] {
private[concsll] final val dataList = new SortedLinkedList[Integer]
override def process(msg: AnyRef) {
msg match {
case writeMessage: SortedListConfig.WriteMessage =>
val value: Int = writeMessage.value
dataList.add(value)
val sender = writeMessage.sender.asInstanceOf[ScalazActor[AnyRef]]
sender.send(new SortedListConfig.ResultMessage(this, value))
case containsMessage: SortedListConfig.ContainsMessage =>
val value: Int = containsMessage.value
val result: Int = if (dataList.contains(value)) 1 else 0
val sender = containsMessage.sender.asInstanceOf[ScalazActor[AnyRef]]
sender.send(new SortedListConfig.ResultMessage(this, result))
case readMessage: SortedListConfig.SizeMessage =>
val value: Int = dataList.size
val sender = readMessage.sender.asInstanceOf[ScalazActor[AnyRef]]
sender.send(new SortedListConfig.ResultMessage(this, value))
case _: SortedListConfig.EndWorkMessage =>
printf(BenchmarkRunner.argOutputFormat, "List Size", dataList.size)
exit()
case _ =>
System.err.println("Unsupported message: " + msg)
}
}
}
}
| shamsmahmood/savina | src/main/scala/edu/rice/habanero/benchmarks/concsll/SortedListScalazActorBenchmark.scala | Scala | gpl-2.0 | 4,369 |
package rewriting.rules
import ir._
import ir.ast._
import lift.arithmetic.SizeVar
import opencl.executor.{Execute, TestWithExecutor}
import opencl.ir._
import org.junit.Assert._
import org.junit.Test
import rewriting.{Lower, Rewrite}
import scala.util.Random
object TestRemoveDuplicateZipArg extends TestWithExecutor
class TestRemoveDuplicateZipArg {
private val N = SizeVar("N")
private val arrayType = ArrayType(Float, N)
private val inputSize = 128
private val x = Array.tabulate(inputSize)(_ => Random.nextFloat())
private val y = Array.tabulate(inputSize)(_ => Random.nextFloat())
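  // Checks that a rewritten lambda is semantically equivalent to the original: the inferred
  // types must match and the sequentially lowered versions must produce the same output for
  // the given arguments.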
private def lowerExecuteAndCompare(original: Lambda, result: Lambda, args: Any*): Unit = {
val originalType = TypeChecker(original)
    val resultType = TypeChecker(result)
assertEquals(originalType, resultType)
val loweredOriginal = Lower.sequential(original)
val loweredResult = Lower.sequential(result)
val (outputOriginal, _) = Execute()[Array[Float]](loweredOriginal, args:_*)
val (outputResult, _) = Execute()[Array[Float]](loweredResult, args:_*)
assertArrayEquals(outputOriginal, outputResult, 0.001f)
}
@Test
def zipResultEscapes0(): Unit = {
val f = \(arrayType,
in => Map(Id()) $ Zip(in, in)
)
TypeChecker(f)
assertFalse(SimplificationRules.removeDuplicateZipArg.isDefinedAt(f.body))
}
@Test
def zipResultEscapes1(): Unit = {
val f = \(arrayType,
in => Map(\(x => x)) $ Zip(in, in)
)
TypeChecker(f)
assertFalse(SimplificationRules.removeDuplicateZipArg.isDefinedAt(f.body))
}
@Test
def nothingShared(): Unit = {
val f = \(arrayType, arrayType,
(a, b) => Map(Id()) $ Zip(a, b)
)
TypeChecker(f)
assertFalse(SimplificationRules.removeDuplicateZipArg.isDefinedAt(f.body))
}
@Test
def noZipAfterRemovingDuplicate(): Unit = {
val f = \(arrayType,
in => Map(\(x => add(x._0, x._1))) $ Zip(in, in)
)
val result = Rewrite.applyRuleAtId(f, 0, SimplificationRules.removeDuplicateZipArg)
assertFalse(result.body.contains({ case FunCall(Zip(_), _*) => }))
lowerExecuteAndCompare(f, result, x)
}
@Test
def smallerZipAfterRemovingDuplicate(): Unit = {
val f = \(arrayType, arrayType,
(a, b) => Map(\(x => add(x._0, mult(x._1, x._2)))) $ Zip(a, b, a)
)
val result = Rewrite.applyRuleAtId(f, 0, SimplificationRules.removeDuplicateZipArg)
assertFalse(result.body.contains({ case FunCall(Zip(3), _*) => }))
assertTrue(result.body.contains({ case FunCall(Zip(2), _*) => }))
lowerExecuteAndCompare(f, result, x, y)
}
@Test
def moreComplicatedDuplicationAllowed(): Unit = {
val f = \(arrayType,
in => Map(\(x =>
Map(\(x =>
add(x._0, x._1)
)) $ Zip(x._0, x._1)
)) $ Zip(Split(64) $ in, Split(64) $ in)
)
val result = Rewrite.applyRuleUntilCannot(f, SimplificationRules.removeDuplicateZipArg)
assertFalse(result.body.contains({ case FunCall(Zip(_), _*) => }))
lowerExecuteAndCompare(f, result, x)
}
@Test
def moreComplicatedDuplicationCantTell(): Unit = {
// TODO: No equality checking for arbitrary Lambdas/Expr.
// TODO: Need to check usage of Params and whatnot
val f = \(arrayType,
in => Map(\(x => add(x._0, x._1))) $ Zip(Map(plusOne) $ in, Map(plusOne) $ in)
)
TypeChecker(f)
assertFalse(SimplificationRules.removeDuplicateZipArg.isDefinedAt(f.body))
}
@Test
def moreComplicatedNoDuplication(): Unit = {
val f = \(arrayType, Float,
(a, b) => Map(\(x =>
add(x._0, x._1)
)) $ Zip(Map(\(_ => plusOne(b))) $ a, Map(plusOne) $ a)
)
TypeChecker(f)
assertFalse(SimplificationRules.removeDuplicateZipArg.isDefinedAt(f.body))
}
}
| lift-project/lift | src/test/rewriting/rules/TestRemoveDuplicateZipArg.scala | Scala | mit | 3,771 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.java
import scala.collection.JavaConverters._
import scala.reflect.ClassTag
import org.apache.hadoop.mapreduce.InputSplit
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.api.java.JavaSparkContext._
import org.apache.spark.api.java.function.{Function2 => JFunction2}
import org.apache.spark.rdd.NewHadoopRDD
@DeveloperApi
class JavaNewHadoopRDD[K, V](rdd: NewHadoopRDD[K, V])
(implicit override val kClassTag: ClassTag[K], implicit override val vClassTag: ClassTag[V])
extends JavaPairRDD[K, V](rdd) {
/** Maps over a partition, providing the InputSplit that was used as the base of the partition. */
@DeveloperApi
def mapPartitionsWithInputSplit[R](
f: JFunction2[InputSplit, java.util.Iterator[(K, V)], java.util.Iterator[R]],
preservesPartitioning: Boolean = false): JavaRDD[R] = {
new JavaRDD(rdd.mapPartitionsWithInputSplit((a, b) => f.call(a, b.asJava).asScala,
preservesPartitioning)(fakeClassTag))(fakeClassTag)
}
}
| aokolnychyi/spark | core/src/main/scala/org/apache/spark/api/java/JavaNewHadoopRDD.scala | Scala | apache-2.0 | 1,819 |
package games.utils
import games.Quoridor
import scala.collection.mutable.ArrayBuffer
import algorithms._
import scala.math._
import java.util.Random
object QuoridorUtils {
/**
* Get children nodes for a new node
*/
def getchildNodes(board:Quoridor, player:Int): ArrayBuffer[(String, Int, Int)] = {
getSimulationMoves(board, player)
}
def get_moves(board:Quoridor, player:Int): ArrayBuffer[(String, Int, Int)] = {
getSimulationMoves(board, player)
}
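  /**
   * Builds the candidate moves used during simulation: the next step on this player's
   * shortest (BFS) path towards its goal, plus wall placements around the opponent's current
   * position on its shortest path. Falls back to all legal pawn moves when no BFS path exists.
   */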
def getSimulationMoves(board:Quoridor, player:Int):ArrayBuffer[(String, Int, Int)] = {
val opponent:Int = (player + 1) % 2
var path = QuoridorUtils.doBFSMoves(board, opponent) // get path of opponents
var mypath = QuoridorUtils.doBFSMoves(board, player) // my path
var moves: ArrayBuffer[(String, Int, Int)] = new ArrayBuffer[(String, Int, Int)]()
// check if BFS exists for both path
//println("Get simulation moves - Pawn Start")
if (path != null && mypath != null) {
var way:ArrayBuffer[(Int, Int)] = new ArrayBuffer[(Int, Int)]()
var myway:ArrayBuffer[(Int, Int)] = new ArrayBuffer[(Int, Int)]()
while (path != null) {
way += ((path.id._1, path.id._2))
path = path.previous
}
way = way.reverse
while (mypath != null) {
myway += ((mypath.id._1, mypath.id._2))
mypath = mypath.previous
}
myway = myway.reverse
if (myway.length > 1) {
moves += (("P", myway(1)._1, myway(1)._2))
}
// consider walls
considerWallMoves(way, myway.length, board, player).foreach(f => { moves += f})
//considerWallMovesAroundPawnOnly(board, player).foreach(f => { moves += f})
}
else
board.getLegalPawnMoves(player).foreach(f => { moves += f })
//print(moves.length + " ")
//println("Get simulation moves - End")
moves
}
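  /**
   * Enumerates legal wall placements (horizontal and vertical) in a neighbourhood of the
   * opponent's current cell on its path, provided the player still has walls left.
   */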
def considerWallMoves(oppway:ArrayBuffer[(Int, Int)], myway:Int, board:Quoridor, player:Int):ArrayBuffer[(String, Int, Int)] = {
var moves: ArrayBuffer[(String, Int, Int)] = new ArrayBuffer[(String, Int, Int)]()
var limit:Int = oppway.length
if (oppway.length > 1)
limit = 1
// along the path of opponent
for (i <- 0 until limit) {
val x:Int = oppway(i)._1
val y:Int = oppway(i)._2
// defines positions
val positions:Array[(Int, Int)] = Array(
//(x - 2, y - 2),
//(x - 2, y - 1),
//(x - 2, y),
//(x - 2, y + 1),
(x - 1, y - 2),
(x - 1, y - 1),
(x - 1, y),
(x - 1, y + 1),
(x, y - 2),
(x, y - 1),
(x, y),
(x, y + 1),
(x + 1, y - 2),
(x + 1, y - 1),
(x + 1, y),
(x + 1, y + 1)
//(x + 2, y - 2),
//(x + 2, y - 1),
//(x + 2, y),
//(x + 2, y + 1)
)
// if player still has walls left
if (board.nbWalls(player) > 0) {
for (item <- positions) {
// if still inside the ground
if (item._1 >= 0 && item._1 < board.size - 1 && item._2 >= 0 && item._2 < board.size - 1) {
// horizon
if (!moves.exists(_ == (("WH", item._1, item._2)))) {
if (board.isWallPossibleHere((item._1, item._2), true))
moves.append(("WH", item._1, item._2))
}
// vertical
if (!moves.exists(_ == (("WV", item._1, item._2)))) {
if (board.isWallPossibleHere((item._1, item._2), false))
moves.append(("WV", item._1, item._2))
}
}
}
}
}
moves
}
def considerWallMovesAroundPawnOnly(board:Quoridor, player:Int):ArrayBuffer[(String, Int, Int)] = {
var moves: ArrayBuffer[(String, Int, Int)] = new ArrayBuffer[(String, Int, Int)]()
// along the path of opponent
val x:Int = board.pawns((player + 1) % 2)._1
val y:Int = board.pawns((player + 1) % 2)._2
// defines positions
val positions:Array[(Int, Int)] = Array(
(x - 2, y - 2),
(x - 2, y - 1),
(x - 2, y),
(x - 2, y + 1),
(x - 1, y - 2),
(x - 1, y - 1),
(x - 1, y),
(x - 1, y + 1),
(x, y - 2),
(x, y - 1),
(x, y),
(x, y + 1),
(x + 1, y - 2),
(x + 1, y - 1),
(x + 1, y),
(x + 1, y + 1),
(x + 2, y - 2),
(x + 2, y - 1),
(x + 2, y),
(x + 2, y + 1)
)
// if player still has walls left
if (board.nbWalls(player) > 0) {
for (item <- positions) {
// if still inside the ground
if (item._1 >= 0 && item._1 < board.size - 1 && item._2 >= 0 && item._2 < board.size - 1) {
// horizon
if (!moves.exists(_ == (("WH", item._1, item._2)))) {
if (board.isWallPossibleHere((item._1, item._2), true))
moves.append(("WH", item._1, item._2))
}
// vertical
if (!moves.exists(_ == (("WV", item._1, item._2)))) {
if (board.isWallPossibleHere((item._1, item._2), false))
moves.append(("WV", item._1, item._2))
}
}
}
}
moves
}
def isMoveBetter(opppath:Int, mypath:Int, newBoard:Quoridor):Boolean = {
false
}
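  /**
   * Breadth-first search from the player's pawn towards its goal row. Returns the goal
   * Vertex, whose chain of `previous` links encodes a shortest path back to the start,
   * or null when the goal is unreachable.
   */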
def doBFSMoves(board:Quoridor, player:Int):Vertex = {
var start:Vertex = new Vertex(board.pawns(player), 1)
var visited:ArrayBuffer[(Int, Int)] = new ArrayBuffer[(Int, Int)]()
var queue = new Queue
queue.push(start)
while (!queue.isEmpty) {
var node:Vertex = queue.pop
// check for termination
if (node.id._1 == board.goals(player))
return node
// add to visited coordinates
visited += Pair(node.id._1, node.id._2)
// get all surrounding moves
var moves:ArrayBuffer[(Int, Int)] = board.getLegalMovesFromPoint(node.id, player)
moves.foreach(item => {
// if not visited
if (visited.find(_ == item) == None) {
// add to queue
var newVertex = new Vertex(item, 1)
newVertex.previous = node
queue.push(newVertex)
}
})
}
//println("NULL BFS!")
null
}
/**
* This method gets:
* - One random wall placement
* - All the legal pawn moves
*/
def getRandomActions(board:Quoridor, player:Int):ArrayBuffer[(String, Int, Int)] = {
/**
* val rand = new Random(System.currentTimeMillis())
var random_index = rand.nextInt(rollmove.length)
*/
// flag to stop until a correct random move is found
var isFoundAMove:Boolean = false
// a move to be returned
var move:(String, Int, Int) = ("P", -1, -1)
// array of moves
var moves:ArrayBuffer[(String, Int, Int)] = new ArrayBuffer[(String, Int, Int)]()
// pawns
var myway:ArrayBuffer[(Int, Int)] = new ArrayBuffer[(Int, Int)]()
var mypath = QuoridorUtils.doBFSMoves(board, player) // my path
if (mypath != null) {
while (mypath != null) {
myway += ((mypath.id._1, mypath.id._2))
mypath = mypath.previous
}
myway = myway.reverse
if (myway.length > 1)
moves += (("P", myway(1)._1, myway(1)._2))
}
else {
board.getLegalPawnMoves(player).foreach(f => {moves += f})
}
// board.getLegalPawnMoves(player).foreach(f => {moves += f})
//println("Get random moves! Start wall placement")
// wall placement
var counter:Int = 0 // a counter to make sure the loop will not run forever
while (isFoundAMove == false && board.nbWalls(player) > 0 && counter < 100) {
// random seed
var rand = new Random(System.currentTimeMillis())
// random variables
var random_wall_type = rand.nextInt(2)
var random_x = rand.nextInt(9)
var random_y = rand.nextInt(9)
var random_wall_string:String = "WH"
var random_wall_isHorizon:Boolean = true
if (random_wall_type != 1) {
random_wall_string = "WV"
random_wall_isHorizon = false
}
// forming the move
move = (random_wall_string, random_x, random_y)
//println("Trying " + counter + ":" + move)
// check wall possible here
if (board.isWallPossibleHere( (random_x, random_y), random_wall_isHorizon)) {
// stop finding
isFoundAMove = true
}
counter += 1
}
    // if isFoundAMove is still false, meaning a wall placement could not be found at random,
    // we fall back to all of the legal wall moves
if (isFoundAMove == false) {
board.getLegalWallMoves(player).foreach(wall => {moves += wall})
}
//println("Get random moves! Wall placement done!")
moves
}
} | minhprg/mcts_in_quoridor | src/main/scala/games/utils/QuoridorUtils.scala | Scala | gpl-2.0 | 8,626 |
package de.hska.wifl1011.seminararbeit
import scala.concurrent.{ future, Future }
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{ Failure, Success }
import scala.util.Random
object Main {
def main(args: Array[String]) {
val rateDollar: Future[Double] =
Helper.getExchangeRateByFuture("US-Dollar")
val rateFranc: Future[Double] =
Helper.getExchangeRateByFuture("Swiss franc")
val selectedRate = rateDollar.fallbackTo(rateFranc)
val bookAccommodation = selectedRate.map {
rate => {
println("Got exchange-rate. It is " + rate)
if (Helper.isExchangeRateAcceptable(rate)) {
println("Book accommodation")
Helper.bookAccommodationOnline()
} else {
throw new Exception("Rate not acceptable")
}
}
}
val bookFlight = bookAccommodation.map {
successful => {
println("Book flight")
Helper.bookFlightOnline()
}
}
bookFlight.onComplete {
case Success(_) => println("Flight booked")
case Failure(ex) => println(ex.getMessage)
}
// do some unrelated work
Helper.packSuitcase()
// keep the jvm running
Thread.sleep(3000)
}
}
| fwilhe/hska-seminararbeit | code/holiday/Main.scala | Scala | mit | 1,257 |
package basic._03.stmt_for
import org.junit.Test
import org.junit.Assert._
import org.hamcrest.core.Is._
import scala.collection.JavaConversions._
/**
 * Scala's for statement provides the following processing mechanisms:
 * - filtering
 * - transformation (binding values to variables)
 * - list conversion
*/
class For_Test_S {
/**
   * [Comparison with Java]
   * for statement (index-based access)
   *
   * Scenario: convert a List holding several country names into a CSV-formatted string.
   * Point: elements are accessed by index in the get(i) part; this style allows a lot of freedom, which leaves room for bugs.
*/
@Test
def testForIndex(): Unit = {
val sb = new StringBuilder();
val countryList = List("Japan", "UnitedStates", "UnitedKingdom", "France", "China", "Geraman");
for (i <- 0 until countryList.size) {
val country = countryList(i);
sb.append(country);
if (i != countryList.size -1) {
sb.append(", ");
}
}
assertThat(sb.toString(), is("Japan, UnitedStates, UnitedKingdom, France, China, Geraman"));
}
/**
   * [Comparison with Java]
   * for statement (Iterator-based, substituted with a while statement)
   *
   * Scenario: convert a List holding several country names into a CSV-formatted string.
   * Point: Scala's for loop has no (init; condition; update) sections like Java's, so an Iterator cannot be used effectively.
*/
@Test
def testForInterator(): Unit = {
val sb = new StringBuilder();
val countryList = List("Japan", "UnitedStates", "UnitedKingdom", "France", "China", "Geraman");
val ite = countryList.iterator
    // A for statement cannot express this, so a while statement is used instead
while (ite.hasNext) {
val country = ite.next();
sb.append(country);
if (ite.hasNext) {
sb.append(", ");
}
}
assertThat(sb.toString(), is("Japan, UnitedStates, UnitedKingdom, France, China, Geraman"));
}
/**
   * [Comparison with Java]
   * for statement (enhanced for)
   *
   * Scenario: convert a List holding several country names into a CSV-formatted string.
   * Point: the Iterator is hidden at the syntax level; note that the position of an element cannot be determined.
*/
@Test
def testForIterable(): Unit = {
val SEP = ", ";
val sb = new StringBuilder();
val countryList = List("Japan", "UnitedStates", "UnitedKingdom", "France", "China", "Geraman");
for (country <- countryList) {
sb.append(country);
sb.append(SEP);
}
    // The trailing ", " needs to be trimmed at the end
sb.delete(sb.lastIndexOf(SEP), sb.length());
assertThat(sb.toString(), is("Japan, UnitedStates, UnitedKingdom, France, China, Geraman"));
}
/**
   * [Comparison with Java]
   * for statement (the same processing implemented with the Java 8 join API)
   *
   * Scenario: convert a List holding several country names into a CSV-formatted string.
   * Point: optimized by using a library that fits the purpose.
*/
@Test
def testForStream(): Unit = {
val countryList = List("Japan", "UnitedStates", "UnitedKingdom", "France", "China", "Geraman");
val countryCSV = String.join(", ", countryList);
assertThat(countryCSV, is("Japan, UnitedStates, UnitedKingdom, France, China, Geraman"));
}
/**
   * [Comparison with Java]
   * Filtering (for statement)
   *
   * Scenario: from a List of country names, build a list of the names that start with "U".
   * Point: although expressed with a for statement, Scala allows a concise form because:
   * - an if condition can be embedded inside the for statement
   * - yield lets the construct be treated as an expression
*/
@Test
def testForFilteredByScalasFor(): Unit = {
val countryList = List("Japan", "UnitedStates", "UnitedKingdom", "France", "China", "Geraman");
val countryStartWithU = for (country <- countryList; if country.startsWith("U")) yield country
assertThat(countryStartWithU, is(List("UnitedStates", "UnitedKingdom")));
}
/**
   * [Comparison with Java]
   * Filtering (moving away from the for statement)
   *
   * Scenario: from a List of country names, build a list of the names that start with "U".
   * Point: using a function keeps the code close to the intended meaning.
   * Unlike Java, no stream() declaration or collect(x) step is needed.
*/
@Test
def testForFilteredByFilter(): Unit = {
val countryList = List("Japan", "UnitedStates", "UnitedKingdom", "France", "China", "Geraman");
val countryStartWithU = countryList.filter { country => country.startsWith("U") }
assertThat(countryStartWithU, is(List("UnitedStates", "UnitedKingdom")));
}
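  /**
   * [Comparison with Java]
   * Multiple generators (nested loops) -- an additional illustrative sketch, not part of the
   * original comparison set, showing the "list conversion" mechanism mentioned in the class
   * comment: several generators in one for expression behave like nested loops.
   */
  @Test
  def testForMultipleGenerators(): Unit = {
    val countryList = List("Japan", "France");
    val suffixList = List("-A", "-B");
    val combined = for (country <- countryList; suffix <- suffixList) yield country + suffix
    assertThat(combined, is(List("Japan-A", "Japan-B", "France-A", "France-B")));
  }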
} | koooyooo/scala-java-comparison | scala-java-comparison/src/basic/_03/stmt_for/For_Test_S.scala | Scala | apache-2.0 | 5,001 |
package sbt
package std
import language.experimental.macros
import scala.reflect._
import reflect.macros._
import reflect.internal.annotations.compileTimeOnly
import Def.{ Initialize, ScopedKey }
import appmacro.ContextUtil
import complete.Parser
/** Implementation detail. The wrap methods temporarily hold inputs (as a Tree, at compile time) until a task or setting macro processes it. */
object InputWrapper {
/* The names of the wrapper methods should be obscure.
* Wrapper checking is based solely on this name, so it must not conflict with a user method name.
* The user should never see this method because it is compile-time only and only used internally by the task macro system.*/
  private[std] final val WrapTaskName = "wrapTask_\u2603\u2603"
  private[std] final val WrapInitName = "wrapInit_\u2603\u2603"
  private[std] final val WrapInitTaskName = "wrapInitTask_\u2603\u2603"
  private[std] final val WrapInitInputName = "wrapInitInputTask_\u2603\u2603"
  private[std] final val WrapInputName = "wrapInputTask_\u2603\u2603"
  private[std] final val WrapPreviousName = "wrapPrevious_\u2603\u2603"
@compileTimeOnly("`value` can only be called on a task within a task definition macro, such as :=, +=, ++=, or Def.task.")
  def wrapTask_\u2603\u2603[T](in: Any): T = implDetailError
@compileTimeOnly("`value` can only be used within a task or setting macro, such as :=, +=, ++=, Def.task, or Def.setting.")
  def wrapInit_\u2603\u2603[T](in: Any): T = implDetailError
@compileTimeOnly("`value` can only be called on a task within a task definition macro, such as :=, +=, ++=, or Def.task.")
  def wrapInitTask_\u2603\u2603[T](in: Any): T = implDetailError
@compileTimeOnly("`value` can only be called on an input task within a task definition macro, such as := or Def.inputTask.")
  def wrapInputTask_\u2603\u2603[T](in: Any): T = implDetailError
@compileTimeOnly("`value` can only be called on an input task within a task definition macro, such as := or Def.inputTask.")
  def wrapInitInputTask_\u2603\u2603[T](in: Any): T = implDetailError
@compileTimeOnly("`previous` can only be called on a task within a task or input task definition macro, such as :=, +=, ++=, Def.task, or Def.inputTask.")
  def wrapPrevious_\u2603\u2603[T](in: Any): T = implDetailError
private[this] def implDetailError = sys.error("This method is an implementation detail and should not be referenced.")
private[std] def wrapTask[T: c.WeakTypeTag](c: Context)(ts: c.Expr[Any], pos: c.Position): c.Expr[T] =
wrapImpl[T, InputWrapper.type](c, InputWrapper, WrapTaskName)(ts, pos)
private[std] def wrapInit[T: c.WeakTypeTag](c: Context)(ts: c.Expr[Any], pos: c.Position): c.Expr[T] =
wrapImpl[T, InputWrapper.type](c, InputWrapper, WrapInitName)(ts, pos)
private[std] def wrapInitTask[T: c.WeakTypeTag](c: Context)(ts: c.Expr[Any], pos: c.Position): c.Expr[T] =
wrapImpl[T, InputWrapper.type](c, InputWrapper, WrapInitTaskName)(ts, pos)
private[std] def wrapInitInputTask[T: c.WeakTypeTag](c: Context)(ts: c.Expr[Any], pos: c.Position): c.Expr[T] =
wrapImpl[T, InputWrapper.type](c, InputWrapper, WrapInitInputName)(ts, pos)
private[std] def wrapInputTask[T: c.WeakTypeTag](c: Context)(ts: c.Expr[Any], pos: c.Position): c.Expr[T] =
wrapImpl[T, InputWrapper.type](c, InputWrapper, WrapInputName)(ts, pos)
private[std] def wrapPrevious[T: c.WeakTypeTag](c: Context)(ts: c.Expr[Any], pos: c.Position): c.Expr[Option[T]] =
wrapImpl[Option[T], InputWrapper.type](c, InputWrapper, WrapPreviousName)(ts, pos)
// TODO 2.11 Remove this after dropping 2.10.x support.
private object HasCompat { val compat = ??? }; import HasCompat._
/**
* Wraps an arbitrary Tree in a call to the `<s>.<wrapName>` method of this module for later processing by an enclosing macro.
* The resulting Tree is the manually constructed version of:
*
* `c.universe.reify { <s>.<wrapName>[T](ts.splice) }`
*/
def wrapImpl[T: c.WeakTypeTag, S <: AnyRef with Singleton](c: Context, s: S, wrapName: String)(ts: c.Expr[Any], pos: c.Position)(implicit it: c.TypeTag[s.type]): c.Expr[T] =
{
import c.universe.{ Apply => ApplyTree, _ }
import compat._
val util = new ContextUtil[c.type](c)
val iw = util.singleton(s)
val tpe = c.weakTypeOf[T]
val nme = newTermName(wrapName).encoded
val sel = Select(Ident(iw), nme)
sel.setPos(pos) // need to set the position on Select, because that is where the compileTimeOnly check looks
val tree = ApplyTree(TypeApply(sel, TypeTree(tpe) :: Nil), ts.tree :: Nil)
tree.setPos(ts.tree.pos)
// JZ: I'm not sure why we need to do this. Presumably a caller is wrapping this tree in a
// typed tree *before* handing the whole thing back to the macro engine. One must never splice
// untyped trees under typed trees, as the type checker doesn't descend if `tree.tpe == null`.
//
// #1031 The previous attempt to fix this just set the type on `tree`, which worked in cases when the
      // call to `.value` was inside the task macro and was eliminated before the end of the typer phase.
      // But, if a "naked" call to `.value` left the typer, the superaccessors phase would freak out when
      // it hit the untyped trees, before we could get to refchecks and the desired @compileTimeOnly warning.
val typedTree = c.typeCheck(tree)
c.Expr[T](typedTree)
}
def valueMacroImpl[T: c.WeakTypeTag](c: Context): c.Expr[T] =
ContextUtil.selectMacroImpl[T](c) { (ts, pos) =>
val tpe = ts.tree.tpe
if (tpe <:< c.weakTypeOf[Initialize[Task[T]]])
InputWrapper.wrapInitTask[T](c)(ts, pos)
else if (tpe <:< c.weakTypeOf[Initialize[T]])
InputWrapper.wrapInit[T](c)(ts, pos)
else if (tpe <:< c.weakTypeOf[Task[T]])
InputWrapper.wrapTask[T](c)(ts, pos)
else if (tpe <:< c.weakTypeOf[InputTask[T]])
InputWrapper.wrapInputTask[T](c)(ts, pos)
else if (tpe <:< c.weakTypeOf[Initialize[InputTask[T]]])
InputWrapper.wrapInitInputTask[T](c)(ts, pos)
else
c.abort(pos, s"Internal sbt error. Unexpected type ${tpe.widen}")
}
def taskValueMacroImpl[T: c.WeakTypeTag](c: Context): c.Expr[Task[T]] =
ContextUtil.selectMacroImpl[Task[T]](c) { (ts, pos) =>
val tpe = ts.tree.tpe
if (tpe <:< c.weakTypeOf[Initialize[Task[T]]])
InputWrapper.wrapInit[Task[T]](c)(ts, pos)
else
c.abort(pos, s"Internal sbt error. Unexpected type ${tpe.widen}")
}
/** Translates <task: TaskKey[T]>.previous(format) to Previous.runtime(<task>)(format).value*/
def previousMacroImpl[T: c.WeakTypeTag](c: Context)(format: c.Expr[sbinary.Format[T]]): c.Expr[Option[T]] =
{
import c.universe._
c.macroApplication match {
case a @ Apply(Select(Apply(_, t :: Nil), tp), fmt) =>
if (t.tpe <:< c.weakTypeOf[TaskKey[T]]) {
val tsTyped = c.Expr[TaskKey[T]](t)
val newTree = c.universe.reify { Previous.runtime[T](tsTyped.splice)(format.splice) }
wrapPrevious[T](c)(newTree, a.pos)
} else
c.abort(a.pos, s"Internal sbt error. Unexpected type ${t.tpe.widen}")
case x => ContextUtil.unexpectedTree(x)
}
}
}
sealed abstract class MacroTaskValue[T] {
@compileTimeOnly("`taskValue` can only be used within a setting macro, such as :=, +=, ++=, or Def.setting.")
def taskValue: Task[T] = macro InputWrapper.taskValueMacroImpl[T]
}
sealed abstract class MacroValue[T] {
@compileTimeOnly("`value` can only be used within a task or setting macro, such as :=, +=, ++=, Def.task, or Def.setting.")
def value: T = macro InputWrapper.valueMacroImpl[T]
}
sealed abstract class ParserInput[T] {
@compileTimeOnly("`parsed` can only be used within an input task macro, such as := or Def.inputTask.")
def parsed: T = macro ParserInput.parsedMacroImpl[T]
}
sealed abstract class InputEvaluated[T] {
@compileTimeOnly("`evaluated` can only be used within an input task macro, such as := or Def.inputTask.")
def evaluated: T = macro InputWrapper.valueMacroImpl[T]
}
sealed abstract class ParserInputTask[T] {
@compileTimeOnly("`parsed` can only be used within an input task macro, such as := or Def.inputTask.")
def parsed: Task[T] = macro ParserInput.parsedInputMacroImpl[T]
}
sealed abstract class MacroPrevious[T] {
@compileTimeOnly("`previous` can only be used within a task macro, such as :=, +=, ++=, or Def.task.")
def previous(implicit format: sbinary.Format[T]): Option[T] = macro InputWrapper.previousMacroImpl[T]
}
/** Implementation detail. The wrap method temporarily holds the input parser (as a Tree, at compile time) until the input task macro processes it. */
object ParserInput {
/* The name of the wrapper method should be obscure.
* Wrapper checking is based solely on this name, so it must not conflict with a user method name.
* The user should never see this method because it is compile-time only and only used internally by the task macros.*/
private[std] val WrapName = "parser_\\u2603\\u2603"
private[std] val WrapInitName = "initParser_\\u2603\\u2603"
@compileTimeOnly("`parsed` can only be used within an input task macro, such as := or Def.inputTask.")
def parser_\\u2603\\u2603[T](i: Any): T = sys.error("This method is an implementation detail and should not be referenced.")
@compileTimeOnly("`parsed` can only be used within an input task macro, such as := or Def.inputTask.")
def initParser_\\u2603\\u2603[T](i: Any): T = sys.error("This method is an implementation detail and should not be referenced.")
private[std] def wrap[T: c.WeakTypeTag](c: Context)(ts: c.Expr[Any], pos: c.Position): c.Expr[T] =
InputWrapper.wrapImpl[T, ParserInput.type](c, ParserInput, WrapName)(ts, pos)
private[std] def wrapInit[T: c.WeakTypeTag](c: Context)(ts: c.Expr[Any], pos: c.Position): c.Expr[T] =
InputWrapper.wrapImpl[T, ParserInput.type](c, ParserInput, WrapInitName)(ts, pos)
private[std] def inputParser[T: c.WeakTypeTag](c: Context)(t: c.Expr[InputTask[T]]): c.Expr[State => Parser[Task[T]]] = c.universe.reify(t.splice.parser)
def parsedInputMacroImpl[T: c.WeakTypeTag](c: Context): c.Expr[Task[T]] =
ContextUtil.selectMacroImpl[Task[T]](c) { (p, pos) =>
import c.universe.reify
val tpe = p.tree.tpe
if (tpe <:< c.weakTypeOf[InputTask[T]]) {
val e = c.Expr[InputTask[T]](p.tree)
wrap[Task[T]](c)(inputParser(c)(e), pos)
} else if (tpe <:< c.weakTypeOf[Initialize[InputTask[T]]]) {
val e = c.Expr[Initialize[InputTask[T]]](p.tree)
wrapInit[Task[T]](c)(reify { Def.toIParser(e.splice) }, pos)
} else
c.abort(pos, s"Internal sbt error. Unexpected type ${tpe.normalize} in parsedInputMacroImpl.")
}
/** Implements `Parser[T].parsed` by wrapping the Parser with the ParserInput wrapper.*/
def parsedMacroImpl[T: c.WeakTypeTag](c: Context): c.Expr[T] =
ContextUtil.selectMacroImpl[T](c) { (p, pos) =>
import c.universe.reify
val tpe = p.tree.tpe
if (tpe <:< c.weakTypeOf[Parser[T]]) {
val e = c.Expr[Parser[T]](p.tree)
wrap[T](c)(reify { Def.toSParser(e.splice) }, pos)
} else if (tpe <:< c.weakTypeOf[State => Parser[T]])
wrap[T](c)(p, pos)
else if (tpe <:< c.weakTypeOf[Initialize[Parser[T]]]) {
val e = c.Expr[Initialize[Parser[T]]](p.tree)
val es = reify { Def.toISParser(e.splice) }
wrapInit[T](c)(es, pos)
} else if (tpe <:< c.weakTypeOf[Initialize[State => Parser[T]]])
wrapInit[T](c)(p, pos)
else
c.abort(pos, s"Internal sbt error. Unexpected type ${tpe.normalize} in parsedMacroImpl")
}
}
| pdalpra/sbt | main/settings/src/main/scala/sbt/std/InputWrapper.scala | Scala | bsd-3-clause | 11,752 |
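The wrap markers defined above only ever appear inside sbt's task and setting macros. As a rough, hypothetical sketch (not code from this repository; the key names are invented), the build fragment below shows the context these macros support: each `.value` call is rewritten at compile time into one of the `InputWrapper.wrap*` calls, which the enclosing `:=` macro then eliminates, while a `.value` left outside such a macro trips the `@compileTimeOnly` errors above.

// build.sbt fragment (sbt 0.13.x style), illustrative only
import sbt._
import Keys._

val readmeFile = settingKey[File]("Location of the README")
val wordCount  = taskKey[Int]("Counts the words in the README")

readmeFile := baseDirectory.value / "README.md"

wordCount := {
  // Both `.value` calls below are macro expansions, valid only inside := / Def.task.
  val log   = streams.value.log
  val words = IO.read(readmeFile.value).split("""\s+""").count(_.nonEmpty)
  log.info(s"README has $words words")
  words
}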
package org.scaladebugger.api.profiles.swappable.requests.threads
import org.scaladebugger.api.lowlevel.JDIArgument
import org.scaladebugger.api.lowlevel.threads.ThreadStartRequestInfo
import org.scaladebugger.api.pipelines.Pipeline.IdentityPipeline
import org.scaladebugger.api.profiles.swappable.SwappableDebugProfileManagement
import org.scaladebugger.api.profiles.traits.requests.threads.ThreadStartRequest
import scala.util.Try
/**
* Represents a swappable profile for thread start events that redirects the
* invocation to another profile.
*/
trait SwappableThreadStartRequest extends ThreadStartRequest {
this: SwappableDebugProfileManagement =>
override def tryGetOrCreateThreadStartRequestWithData(
extraArguments: JDIArgument*
): Try[IdentityPipeline[ThreadStartEventAndData]] = {
withCurrentProfile.tryGetOrCreateThreadStartRequestWithData(extraArguments: _*)
}
override def isThreadStartRequestWithArgsPending(
extraArguments: JDIArgument*
): Boolean = {
withCurrentProfile.isThreadStartRequestWithArgsPending(extraArguments: _*)
}
override def removeThreadStartRequestWithArgs(
extraArguments: JDIArgument*
): Option[ThreadStartRequestInfo] = {
withCurrentProfile.removeThreadStartRequestWithArgs(extraArguments: _*)
}
override def removeAllThreadStartRequests(): Seq[ThreadStartRequestInfo] = {
withCurrentProfile.removeAllThreadStartRequests()
}
override def threadStartRequests: Seq[ThreadStartRequestInfo] = {
withCurrentProfile.threadStartRequests
}
}
| chipsenkbeil/scala-debugger | scala-debugger-api/src/main/scala/org/scaladebugger/api/profiles/swappable/requests/threads/SwappableThreadStartRequest.scala | Scala | apache-2.0 | 1,541 |
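Every method in the trait above follows the same shape: forward to `withCurrentProfile`, i.e. whichever profile is currently swapped in. A minimal, self-contained illustration of that swap-and-delegate pattern (deliberately generic; this is not the library's real API) is sketched below.

// Generic sketch of the swap-and-delegate pattern; names are illustrative only.
trait Greeter {
  def greet(name: String): String
}

class SwappableGreeter(initial: Greeter) extends Greeter {
  @volatile private var current: Greeter = initial

  // Swap the active implementation; subsequent calls are forwarded to it.
  def swapTo(next: Greeter): Unit = current = next

  override def greet(name: String): String = current.greet(name)
}

object SwappableGreeterExample extends App {
  val polite = new Greeter { def greet(name: String) = s"Good day, $name." }
  val casual = new Greeter { def greet(name: String) = s"Hey, $name!" }

  val greeter = new SwappableGreeter(polite)
  println(greeter.greet("Ada")) // Good day, Ada.
  greeter.swapTo(casual)
  println(greeter.greet("Ada")) // Hey, Ada!
}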
import sbt._
object Library {
// Versions
val akkaVersion = "2.2.1"
val sprayVersion = "1.2-20131004"
// Libraries
// Akka related
val akkaActor = "com.typesafe.akka" %% "akka-actor" % akkaVersion
// Spray related
val sprayRouting = "io.spray" % "spray-routing" % sprayVersion
val sprayCan = "io.spray" % "spray-can" % sprayVersion
val sprayHttp = "io.spray" % "spray-http" % sprayVersion
val sprayHttpX = "io.spray" % "spray-httpx" % sprayVersion
val sprayIO = "io.spray" % "spray-io" % sprayVersion
val sprayUtil = "io.spray" % "spray-util" % sprayVersion
val sprayJson = "io.spray" %% "spray-json" % "1.2.5"
// slf4j
// val slf4j = "org.slf4j" % "slf4j-simple" % "1.7.5",
val akkaSlf4j = "com.typesafe.akka" %% "akka-slf4j" % akkaVersion
lazy val logback = "ch.qos.logback" % "logback-classic" % "1.0.7"
// Databases
val mongoDb = "org.mongodb" %% "casbah" % "2.6.3"
val salat = "com.novus" %% "salat" % "1.9.2"
// Tests
val specs2 = "org.specs2" %% "specs2" % "2.2.3" % "test"
val scalaCheck = "org.scalacheck" %% "scalacheck" % "1.10.1" % "test"
val mockito = "org.mockito" % "mockito-all" % "1.9.5"
val akkaTestKit = "com.typesafe.akka" %% "akka-testkit" % akkaVersion
val sprayTestKit = "io.spray" % "spray-testkit" % sprayVersion % "test"
}
object Dependencies {
import Library._
val add = List (
akkaActor,
sprayRouting,
sprayCan,
sprayHttp,
sprayHttpX,
sprayIO,
sprayUtil,
sprayJson,
// slf4j,
akkaSlf4j,
logback,
mongoDb,
salat,
specs2,
scalaCheck,
mockito,
akkaTestKit,
sprayTestKit
)
}
| skaphe/math-micro-services | add/project/Dependencies.scala | Scala | mit | 1,641 |
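A hypothetical build definition (assumed, not part of this repository; the organization, Scala version, and resolver below are illustrative) would consume these groups by appending `Dependencies.add` to a module's library dependencies:

import sbt._
import Keys._

object AddServiceBuild extends Build {
  lazy val addService = Project(id = "add", base = file("."))
    .settings(
      organization := "org.skaphe",                      // assumed value
      scalaVersion := "2.10.3",                          // assumed value
      resolvers    += "spray repo" at "http://repo.spray.io",
      libraryDependencies ++= Dependencies.add           // pull in the whole list above
    )
}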
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.dataload
import org.scalatest.BeforeAndAfterAll
import org.apache.spark.sql.test.util.QueryTest
import org.apache.carbondata.processing.exception.DataLoadingException
class TestLoadDataUseAllDictionary extends QueryTest with BeforeAndAfterAll{
override def beforeAll {
sql("DROP TABLE IF EXISTS t3")
sql("""
CREATE TABLE IF NOT EXISTS t3
(ID Int, date Timestamp, country String,
name String, phonetype String, serialname String, salary Int)
STORED BY 'carbondata'
TBLPROPERTIES('dictionary_include'='country,name,phonetype,serialname')
""")
}
test("test load data use all dictionary, and given wrong format dictionary values") {
try {
sql(s"""
LOAD DATA LOCAL INPATH '$resourcesPath/source_without_header.csv' into table t3
options('FILEHEADER'='id,date,country,name,phonetype,serialname,salary',
'All_DICTIONARY_PATH'='$resourcesPath/dict.txt','single_pass'='true')
""")
assert(false)
} catch {
case e: DataLoadingException =>
assert(e.getMessage.equals("Data Loading failure, dictionary values are " +
"not in correct format!"))
}
}
override def afterAll {
sql("DROP TABLE IF EXISTS t3")
}
}
| sgururajshetty/carbondata | integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataUseAllDictionary.scala | Scala | apache-2.0 | 2,129 |
import sbt._
import Keys._
import com.typesafe.sbt.pgp.PgpKeys._
object PublishSettings {
type Sett = Project.Setting[_]
lazy val all = Seq[Sett](
pom
, publish
, publishMavenStyle := true
, publishArtifact in Test := false
, pomIncludeRepository := { _ => false }
, licenses := Seq("BSD-3-Clause" -> url("http://www.opensource.org/licenses/BSD-3-Clause"))
, homepage := Some(url("https://github.com/NICTA/trackfunction"))
, useGpg := true
)
lazy val pom: Sett =
pomExtra := (
<scm>
<url>[email protected]:NICTA/trackfunction.git</url>
<connection>scm:[email protected]:NICTA/trackfunction.git</connection>
</scm>
<developers>
<developer>
<id>tonymorris</id>
<name>Tony Morris</name>
<url>http://tmorris.net</url>
</developer>
</developers>
)
lazy val publish: Sett =
publishTo <<= version.apply(v => {
val nexus = "https://oss.sonatype.org/"
if (v.trim.endsWith("SNAPSHOT"))
Some("snapshots" at nexus + "content/repositories/snapshots")
else
Some("releases" at nexus + "service/local/staging/deploy/maven2")
})
}
| NICTA/trackfunction | project/PublishSettings.scala | Scala | bsd-3-clause | 1,177 |
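These settings are intended to be appended to a module's own settings (and assume the sbt-pgp plugin is on the build classpath, since `useGpg` comes from it). A hypothetical project definition applying them, with an assumed name and organization, might look like:

import sbt._
import Keys._

object TrackFunctionBuild extends Build {
  lazy val trackfunction = Project(
    id = "trackfunction",
    base = file("."),
    settings = Defaults.defaultSettings ++ Seq(
      name         := "trackfunction",
      organization := "com.nicta"                        // assumed value
    ) ++ PublishSettings.all
  )
}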
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import org.scalatest.concurrent.{Signaler, ThreadSignaler, TimeLimits}
import org.scalatest.time.{Millis, Span}
class UnpersistSuite extends SparkFunSuite with LocalSparkContext with TimeLimits {
// Necessary to make ScalaTest 3.x interrupt a thread on the JVM like ScalaTest 2.2.x
implicit val defaultSignaler: Signaler = ThreadSignaler
test("unpersist RDD") {
sc = new SparkContext("local", "test")
val rdd = sc.makeRDD(Array(1, 2, 3, 4), 2).cache()
rdd.count
assert(sc.persistentRdds.isEmpty === false)
rdd.unpersist(blocking = true)
assert(sc.persistentRdds.isEmpty === true)
failAfter(Span(3000, Millis)) {
try {
        while (!sc.getRDDStorageInfo.isEmpty) {
Thread.sleep(200)
}
      } catch {
        // Do nothing beyond a brief pause. We might see exceptions because the block
        // manager is racing this thread to remove entries from the driver.
        case _: Throwable => Thread.sleep(10)
      }
}
assert(sc.getRDDStorageInfo.isEmpty === true)
}
}
| WindCanDie/spark | core/src/test/scala/org/apache/spark/UnpersistSuite.scala | Scala | apache-2.0 | 1,850 |
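Outside a test harness, the cache/unpersist lifecycle exercised above looks roughly like the standalone sketch below (assumptions: a local master and illustrative names; this program is not part of the suite):

import org.apache.spark.{SparkConf, SparkContext}

object UnpersistExample {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[2]").setAppName("unpersist-example")
    val sc = new SparkContext(conf)
    try {
      val rdd = sc.parallelize(1 to 4, 2).cache() // mark the RDD for caching
      rdd.count()                                 // first action materializes the cached blocks
      assert(sc.getPersistentRDDs.nonEmpty)
      rdd.unpersist(blocking = true)              // block until the cached partitions are dropped
      assert(sc.getPersistentRDDs.isEmpty)
    } finally {
      sc.stop()
    }
  }
}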
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.integration.spark.testsuite.dataload
import org.apache.spark.sql.Row
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
/**
 * Test class for data loading when there are empty array columns in the data
*
*/
class TestLoadDataWithEmptyArrayColumns extends QueryTest with BeforeAndAfterAll {
override def beforeAll {
sql("drop table if exists nest13")
sql("""
CREATE TABLE nest13 (imei string,age int,
productdate timestamp,gamePointId double,
reserved6 array<string>,mobile struct<poc:string, imsi:int>)
STORED AS carbondata
""")
}
test("test carbon table data loading when there are empty array columns in data") {
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT
)
sql(
s"""
LOAD DATA inpath '$resourcesPath/arrayColumnEmpty.csv'
into table nest13 options ('DELIMITER'=',', 'complex_delimiter_level_1'='/')
"""
)
checkAnswer(
sql("""
SELECT count(*) from nest13
"""),
Seq(Row(20)))
}
override def afterAll {
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT)
sql("drop table nest13")
}
}
| zzcclp/carbondata | integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataWithEmptyArrayColumns.scala | Scala | apache-2.0 | 2,371 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.metadata
import org.apache.flink.table.api.{TableConfig, TableException}
import org.apache.flink.table.catalog.{CatalogManager, FunctionCatalog}
import org.apache.flink.table.data.RowData
import org.apache.flink.table.expressions.ApiExpressionUtils.intervalOfMillis
import org.apache.flink.table.expressions._
import org.apache.flink.table.functions.{FunctionIdentifier, UserDefinedFunctionHelper}
import org.apache.flink.table.module.ModuleManager
import org.apache.flink.table.operations.TableSourceQueryOperation
import org.apache.flink.table.planner.calcite.FlinkRelBuilder.PlannerNamedWindowProperty
import org.apache.flink.table.planner.calcite.{FlinkRelBuilder, FlinkTypeFactory}
import org.apache.flink.table.planner.delegation.PlannerContext
import org.apache.flink.table.planner.expressions.{PlannerProctimeAttribute, PlannerRowtimeAttribute, PlannerWindowReference, PlannerWindowStart}
import org.apache.flink.table.planner.functions.aggfunctions.SumAggFunction.DoubleSumAggFunction
import org.apache.flink.table.planner.functions.aggfunctions.{DenseRankAggFunction, RankAggFunction, RowNumberAggFunction}
import org.apache.flink.table.planner.functions.sql.FlinkSqlOperatorTable
import org.apache.flink.table.planner.functions.utils.AggSqlFunction
import org.apache.flink.table.planner.plan.PartialFinalType
import org.apache.flink.table.planner.plan.`trait`.{FlinkRelDistribution, FlinkRelDistributionTraitDef}
import org.apache.flink.table.planner.plan.logical.{LogicalWindow, TumblingGroupWindow}
import org.apache.flink.table.planner.plan.nodes.FlinkConventions
import org.apache.flink.table.planner.plan.nodes.calcite._
import org.apache.flink.table.planner.plan.nodes.logical._
import org.apache.flink.table.planner.plan.nodes.physical.batch._
import org.apache.flink.table.planner.plan.nodes.physical.stream._
import org.apache.flink.table.planner.plan.schema.FlinkPreparingTableBase
import org.apache.flink.table.planner.plan.stream.sql.join.TestTemporalTable
import org.apache.flink.table.planner.plan.utils.AggregateUtil.transformToStreamAggregateInfoList
import org.apache.flink.table.planner.plan.utils._
import org.apache.flink.table.planner.utils.{CountAggFunction, Top3}
import org.apache.flink.table.runtime.operators.rank.{ConstantRankRange, RankType, VariableRankRange}
import org.apache.flink.table.types.AtomicDataType
import org.apache.flink.table.types.logical._
import org.apache.flink.table.types.utils.TypeConversions
import org.apache.flink.table.utils.CatalogManagerMocks
import com.google.common.collect.{ImmutableList, Lists}
import org.apache.calcite.jdbc.CalciteSchema
import org.apache.calcite.plan._
import org.apache.calcite.prepare.CalciteCatalogReader
import org.apache.calcite.rel._
import org.apache.calcite.rel.`type`.{RelDataType, RelDataTypeFieldImpl}
import org.apache.calcite.rel.core._
import org.apache.calcite.rel.logical._
import org.apache.calcite.rel.metadata.{JaninoRelMetadataProvider, RelMetadataQuery, RelMetadataQueryBase}
import org.apache.calcite.rex._
import org.apache.calcite.schema.SchemaPlus
import org.apache.calcite.sql.SqlWindow
import org.apache.calcite.sql.`type`.SqlTypeName._
import org.apache.calcite.sql.`type`.{BasicSqlType, SqlTypeName}
import org.apache.calcite.sql.fun.SqlStdOperatorTable._
import org.apache.calcite.sql.fun.{SqlCountAggFunction, SqlStdOperatorTable}
import org.apache.calcite.sql.parser.SqlParserPos
import org.apache.calcite.util._
import org.junit.{Before, BeforeClass}
import java.math.BigDecimal
import java.util
import scala.collection.JavaConversions._
class FlinkRelMdHandlerTestBase {
val tableConfig = new TableConfig()
val rootSchema: SchemaPlus = MetadataTestUtil.initRootSchema()
val catalogManager: CatalogManager = CatalogManagerMocks.createEmptyCatalogManager()
val moduleManager = new ModuleManager
// TODO batch RelNode and stream RelNode should have different PlannerContext
  // and RelOptCluster because they have different trait definitions.
val plannerContext: PlannerContext =
new PlannerContext(
tableConfig,
new FunctionCatalog(tableConfig, catalogManager, moduleManager),
catalogManager,
CalciteSchema.from(rootSchema),
util.Arrays.asList(
ConventionTraitDef.INSTANCE,
FlinkRelDistributionTraitDef.INSTANCE,
RelCollationTraitDef.INSTANCE
)
)
val typeFactory: FlinkTypeFactory = plannerContext.getTypeFactory
val mq: FlinkRelMetadataQuery = FlinkRelMetadataQuery.instance()
var relBuilder: FlinkRelBuilder = _
var rexBuilder: RexBuilder = _
var cluster: RelOptCluster = _
var logicalTraits: RelTraitSet = _
var flinkLogicalTraits: RelTraitSet = _
var batchPhysicalTraits: RelTraitSet = _
var streamPhysicalTraits: RelTraitSet = _
@Before
def setUp(): Unit = {
relBuilder = plannerContext.createRelBuilder("default_catalog", "default_database")
rexBuilder = relBuilder.getRexBuilder
cluster = relBuilder.getCluster
logicalTraits = cluster.traitSetOf(Convention.NONE)
flinkLogicalTraits = cluster.traitSetOf(FlinkConventions.LOGICAL)
batchPhysicalTraits = cluster.traitSetOf(FlinkConventions.BATCH_PHYSICAL)
streamPhysicalTraits = cluster.traitSetOf(FlinkConventions.STREAM_PHYSICAL)
}
protected val intType: RelDataType = typeFactory.createFieldTypeFromLogicalType(
new IntType(false))
protected val doubleType: RelDataType = typeFactory.createFieldTypeFromLogicalType(
new DoubleType(false))
protected val longType: RelDataType = typeFactory.createFieldTypeFromLogicalType(
new BigIntType(false))
protected val stringType: RelDataType = typeFactory.createFieldTypeFromLogicalType(
new VarCharType(false, VarCharType.MAX_LENGTH))
protected lazy val testRel = new TestRel(
cluster, logicalTraits, createDataStreamScan(ImmutableList.of("student"), logicalTraits))
protected lazy val studentLogicalScan: LogicalTableScan =
createDataStreamScan(ImmutableList.of("student"), logicalTraits)
protected lazy val studentFlinkLogicalScan: FlinkLogicalDataStreamTableScan =
createDataStreamScan(ImmutableList.of("student"), flinkLogicalTraits)
protected lazy val studentBatchScan: BatchExecBoundedStreamScan =
createDataStreamScan(ImmutableList.of("student"), batchPhysicalTraits)
protected lazy val studentStreamScan: StreamExecDataStreamScan =
createDataStreamScan(ImmutableList.of("student"), streamPhysicalTraits)
protected lazy val empLogicalScan: LogicalTableScan =
createDataStreamScan(ImmutableList.of("emp"), logicalTraits)
protected lazy val empFlinkLogicalScan: FlinkLogicalDataStreamTableScan =
createDataStreamScan(ImmutableList.of("emp"), flinkLogicalTraits)
protected lazy val empBatchScan: BatchExecBoundedStreamScan =
createDataStreamScan(ImmutableList.of("emp"), batchPhysicalTraits)
protected lazy val empStreamScan: StreamExecDataStreamScan =
createDataStreamScan(ImmutableList.of("emp"), streamPhysicalTraits)
private lazy val valuesType = relBuilder.getTypeFactory
.builder()
.add("a", SqlTypeName.BIGINT)
.add("b", SqlTypeName.BOOLEAN)
.add("c", SqlTypeName.DATE)
.add("d", SqlTypeName.TIME)
.add("e", SqlTypeName.TIMESTAMP)
.add("f", SqlTypeName.DOUBLE)
.add("g", SqlTypeName.FLOAT)
.add("h", SqlTypeName.VARCHAR)
.build()
protected lazy val emptyValues: LogicalValues = {
relBuilder.values(valuesType)
relBuilder.build().asInstanceOf[LogicalValues]
}
protected lazy val logicalValues: LogicalValues = {
val tupleList = List(
List("1", "true", "2017-10-01", "10:00:00", "2017-10-01 00:00:00", "2.12", null, "abc"),
List(null, "false", "2017-09-01", "10:00:01", null, "3.12", null, null),
List("3", "true", null, "10:00:02", "2017-10-01 01:00:00", "3.0", null, "xyz"),
List("2", "true", "2017-10-02", "09:59:59", "2017-07-01 01:00:00", "-1", null, "F")
).map(createLiteralList(valuesType, _))
relBuilder.values(tupleList, valuesType)
relBuilder.build().asInstanceOf[LogicalValues]
}
// select id, name, score + 0.2, age - 1, height * 1.1 as h1, height / 0.9 as h2, height,
  // case when sex = 'M' then 1 else 2 end, true, 2.1, 2, cast(score as double not null) as s from student
protected lazy val logicalProject: LogicalProject = {
relBuilder.push(studentLogicalScan)
val projects = List(
// id
relBuilder.field(0),
// name
relBuilder.field(1),
      // score + 0.2
relBuilder.call(PLUS, relBuilder.field(2), relBuilder.literal(0.2)),
// age - 1
relBuilder.call(MINUS, relBuilder.field(3), relBuilder.literal(1)),
// height * 1.1 as h1
relBuilder.alias(relBuilder.call(MULTIPLY, relBuilder.field(4), relBuilder.literal(1.1)),
"h1"),
// height / 0.9 as h2
relBuilder.alias(relBuilder.call(DIVIDE, relBuilder.field(4), relBuilder.literal(0.9)), "h2"),
// height
relBuilder.field(4),
      // case when sex = 'M' then 1 else 2 end
relBuilder.call(CASE, relBuilder.call(EQUALS, relBuilder.field(5), relBuilder.literal("M")),
relBuilder.literal(1), relBuilder.literal(2)),
// true
relBuilder.literal(true),
// 2.1
rexBuilder.makeLiteral(2.1D, doubleType, true),
// 2
rexBuilder.makeLiteral(2L, longType, true),
// cast(score as double not null) as s
rexBuilder.makeCast(doubleType, relBuilder.field(2))
)
relBuilder.project(projects).build().asInstanceOf[LogicalProject]
}
// filter: id < 10
// calc = filter (id < 10) + logicalProject
protected lazy val (logicalFilter, logicalCalc) = {
relBuilder.push(studentLogicalScan)
// id < 10
val expr = relBuilder.call(LESS_THAN, relBuilder.field(0), relBuilder.literal(10))
val filter = relBuilder.filter(expr).build
val calc = createLogicalCalc(
studentLogicalScan, logicalProject.getRowType, logicalProject.getProjects, List(expr))
(filter, calc)
}
// id, name, score, age, height, sex, class, 1
// id, null, score, age, height, sex, class, 4
// id, null, score, age, height, null, class, 5
protected lazy val (logicalExpand, flinkLogicalExpand, batchExpand, streamExpand) = {
val cluster = studentLogicalScan.getCluster
val expandOutputType = ExpandUtil.buildExpandRowType(
cluster.getTypeFactory, studentLogicalScan.getRowType, Array.empty[Integer])
val expandProjects = ExpandUtil.createExpandProjects(
studentLogicalScan.getCluster.getRexBuilder,
studentLogicalScan.getRowType,
expandOutputType,
ImmutableBitSet.of(1, 3, 5),
ImmutableList.of(
ImmutableBitSet.of(1, 3, 5),
ImmutableBitSet.of(3, 5),
ImmutableBitSet.of(3)),
Array.empty[Integer])
val logicalExpand = new LogicalExpand(cluster, studentLogicalScan.getTraitSet,
studentLogicalScan, expandOutputType, expandProjects, 7)
val flinkLogicalExpand = new FlinkLogicalExpand(cluster, flinkLogicalTraits,
studentFlinkLogicalScan, expandOutputType, expandProjects, 7)
val batchExpand = new BatchExecExpand(cluster, batchPhysicalTraits,
studentBatchScan, expandOutputType, expandProjects, 7)
val streamExecExpand = new StreamExecExpand(cluster, streamPhysicalTraits,
studentStreamScan, expandOutputType, expandProjects, 7)
(logicalExpand, flinkLogicalExpand, batchExpand, streamExecExpand)
}
// hash exchange on class
protected lazy val (batchExchange, streamExchange) = {
val hash6 = FlinkRelDistribution.hash(Array(6), requireStrict = true)
val batchExchange = new BatchExecExchange(
cluster,
batchPhysicalTraits.replace(hash6),
studentBatchScan,
hash6
)
val streamExchange = new StreamExecExchange(
cluster,
streamPhysicalTraits.replace(hash6),
studentStreamScan,
hash6
)
(batchExchange, streamExchange)
}
// equivalent SQL is
// select * from student order by class asc, score desc
protected lazy val (logicalSort, flinkLogicalSort, batchSort, streamSort) = {
val logicalSort = relBuilder.scan("student").sort(
relBuilder.field("class"),
relBuilder.desc(relBuilder.field("score")))
.build.asInstanceOf[LogicalSort]
val collation = logicalSort.getCollation
val flinkLogicalSort = new FlinkLogicalSort(cluster, flinkLogicalTraits.replace(collation),
studentFlinkLogicalScan, collation, null, null)
val batchSort = new BatchExecSort(cluster,
batchPhysicalTraits.replace(collation).replace(FlinkRelDistribution.SINGLETON),
studentBatchScan, collation)
val streamSort = new StreamExecSort(cluster,
streamPhysicalTraits.replace(collation).replace(FlinkRelDistribution.SINGLETON),
studentStreamScan, collation)
(logicalSort, flinkLogicalSort, batchSort, streamSort)
}
// equivalent SQL is
// select * from student limit 20 offset 10
protected lazy val (
logicalLimit,
flinkLogicalLimit,
batchLimit,
batchLocalLimit,
batchGlobalLimit,
streamLimit) = {
val logicalSort = relBuilder.scan("student").limit(10, 20)
.build.asInstanceOf[LogicalSort]
val collation = logicalSort.getCollation
val flinkLogicalSort = new FlinkLogicalSort(
cluster, flinkLogicalTraits.replace(collation), studentFlinkLogicalScan, collation,
logicalSort.offset, logicalSort.fetch)
val batchSort = new BatchExecLimit(cluster, batchPhysicalTraits.replace(collation),
new BatchExecExchange(
cluster, batchPhysicalTraits.replace(FlinkRelDistribution.SINGLETON), studentBatchScan,
FlinkRelDistribution.SINGLETON),
logicalSort.offset, logicalSort.fetch, true)
val batchSortLocal = new BatchExecLimit(cluster, batchPhysicalTraits.replace(collation),
studentBatchScan,
relBuilder.literal(0),
relBuilder.literal(SortUtil.getLimitEnd(logicalSort.offset, logicalSort.fetch)),
false)
val batchSortGlobal = new BatchExecLimit(cluster, batchPhysicalTraits.replace(collation),
new BatchExecExchange(
cluster, batchPhysicalTraits.replace(FlinkRelDistribution.SINGLETON), batchSortLocal,
FlinkRelDistribution.SINGLETON),
logicalSort.offset, logicalSort.fetch, true)
val streamSort = new StreamExecLimit(cluster, streamPhysicalTraits.replace(collation),
studentStreamScan, logicalSort.offset, logicalSort.fetch)
(logicalSort, flinkLogicalSort, batchSort, batchSortLocal, batchSortGlobal, streamSort)
}
// equivalent SQL is
// select * from student order by class asc, score desc limit 20 offset 10
protected lazy val (
logicalSortLimit,
flinkLogicalSortLimit,
batchSortLimit,
batchLocalSortLimit,
batchGlobalSortLimit,
streamSortLimit) = {
val logicalSortLimit = relBuilder.scan("student").sort(
relBuilder.field("class"),
relBuilder.desc(relBuilder.field("score")))
.limit(10, 20).build.asInstanceOf[LogicalSort]
val collection = logicalSortLimit.collation
val offset = logicalSortLimit.offset
val fetch = logicalSortLimit.fetch
val flinkLogicalSortLimit = new FlinkLogicalSort(cluster,
flinkLogicalTraits.replace(collection), studentFlinkLogicalScan, collection, offset, fetch)
val batchSortLimit = new BatchExecSortLimit(cluster, batchPhysicalTraits.replace(collection),
new BatchExecExchange(
cluster, batchPhysicalTraits.replace(FlinkRelDistribution.SINGLETON), studentBatchScan,
FlinkRelDistribution.SINGLETON),
collection, offset, fetch, true)
val batchSortLocalLimit = new BatchExecSortLimit(cluster,
batchPhysicalTraits.replace(collection), studentBatchScan, collection,
relBuilder.literal(0),
relBuilder.literal(SortUtil.getLimitEnd(offset, fetch)),
false)
val batchSortGlobal = new BatchExecSortLimit(cluster, batchPhysicalTraits.replace(collection),
new BatchExecExchange(
cluster, batchPhysicalTraits.replace(FlinkRelDistribution.SINGLETON), batchSortLocalLimit,
FlinkRelDistribution.SINGLETON),
collection, offset, fetch, true)
val streamSort = new StreamExecSortLimit(cluster, streamPhysicalTraits.replace(collection),
studentStreamScan, collection, offset, fetch, UndefinedStrategy)
(logicalSortLimit, flinkLogicalSortLimit,
batchSortLimit, batchSortLocalLimit, batchSortGlobal, streamSort)
}
// equivalent SQL is
// select * from (
// select id, name, score, age, height, sex, class,
// RANK() over (partition by class order by score) rk from student
// ) t where rk <= 5
protected lazy val (
logicalRank,
flinkLogicalRank,
batchLocalRank,
batchGlobalRank,
streamRank) = {
val logicalRank = new LogicalRank(
cluster,
logicalTraits,
studentLogicalScan,
ImmutableBitSet.of(6),
RelCollations.of(2),
RankType.RANK,
new ConstantRankRange(1, 5),
new RelDataTypeFieldImpl("rk", 7, longType),
outputRankNumber = true
)
val flinkLogicalRank = new FlinkLogicalRank(
cluster,
flinkLogicalTraits,
studentFlinkLogicalScan,
ImmutableBitSet.of(6),
RelCollations.of(2),
RankType.RANK,
new ConstantRankRange(1, 5),
new RelDataTypeFieldImpl("rk", 7, longType),
outputRankNumber = true
)
val batchLocalRank = new BatchExecRank(
cluster,
batchPhysicalTraits,
studentBatchScan,
ImmutableBitSet.of(6),
RelCollations.of(2),
RankType.RANK,
new ConstantRankRange(1, 5),
new RelDataTypeFieldImpl("rk", 7, longType),
outputRankNumber = false,
isGlobal = false
)
val hash6 = FlinkRelDistribution.hash(Array(6), requireStrict = true)
val batchExchange = new BatchExecExchange(
cluster, batchLocalRank.getTraitSet.replace(hash6), batchLocalRank, hash6)
val batchGlobalRank = new BatchExecRank(
cluster,
batchPhysicalTraits,
batchExchange,
ImmutableBitSet.of(6),
RelCollations.of(2),
RankType.RANK,
new ConstantRankRange(1, 5),
new RelDataTypeFieldImpl("rk", 7, longType),
outputRankNumber = true,
isGlobal = true
)
val streamExchange = new BatchExecExchange(cluster,
studentStreamScan.getTraitSet.replace(hash6), studentStreamScan, hash6)
val streamRank = new StreamExecRank(
cluster,
streamPhysicalTraits,
streamExchange,
ImmutableBitSet.of(6),
RelCollations.of(2),
RankType.RANK,
new ConstantRankRange(1, 5),
new RelDataTypeFieldImpl("rk", 7, longType),
outputRankNumber = true,
UndefinedStrategy
)
(logicalRank, flinkLogicalRank, batchLocalRank, batchGlobalRank, streamRank)
}
// equivalent SQL is
// select * from (
// select id, name, score, age, height, sex, class,
// RANK() over (partition by age order by score) rk from student
// ) t where rk <= 5 and rk >= 3
protected lazy val (
logicalRank2,
flinkLogicalRank2,
batchLocalRank2,
batchGlobalRank2,
streamRank2) = {
val logicalRank = new LogicalRank(
cluster,
logicalTraits,
studentLogicalScan,
ImmutableBitSet.of(3),
RelCollations.of(2),
RankType.RANK,
new ConstantRankRange(3, 5),
new RelDataTypeFieldImpl("rk", 7, longType),
outputRankNumber = true
)
val flinkLogicalRank = new FlinkLogicalRank(
cluster,
flinkLogicalTraits,
studentFlinkLogicalScan,
ImmutableBitSet.of(3),
RelCollations.of(2),
RankType.RANK,
new ConstantRankRange(3, 5),
new RelDataTypeFieldImpl("rk", 7, longType),
outputRankNumber = true
)
val batchLocalRank = new BatchExecRank(
cluster,
batchPhysicalTraits,
studentBatchScan,
ImmutableBitSet.of(3),
RelCollations.of(2),
RankType.RANK,
new ConstantRankRange(1, 5),
new RelDataTypeFieldImpl("rk", 7, longType),
outputRankNumber = false,
isGlobal = false
)
val hash6 = FlinkRelDistribution.hash(Array(6), requireStrict = true)
val batchExchange = new BatchExecExchange(
cluster, batchLocalRank.getTraitSet.replace(hash6), batchLocalRank, hash6)
val batchGlobalRank = new BatchExecRank(
cluster,
batchPhysicalTraits,
batchExchange,
ImmutableBitSet.of(3),
RelCollations.of(2),
RankType.RANK,
new ConstantRankRange(3, 5),
new RelDataTypeFieldImpl("rk", 7, longType),
outputRankNumber = true,
isGlobal = true
)
val streamExchange = new BatchExecExchange(cluster,
studentStreamScan.getTraitSet.replace(hash6), studentStreamScan, hash6)
val streamRank = new StreamExecRank(
cluster,
streamPhysicalTraits,
streamExchange,
ImmutableBitSet.of(3),
RelCollations.of(2),
RankType.RANK,
new ConstantRankRange(3, 5),
new RelDataTypeFieldImpl("rk", 7, longType),
outputRankNumber = true,
UndefinedStrategy
)
(logicalRank, flinkLogicalRank, batchLocalRank, batchGlobalRank, streamRank)
}
// equivalent SQL is
// select * from (
// select id, name, score, age, height, sex, class,
// ROW_NUMBER() over (order by height) rn from student
  //  ) t where rn > 2 and rn < 7
protected lazy val (logicalRowNumber, flinkLogicalRowNumber, streamRowNumber) = {
val logicalRowNumber = new LogicalRank(
cluster,
logicalTraits,
studentLogicalScan,
ImmutableBitSet.of(),
RelCollations.of(4),
RankType.ROW_NUMBER,
new ConstantRankRange(3, 6),
new RelDataTypeFieldImpl("rn", 7, longType),
outputRankNumber = true
)
val flinkLogicalRowNumber = new FlinkLogicalRank(
cluster,
flinkLogicalTraits,
studentFlinkLogicalScan,
ImmutableBitSet.of(),
RelCollations.of(4),
RankType.ROW_NUMBER,
new ConstantRankRange(3, 6),
new RelDataTypeFieldImpl("rn", 7, longType),
outputRankNumber = true
)
val singleton = FlinkRelDistribution.SINGLETON
val streamExchange = new BatchExecExchange(cluster,
studentStreamScan.getTraitSet.replace(singleton), studentStreamScan, singleton)
val streamRowNumber = new StreamExecRank(
cluster,
streamPhysicalTraits,
streamExchange,
ImmutableBitSet.of(),
RelCollations.of(4),
RankType.ROW_NUMBER,
new ConstantRankRange(3, 6),
new RelDataTypeFieldImpl("rn", 7, longType),
outputRankNumber = true,
UndefinedStrategy
)
(logicalRowNumber, flinkLogicalRowNumber, streamRowNumber)
}
// equivalent SQL is
// select a, b, c from (
  //   select a, b, c, proctime,
// ROW_NUMBER() over (partition by b order by proctime) rn from TemporalTable3
// ) t where rn <= 1
//
// select a, b, c from (
  //   select a, b, c, proctime,
// ROW_NUMBER() over (partition by b, c order by proctime desc) rn from TemporalTable3
// ) t where rn <= 1
protected lazy val (streamDeduplicateFirstRow, streamDeduplicateLastRow) = {
val scan: StreamExecDataStreamScan =
createDataStreamScan(ImmutableList.of("TemporalTable3"), streamPhysicalTraits)
val hash1 = FlinkRelDistribution.hash(Array(1), requireStrict = true)
val streamExchange1 = new StreamExecExchange(
cluster, scan.getTraitSet.replace(hash1), scan, hash1)
val firstRow = new StreamExecDeduplicate(
cluster,
streamPhysicalTraits,
streamExchange1,
Array(1),
keepLastRow = false
)
val builder = typeFactory.builder()
firstRow.getRowType.getFieldList.dropRight(2).foreach(builder.add)
val projectProgram = RexProgram.create(
firstRow.getRowType,
Array(0, 1, 2).map(i => RexInputRef.of(i, firstRow.getRowType)).toList,
null,
builder.build(),
rexBuilder
)
val calcOfFirstRow = new StreamExecCalc(
cluster,
streamPhysicalTraits,
firstRow,
projectProgram,
projectProgram.getOutputRowType
)
val hash12 = FlinkRelDistribution.hash(Array(1, 2), requireStrict = true)
val streamExchange2 = new BatchExecExchange(cluster,
scan.getTraitSet.replace(hash12), scan, hash12)
val lastRow = new StreamExecDeduplicate(
cluster,
streamPhysicalTraits,
streamExchange2,
Array(1, 2),
keepLastRow = true
)
val calcOfLastRow = new StreamExecCalc(
cluster,
streamPhysicalTraits,
lastRow,
projectProgram,
projectProgram.getOutputRowType
)
(calcOfFirstRow, calcOfLastRow)
}
// equivalent SQL is
// select * from (
// select id, name, score, age, height, sex, class,
// RANK() over (partition by class order by score) rk from student
// ) t where rk <= age
protected lazy val (
logicalRankWithVariableRange,
flinkLogicalRankWithVariableRange,
streamRankWithVariableRange) = {
val logicalRankWithVariableRange = new LogicalRank(
cluster,
logicalTraits,
studentLogicalScan,
ImmutableBitSet.of(6),
RelCollations.of(2),
RankType.RANK,
new VariableRankRange(3),
new RelDataTypeFieldImpl("rk", 7, longType),
outputRankNumber = true
)
val flinkLogicalRankWithVariableRange = new FlinkLogicalRank(
cluster,
logicalTraits,
studentFlinkLogicalScan,
ImmutableBitSet.of(6),
RelCollations.of(2),
RankType.RANK,
new VariableRankRange(3),
new RelDataTypeFieldImpl("rk", 7, longType),
outputRankNumber = true
)
val streamRankWithVariableRange = new StreamExecRank(
cluster,
logicalTraits,
studentStreamScan,
ImmutableBitSet.of(6),
RelCollations.of(2),
RankType.RANK,
new VariableRankRange(3),
new RelDataTypeFieldImpl("rk", 7, longType),
outputRankNumber = true,
UndefinedStrategy
)
(logicalRankWithVariableRange, flinkLogicalRankWithVariableRange, streamRankWithVariableRange)
}
protected lazy val tableAggCall = {
val top3 = new Top3
val resultTypeInfo = UserDefinedFunctionHelper.getReturnTypeOfAggregateFunction(top3)
val accTypeInfo = UserDefinedFunctionHelper.getAccumulatorTypeOfAggregateFunction(top3)
val resultDataType = TypeConversions.fromLegacyInfoToDataType(resultTypeInfo)
val accDataType = TypeConversions.fromLegacyInfoToDataType(accTypeInfo)
val builder = typeFactory.builder()
builder.add("f0", new BasicSqlType(typeFactory.getTypeSystem, SqlTypeName.INTEGER))
builder.add("f1", new BasicSqlType(typeFactory.getTypeSystem, SqlTypeName.INTEGER))
val relDataType = builder.build()
AggregateCall.create(
AggSqlFunction(
FunctionIdentifier.of("top3"),
"top3",
new Top3,
resultDataType,
accDataType,
typeFactory,
false),
false,
false,
false,
Seq(Integer.valueOf(0)).toList,
-1,
RelCollationImpl.of(),
relDataType,
""
)
}
protected lazy val (logicalTableAgg, flinkLogicalTableAgg, streamExecTableAgg) = {
val logicalTableAgg = new LogicalTableAggregate(
cluster,
logicalTraits,
studentLogicalScan,
ImmutableBitSet.of(0),
null,
Seq(tableAggCall))
val flinkLogicalTableAgg = new FlinkLogicalTableAggregate(
cluster,
logicalTraits,
studentLogicalScan,
ImmutableBitSet.of(0),
null,
Seq(tableAggCall)
)
val builder = typeFactory.builder()
builder.add("key", new BasicSqlType(typeFactory.getTypeSystem, SqlTypeName.BIGINT))
builder.add("f0", new BasicSqlType(typeFactory.getTypeSystem, SqlTypeName.INTEGER))
builder.add("f1", new BasicSqlType(typeFactory.getTypeSystem, SqlTypeName.INTEGER))
val relDataType = builder.build()
val streamExecTableAgg = new StreamExecGroupTableAggregate(
cluster,
logicalTraits,
studentLogicalScan,
relDataType,
Array(0),
Seq(tableAggCall)
)
(logicalTableAgg, flinkLogicalTableAgg, streamExecTableAgg)
}
// equivalent Table API is
// tEnv.scan("TemporalTable1")
// .select("c, a, b, rowtime")
// .window(Tumble.over("15.minutes").on("rowtime").as("w"))
// .groupBy("a, w")
// .flatAggregate("top3(c)")
// .select("a, f0, f1, w.start, w.end, w.rowtime, w.proctime")
protected lazy val (
logicalWindowTableAgg,
flinkLogicalWindowTableAgg,
streamWindowTableAgg) = {
relBuilder.scan("TemporalTable1")
val ts = relBuilder.peek()
val project = relBuilder.project(relBuilder.fields(Seq[Integer](2, 0, 1, 4).toList))
.build().asInstanceOf[Project]
val program = RexProgram.create(
ts.getRowType, project.getProjects, null, project.getRowType, rexBuilder)
val aggCallOfWindowAgg = Lists.newArrayList(tableAggCall)
val logicalWindowAgg = new LogicalWindowTableAggregate(
ts.getCluster,
ts.getTraitSet,
project,
ImmutableBitSet.of(1),
ImmutableList.of(ImmutableBitSet.of(1)),
aggCallOfWindowAgg,
tumblingGroupWindow,
namedPropertiesOfWindowAgg)
val flinkLogicalTs: FlinkLogicalDataStreamTableScan =
createDataStreamScan(ImmutableList.of("TemporalTable1"), flinkLogicalTraits)
val flinkLogicalWindowAgg = new FlinkLogicalWindowTableAggregate(
ts.getCluster,
logicalTraits,
new FlinkLogicalCalc(ts.getCluster, flinkLogicalTraits, flinkLogicalTs, program),
ImmutableBitSet.of(1),
ImmutableList.of(ImmutableBitSet.of(1)),
aggCallOfWindowAgg,
tumblingGroupWindow,
namedPropertiesOfWindowAgg)
val hash01 = FlinkRelDistribution.hash(Array(1), requireStrict = true)
val streamTs: StreamExecDataStreamScan =
createDataStreamScan(ImmutableList.of("TemporalTable1"), streamPhysicalTraits)
val streamCalc = new BatchExecCalc(
cluster, streamPhysicalTraits, streamTs, program, program.getOutputRowType)
val streamExchange = new StreamExecExchange(
cluster, streamPhysicalTraits.replace(hash01), streamCalc, hash01)
val emitStrategy = WindowEmitStrategy(tableConfig, tumblingGroupWindow)
val streamWindowAgg = new StreamExecGroupWindowTableAggregate(
cluster,
streamPhysicalTraits,
streamExchange,
flinkLogicalWindowAgg.getRowType,
streamExchange.getRowType,
Array(1),
flinkLogicalWindowAgg.getAggCallList,
tumblingGroupWindow,
namedPropertiesOfWindowAgg,
inputTimeFieldIndex = 2,
emitStrategy
)
(logicalWindowAgg, flinkLogicalWindowAgg, streamWindowAgg)
}
// equivalent SQL is
// select age,
// avg(score) as avg_score,
// sum(score) as sum_score,
// max(height) as max_height,
// min(height) as min_height,
// count(id) as cnt
// from student group by age
protected lazy val (
logicalAgg,
flinkLogicalAgg,
batchLocalAgg,
batchGlobalAggWithLocal,
batchGlobalAggWithoutLocal,
streamLocalAgg,
streamGlobalAggWithLocal,
streamGlobalAggWithoutLocal) = {
val logicalAgg = relBuilder.push(studentLogicalScan).aggregate(
relBuilder.groupKey(relBuilder.field(3)),
relBuilder.avg(false, "avg_score", relBuilder.field(2)),
relBuilder.sum(false, "sum_score", relBuilder.field(2)),
relBuilder.max("max_height", relBuilder.field(4)),
relBuilder.min("min_height", relBuilder.field(4)),
relBuilder.count(false, "cnt", relBuilder.field(0))
).build().asInstanceOf[LogicalAggregate]
val flinkLogicalAgg = new FlinkLogicalAggregate(
cluster,
flinkLogicalTraits,
studentFlinkLogicalScan,
logicalAgg.getGroupSet,
logicalAgg.getGroupSets,
logicalAgg.getAggCallList
)
val aggCalls = logicalAgg.getAggCallList
val aggFunctionFactory = new AggFunctionFactory(
studentBatchScan.getRowType, Array.empty[Int], Array.fill(aggCalls.size())(false))
val aggCallToAggFunction = aggCalls.zipWithIndex.map {
case (call, index) => (call, aggFunctionFactory.createAggFunction(call, index))
}
val rowTypeOfLocalAgg = typeFactory.builder
.add("age", intType)
.add("sum$0", doubleType)
.add("count$1", longType)
.add("sum_score", doubleType)
.add("max_height", doubleType)
.add("min_height", doubleType)
.add("cnt", longType).build()
val rowTypeOfGlobalAgg = typeFactory.builder
.add("age", intType)
.add("avg_score", doubleType)
.add("sum_score", doubleType)
.add("max_height", doubleType)
.add("min_height", doubleType)
.add("cnt", longType).build()
val hash0 = FlinkRelDistribution.hash(Array(0), requireStrict = true)
val hash3 = FlinkRelDistribution.hash(Array(3), requireStrict = true)
val batchLocalAgg = new BatchExecLocalHashAggregate(
cluster,
relBuilder,
batchPhysicalTraits,
studentBatchScan,
rowTypeOfLocalAgg,
studentBatchScan.getRowType,
Array(3),
auxGrouping = Array(),
aggCallToAggFunction)
val batchExchange1 = new BatchExecExchange(
cluster, batchLocalAgg.getTraitSet.replace(hash0), batchLocalAgg, hash0)
val batchGlobalAgg = new BatchExecHashAggregate(
cluster,
relBuilder,
batchPhysicalTraits,
batchExchange1,
rowTypeOfGlobalAgg,
batchExchange1.getRowType,
batchLocalAgg.getInput.getRowType,
Array(0),
auxGrouping = Array(),
aggCallToAggFunction,
isMerge = true)
val batchExchange2 = new BatchExecExchange(cluster,
studentBatchScan.getTraitSet.replace(hash3), studentBatchScan, hash3)
val batchGlobalAggWithoutLocal = new BatchExecHashAggregate(
cluster,
relBuilder,
batchPhysicalTraits,
batchExchange2,
rowTypeOfGlobalAgg,
batchExchange2.getRowType,
batchExchange2.getRowType,
Array(3),
auxGrouping = Array(),
aggCallToAggFunction,
isMerge = false)
val needRetractionArray = AggregateUtil.getNeedRetractions(
1, needRetraction = false, null, aggCalls)
val localAggInfoList = transformToStreamAggregateInfoList(
aggCalls,
studentStreamScan.getRowType,
needRetractionArray,
needInputCount = false,
isStateBackendDataViews = false)
val streamLocalAgg = new StreamExecLocalGroupAggregate(
cluster,
streamPhysicalTraits,
studentStreamScan,
rowTypeOfLocalAgg,
Array(3),
aggCalls,
localAggInfoList,
PartialFinalType.NONE)
val streamExchange1 = new StreamExecExchange(
cluster, streamLocalAgg.getTraitSet.replace(hash0), streamLocalAgg, hash0)
val globalAggInfoList = transformToStreamAggregateInfoList(
aggCalls,
streamExchange1.getRowType,
needRetractionArray,
needInputCount = false,
isStateBackendDataViews = true)
val streamGlobalAgg = new StreamExecGlobalGroupAggregate(
cluster,
streamPhysicalTraits,
streamExchange1,
streamExchange1.getRowType,
rowTypeOfGlobalAgg,
Array(0),
localAggInfoList,
globalAggInfoList,
PartialFinalType.NONE)
val streamExchange2 = new StreamExecExchange(cluster,
studentStreamScan.getTraitSet.replace(hash3), studentStreamScan, hash3)
val streamGlobalAggWithoutLocal = new StreamExecGroupAggregate(
cluster,
streamPhysicalTraits,
streamExchange2,
rowTypeOfGlobalAgg,
Array(3),
aggCalls)
(logicalAgg, flinkLogicalAgg,
batchLocalAgg, batchGlobalAgg, batchGlobalAggWithoutLocal,
streamLocalAgg, streamGlobalAgg, streamGlobalAggWithoutLocal)
}
// equivalent SQL is
// select avg(score) as avg_score,
// sum(score) as sum_score,
// count(id) as cnt
// from student group by id, name, height
protected lazy val (
logicalAggWithAuxGroup,
flinkLogicalAggWithAuxGroup,
batchLocalAggWithAuxGroup,
batchGlobalAggWithLocalWithAuxGroup,
batchGlobalAggWithoutLocalWithAuxGroup) = {
val logicalAggWithAuxGroup = relBuilder.push(studentLogicalScan).aggregate(
relBuilder.groupKey(relBuilder.field(0)),
relBuilder.aggregateCall(FlinkSqlOperatorTable.AUXILIARY_GROUP, relBuilder.field(1)),
relBuilder.aggregateCall(FlinkSqlOperatorTable.AUXILIARY_GROUP, relBuilder.field(4)),
relBuilder.avg(false, "avg_score", relBuilder.field(2)),
relBuilder.sum(false, "sum_score", relBuilder.field(2)),
relBuilder.count(false, "cnt", relBuilder.field(0))
).build().asInstanceOf[LogicalAggregate]
val flinkLogicalAggWithAuxGroup = new FlinkLogicalAggregate(
cluster,
flinkLogicalTraits,
studentFlinkLogicalScan,
logicalAggWithAuxGroup.getGroupSet,
logicalAggWithAuxGroup.getGroupSets,
logicalAggWithAuxGroup.getAggCallList
)
val aggCalls = logicalAggWithAuxGroup.getAggCallList.filter {
call => call.getAggregation != FlinkSqlOperatorTable.AUXILIARY_GROUP
}
val aggFunctionFactory = new AggFunctionFactory(
studentBatchScan.getRowType, Array.empty[Int], Array.fill(aggCalls.size())(false))
val aggCallToAggFunction = aggCalls.zipWithIndex.map {
case (call, index) => (call, aggFunctionFactory.createAggFunction(call, index))
}
val rowTypeOfLocalAgg = typeFactory.builder
.add("id", intType)
.add("name", stringType)
.add("height", doubleType)
.add("sum$0", doubleType)
.add("count$1", longType)
.add("sum_score", doubleType)
.add("cnt", longType).build()
val batchLocalAggWithAuxGroup = new BatchExecLocalHashAggregate(
cluster,
relBuilder,
batchPhysicalTraits,
studentBatchScan,
rowTypeOfLocalAgg,
studentBatchScan.getRowType,
Array(0),
auxGrouping = Array(1, 4),
aggCallToAggFunction)
val hash0 = FlinkRelDistribution.hash(Array(0), requireStrict = true)
val batchExchange = new BatchExecExchange(cluster,
batchLocalAggWithAuxGroup.getTraitSet.replace(hash0), batchLocalAggWithAuxGroup, hash0)
val rowTypeOfGlobalAgg = typeFactory.builder
.add("id", intType)
.add("name", stringType)
.add("height", doubleType)
.add("avg_score", doubleType)
.add("sum_score", doubleType)
.add("cnt", longType).build()
val batchGlobalAggWithAuxGroup = new BatchExecHashAggregate(
cluster,
relBuilder,
batchPhysicalTraits,
batchExchange,
rowTypeOfGlobalAgg,
batchExchange.getRowType,
batchLocalAggWithAuxGroup.getInput.getRowType,
Array(0),
auxGrouping = Array(1, 2),
aggCallToAggFunction,
isMerge = true)
val batchExchange2 = new BatchExecExchange(cluster,
studentBatchScan.getTraitSet.replace(hash0), studentBatchScan, hash0)
val batchGlobalAggWithoutLocalWithAuxGroup = new BatchExecHashAggregate(
cluster,
relBuilder,
batchPhysicalTraits,
batchExchange2,
rowTypeOfGlobalAgg,
batchExchange2.getRowType,
batchExchange2.getRowType,
Array(0),
auxGrouping = Array(1, 4),
aggCallToAggFunction,
isMerge = false)
(logicalAggWithAuxGroup, flinkLogicalAggWithAuxGroup,
batchLocalAggWithAuxGroup, batchGlobalAggWithAuxGroup, batchGlobalAggWithoutLocalWithAuxGroup)
}
  // For window start/end/proctime the inferred window attribute type is a hard-coded value;
  // only for rowtime do we distinguish the batch row time, for which we hard-code
  // DataTypes.TIMESTAMP. That is acceptable here since this base class is only used for testing.
private lazy val windowRef: PlannerWindowReference =
PlannerWindowReference.apply("w$", Some(new TimestampType(3)))
protected lazy val tumblingGroupWindow: LogicalWindow =
TumblingGroupWindow(
windowRef,
new FieldReferenceExpression(
"rowtime",
new AtomicDataType(new TimestampType(true, TimestampKind.ROWTIME, 3)),
0,
4),
intervalOfMillis(900000)
)
protected lazy val namedPropertiesOfWindowAgg: Seq[PlannerNamedWindowProperty] =
Seq(PlannerNamedWindowProperty("w$start", PlannerWindowStart(windowRef)),
PlannerNamedWindowProperty("w$end", PlannerWindowStart(windowRef)),
PlannerNamedWindowProperty("w$rowtime", PlannerRowtimeAttribute(windowRef)),
PlannerNamedWindowProperty("w$proctime", PlannerProctimeAttribute(windowRef)))
// equivalent SQL is
// select a, b, count(c) as s,
// TUMBLE_START(rowtime, INTERVAL '15' MINUTE) as w$start,
// TUMBLE_END(rowtime, INTERVAL '15' MINUTE) as w$end,
// TUMBLE_ROWTIME(rowtime, INTERVAL '15' MINUTE) as w$rowtime,
// TUMBLE_PROCTIME(rowtime, INTERVAL '15' MINUTE) as w$proctime
// from TemporalTable1 group by a, b, TUMBLE(rowtime, INTERVAL '15' MINUTE)
protected lazy val (
logicalWindowAgg,
flinkLogicalWindowAgg,
batchLocalWindowAgg,
batchGlobalWindowAggWithLocalAgg,
batchGlobalWindowAggWithoutLocalAgg,
streamWindowAgg) = {
relBuilder.scan("TemporalTable1")
val ts = relBuilder.peek()
val project = relBuilder.project(relBuilder.fields(Seq[Integer](0, 1, 4, 2).toList))
.build().asInstanceOf[Project]
val program = RexProgram.create(
ts.getRowType, project.getProjects, null, project.getRowType, rexBuilder)
val aggCallOfWindowAgg = Lists.newArrayList(AggregateCall.create(
new SqlCountAggFunction("COUNT"), false, false, List[Integer](3), -1, 2, project, null, "s"))
// TUMBLE(rowtime, INTERVAL '15' MINUTE))
val logicalWindowAgg = new LogicalWindowAggregate(
ts.getCluster,
ts.getTraitSet,
project,
ImmutableBitSet.of(0, 1),
aggCallOfWindowAgg,
tumblingGroupWindow,
namedPropertiesOfWindowAgg)
val flinkLogicalTs: FlinkLogicalDataStreamTableScan =
createDataStreamScan(ImmutableList.of("TemporalTable1"), flinkLogicalTraits)
val flinkLogicalWindowAgg = new FlinkLogicalWindowAggregate(
ts.getCluster,
logicalTraits,
new FlinkLogicalCalc(ts.getCluster, flinkLogicalTraits, flinkLogicalTs, program),
ImmutableBitSet.of(0, 1),
aggCallOfWindowAgg,
tumblingGroupWindow,
namedPropertiesOfWindowAgg)
val batchTs: BatchExecBoundedStreamScan =
createDataStreamScan(ImmutableList.of("TemporalTable1"), batchPhysicalTraits)
val batchCalc = new BatchExecCalc(
cluster, batchPhysicalTraits, batchTs, program, program.getOutputRowType)
val hash01 = FlinkRelDistribution.hash(Array(0, 1), requireStrict = true)
val batchExchange1 = new BatchExecExchange(
cluster, batchPhysicalTraits.replace(hash01), batchCalc, hash01)
val (_, _, aggregates) =
AggregateUtil.transformToBatchAggregateFunctions(
flinkLogicalWindowAgg.getAggCallList, batchExchange1.getRowType)
val aggCallToAggFunction = flinkLogicalWindowAgg.getAggCallList.zip(aggregates)
val localWindowAggTypes =
(Array(0, 1).map(batchCalc.getRowType.getFieldList.get(_).getType) ++ // grouping
Array(longType) ++ // assignTs
aggCallOfWindowAgg.map(_.getType)).toList // agg calls
val localWindowAggNames =
(Array(0, 1).map(batchCalc.getRowType.getFieldNames.get(_)) ++ // grouping
Array("assignedWindow$") ++ // assignTs
Array("count$0")).toList // agg calls
val localWindowAggRowType = typeFactory.createStructType(
localWindowAggTypes, localWindowAggNames)
val batchLocalWindowAgg = new BatchExecLocalHashWindowAggregate(
batchCalc.getCluster,
relBuilder,
batchPhysicalTraits,
batchCalc,
localWindowAggRowType,
batchCalc.getRowType,
Array(0, 1),
Array.empty,
aggCallToAggFunction,
tumblingGroupWindow,
inputTimeFieldIndex = 2,
inputTimeIsDate = false,
namedPropertiesOfWindowAgg,
enableAssignPane = false)
val batchExchange2 = new BatchExecExchange(
cluster, batchPhysicalTraits.replace(hash01), batchLocalWindowAgg, hash01)
val batchWindowAggWithLocal = new BatchExecHashWindowAggregate(
cluster,
relBuilder,
batchPhysicalTraits,
batchExchange2,
flinkLogicalWindowAgg.getRowType,
batchExchange2.getRowType,
batchCalc.getRowType,
Array(0, 1),
Array.empty,
aggCallToAggFunction,
tumblingGroupWindow,
inputTimeFieldIndex = 2,
inputTimeIsDate = false,
namedPropertiesOfWindowAgg,
enableAssignPane = false,
isMerge = true
)
val batchWindowAggWithoutLocal = new BatchExecHashWindowAggregate(
batchExchange1.getCluster,
relBuilder,
batchPhysicalTraits,
batchExchange1,
flinkLogicalWindowAgg.getRowType,
batchExchange1.getRowType,
batchExchange1.getRowType,
Array(0, 1),
Array.empty,
aggCallToAggFunction,
tumblingGroupWindow,
inputTimeFieldIndex = 2,
inputTimeIsDate = false,
namedPropertiesOfWindowAgg,
enableAssignPane = false,
isMerge = false
)
val streamTs: StreamExecDataStreamScan =
createDataStreamScan(ImmutableList.of("TemporalTable1"), streamPhysicalTraits)
    val streamCalc = new StreamExecCalc(
      cluster, streamPhysicalTraits, streamTs, program, program.getOutputRowType)
val streamExchange = new StreamExecExchange(
cluster, streamPhysicalTraits.replace(hash01), streamCalc, hash01)
val emitStrategy = WindowEmitStrategy(tableConfig, tumblingGroupWindow)
val streamWindowAgg = new StreamExecGroupWindowAggregate(
cluster,
streamPhysicalTraits,
streamExchange,
flinkLogicalWindowAgg.getRowType,
streamExchange.getRowType,
Array(0, 1),
flinkLogicalWindowAgg.getAggCallList,
tumblingGroupWindow,
namedPropertiesOfWindowAgg,
inputTimeFieldIndex = 2,
emitStrategy
)
(logicalWindowAgg, flinkLogicalWindowAgg, batchLocalWindowAgg, batchWindowAggWithLocal,
batchWindowAggWithoutLocal, streamWindowAgg)
}
// equivalent SQL is
// select b, count(a) as s,
// TUMBLE_START(rowtime, INTERVAL '15' MINUTE) as w$start,
// TUMBLE_END(rowtime, INTERVAL '15' MINUTE) as w$end,
// TUMBLE_ROWTIME(rowtime, INTERVAL '15' MINUTE) as w$rowtime,
// TUMBLE_PROCTIME(rowtime, INTERVAL '15' MINUTE) as w$proctime
// from TemporalTable1 group by b, TUMBLE(rowtime, INTERVAL '15' MINUTE)
protected lazy val (
logicalWindowAgg2,
flinkLogicalWindowAgg2,
batchLocalWindowAgg2,
batchGlobalWindowAggWithLocalAgg2,
batchGlobalWindowAggWithoutLocalAgg2,
streamWindowAgg2) = {
relBuilder.scan("TemporalTable1")
val ts = relBuilder.peek()
val project = relBuilder.project(relBuilder.fields(Seq[Integer](0, 1, 4).toList))
.build().asInstanceOf[Project]
val program = RexProgram.create(
ts.getRowType, project.getProjects, null, project.getRowType, rexBuilder)
val aggCallOfWindowAgg = Lists.newArrayList(AggregateCall.create(
new SqlCountAggFunction("COUNT"), false, false, List[Integer](0), -1, 1, project, null, "s"))
    // TUMBLE(rowtime, INTERVAL '15' MINUTE)
val logicalWindowAgg = new LogicalWindowAggregate(
ts.getCluster,
ts.getTraitSet,
project,
ImmutableBitSet.of(1),
aggCallOfWindowAgg,
tumblingGroupWindow,
namedPropertiesOfWindowAgg)
val flinkLogicalTs: FlinkLogicalDataStreamTableScan =
createDataStreamScan(ImmutableList.of("TemporalTable1"), flinkLogicalTraits)
val flinkLogicalWindowAgg = new FlinkLogicalWindowAggregate(
ts.getCluster,
logicalTraits,
new FlinkLogicalCalc(ts.getCluster, flinkLogicalTraits, flinkLogicalTs, program),
ImmutableBitSet.of(1),
aggCallOfWindowAgg,
tumblingGroupWindow,
namedPropertiesOfWindowAgg)
val batchTs: BatchExecBoundedStreamScan =
createDataStreamScan(ImmutableList.of("TemporalTable1"), batchPhysicalTraits)
val batchCalc = new BatchExecCalc(
cluster, batchPhysicalTraits, batchTs, program, program.getOutputRowType)
val hash1 = FlinkRelDistribution.hash(Array(1), requireStrict = true)
val batchExchange1 = new BatchExecExchange(
cluster, batchPhysicalTraits.replace(hash1), batchCalc, hash1)
val (_, _, aggregates) =
AggregateUtil.transformToBatchAggregateFunctions(
flinkLogicalWindowAgg.getAggCallList, batchExchange1.getRowType)
val aggCallToAggFunction = flinkLogicalWindowAgg.getAggCallList.zip(aggregates)
val localWindowAggTypes =
(Array(batchCalc.getRowType.getFieldList.get(1).getType) ++ // grouping
Array(longType) ++ // assignTs
aggCallOfWindowAgg.map(_.getType)).toList // agg calls
val localWindowAggNames =
(Array(batchCalc.getRowType.getFieldNames.get(1)) ++ // grouping
Array("assignedWindow$") ++ // assignTs
Array("count$0")).toList // agg calls
val localWindowAggRowType = typeFactory.createStructType(
localWindowAggTypes, localWindowAggNames)
val batchLocalWindowAgg = new BatchExecLocalHashWindowAggregate(
batchCalc.getCluster,
relBuilder,
batchPhysicalTraits,
batchCalc,
localWindowAggRowType,
batchCalc.getRowType,
Array(1),
Array.empty,
aggCallToAggFunction,
tumblingGroupWindow,
inputTimeFieldIndex = 2,
inputTimeIsDate = false,
namedPropertiesOfWindowAgg,
enableAssignPane = false)
val batchExchange2 = new BatchExecExchange(
cluster, batchPhysicalTraits.replace(hash1), batchLocalWindowAgg, hash1)
val batchWindowAggWithLocal = new BatchExecHashWindowAggregate(
cluster,
relBuilder,
batchPhysicalTraits,
batchExchange2,
flinkLogicalWindowAgg.getRowType,
batchExchange2.getRowType,
batchCalc.getRowType,
Array(0),
Array.empty,
aggCallToAggFunction,
tumblingGroupWindow,
inputTimeFieldIndex = 2,
inputTimeIsDate = false,
namedPropertiesOfWindowAgg,
enableAssignPane = false,
isMerge = true
)
val batchWindowAggWithoutLocal = new BatchExecHashWindowAggregate(
batchExchange1.getCluster,
relBuilder,
batchPhysicalTraits,
batchExchange1,
flinkLogicalWindowAgg.getRowType,
batchExchange1.getRowType,
batchExchange1.getRowType,
Array(1),
Array.empty,
aggCallToAggFunction,
tumblingGroupWindow,
inputTimeFieldIndex = 2,
inputTimeIsDate = false,
namedPropertiesOfWindowAgg,
enableAssignPane = false,
isMerge = false
)
val streamTs: StreamExecDataStreamScan =
createDataStreamScan(ImmutableList.of("TemporalTable1"), streamPhysicalTraits)
    val streamCalc = new StreamExecCalc(
      cluster, streamPhysicalTraits, streamTs, program, program.getOutputRowType)
val streamExchange = new StreamExecExchange(
cluster, streamPhysicalTraits.replace(hash1), streamCalc, hash1)
val emitStrategy = WindowEmitStrategy(tableConfig, tumblingGroupWindow)
val streamWindowAgg = new StreamExecGroupWindowAggregate(
cluster,
streamPhysicalTraits,
streamExchange,
flinkLogicalWindowAgg.getRowType,
streamExchange.getRowType,
Array(1),
flinkLogicalWindowAgg.getAggCallList,
tumblingGroupWindow,
namedPropertiesOfWindowAgg,
inputTimeFieldIndex = 2,
emitStrategy
)
(logicalWindowAgg, flinkLogicalWindowAgg, batchLocalWindowAgg, batchWindowAggWithLocal,
batchWindowAggWithoutLocal, streamWindowAgg)
}
// equivalent SQL is
// select a, c, count(b) as s,
// TUMBLE_START(rowtime, INTERVAL '15' MINUTE) as w$start,
// TUMBLE_END(rowtime, INTERVAL '15' MINUTE) as w$end,
// TUMBLE_ROWTIME(rowtime, INTERVAL '15' MINUTE) as w$rowtime,
// TUMBLE_PROCTIME(rowtime, INTERVAL '15' MINUTE) as w$proctime
// from TemporalTable2 group by a, c, TUMBLE(rowtime, INTERVAL '15' MINUTE)
protected lazy val (
logicalWindowAggWithAuxGroup,
flinkLogicalWindowAggWithAuxGroup,
batchLocalWindowAggWithAuxGroup,
batchGlobalWindowAggWithLocalAggWithAuxGroup,
batchGlobalWindowAggWithoutLocalAggWithAuxGroup) = {
relBuilder.scan("TemporalTable2")
val ts = relBuilder.peek()
val project = relBuilder.project(relBuilder.fields(Seq[Integer](0, 2, 4, 1).toList))
.build().asInstanceOf[Project]
val program = RexProgram.create(
ts.getRowType, project.getProjects, null, project.getRowType, rexBuilder)
val aggCallOfWindowAgg = Lists.newArrayList(
AggregateCall.create(FlinkSqlOperatorTable.AUXILIARY_GROUP, false, false,
List[Integer](1), -1, 1, project, null, "c"),
AggregateCall.create(new SqlCountAggFunction("COUNT"), false, false,
List[Integer](3), -1, 2, project, null, "s"))
    // TUMBLE(rowtime, INTERVAL '15' MINUTE)
val logicalWindowAggWithAuxGroup = new LogicalWindowAggregate(
ts.getCluster,
ts.getTraitSet,
project,
ImmutableBitSet.of(0),
aggCallOfWindowAgg,
tumblingGroupWindow,
namedPropertiesOfWindowAgg)
val flinkLogicalTs: FlinkLogicalDataStreamTableScan =
createDataStreamScan(ImmutableList.of("TemporalTable2"), flinkLogicalTraits)
val flinkLogicalWindowAggWithAuxGroup = new FlinkLogicalWindowAggregate(
ts.getCluster,
logicalTraits,
new FlinkLogicalCalc(ts.getCluster, flinkLogicalTraits, flinkLogicalTs, program),
ImmutableBitSet.of(0),
aggCallOfWindowAgg,
tumblingGroupWindow,
namedPropertiesOfWindowAgg)
val batchTs: BatchExecBoundedStreamScan =
createDataStreamScan(ImmutableList.of("TemporalTable2"), batchPhysicalTraits)
val batchCalc = new BatchExecCalc(
cluster, batchPhysicalTraits, batchTs, program, program.getOutputRowType)
val hash0 = FlinkRelDistribution.hash(Array(0), requireStrict = true)
val batchExchange1 = new BatchExecExchange(
cluster, batchPhysicalTraits.replace(hash0), batchCalc, hash0)
val aggCallsWithoutAuxGroup = flinkLogicalWindowAggWithAuxGroup.getAggCallList.drop(1)
val (_, _, aggregates) =
AggregateUtil.transformToBatchAggregateFunctions(
aggCallsWithoutAuxGroup, batchExchange1.getRowType)
val aggCallToAggFunction = aggCallsWithoutAuxGroup.zip(aggregates)
val localWindowAggTypes =
(Array(batchCalc.getRowType.getFieldList.get(0).getType) ++ // grouping
Array(longType) ++ // assignTs
Array(batchCalc.getRowType.getFieldList.get(1).getType) ++ // auxGrouping
aggCallsWithoutAuxGroup.map(_.getType)).toList // agg calls
val localWindowAggNames =
(Array(batchCalc.getRowType.getFieldNames.get(0)) ++ // grouping
Array("assignedWindow$") ++ // assignTs
Array(batchCalc.getRowType.getFieldNames.get(1)) ++ // auxGrouping
Array("count$0")).toList // agg calls
val localWindowAggRowType = typeFactory.createStructType(
localWindowAggTypes, localWindowAggNames)
val batchLocalWindowAggWithAuxGroup = new BatchExecLocalHashWindowAggregate(
batchCalc.getCluster,
relBuilder,
batchPhysicalTraits,
batchCalc,
localWindowAggRowType,
batchCalc.getRowType,
Array(0),
Array(1),
aggCallToAggFunction,
tumblingGroupWindow,
inputTimeFieldIndex = 2,
inputTimeIsDate = false,
namedPropertiesOfWindowAgg,
enableAssignPane = false)
val batchExchange2 = new BatchExecExchange(
cluster, batchPhysicalTraits.replace(hash0), batchLocalWindowAggWithAuxGroup, hash0)
val batchWindowAggWithLocalWithAuxGroup = new BatchExecHashWindowAggregate(
cluster,
relBuilder,
batchPhysicalTraits,
batchExchange2,
flinkLogicalWindowAggWithAuxGroup.getRowType,
batchExchange2.getRowType,
batchCalc.getRowType,
Array(0),
Array(2), // local output grouping keys: grouping + assignTs + auxGrouping
aggCallToAggFunction,
tumblingGroupWindow,
inputTimeFieldIndex = 2,
inputTimeIsDate = false,
namedPropertiesOfWindowAgg,
enableAssignPane = false,
isMerge = true
)
val batchWindowAggWithoutLocalWithAuxGroup = new BatchExecHashWindowAggregate(
batchExchange1.getCluster,
relBuilder,
batchPhysicalTraits,
batchExchange1,
flinkLogicalWindowAggWithAuxGroup.getRowType,
batchExchange1.getRowType,
batchExchange1.getRowType,
Array(0),
Array(1),
aggCallToAggFunction,
tumblingGroupWindow,
inputTimeFieldIndex = 2,
inputTimeIsDate = false,
namedPropertiesOfWindowAgg,
enableAssignPane = false,
isMerge = false
)
(logicalWindowAggWithAuxGroup, flinkLogicalWindowAggWithAuxGroup,
batchLocalWindowAggWithAuxGroup, batchWindowAggWithLocalWithAuxGroup,
batchWindowAggWithoutLocalWithAuxGroup)
}
// equivalent SQL is
// select id, name, score, age, class,
// row_number() over(partition by class order by name) as rn,
// rank() over (partition by class order by score) as rk,
// dense_rank() over (partition by class order by score) as drk,
// avg(score) over (partition by class order by score) as avg_score,
// max(score) over (partition by age) as max_score,
// count(id) over (partition by age) as cnt
// from student
protected lazy val (flinkLogicalOverAgg, batchOverAgg) = {
val types = Map(
"id" -> longType,
"name" -> stringType,
"score" -> doubleType,
"age" -> intType,
"class" -> intType,
"rn" -> longType,
"rk" -> longType,
"drk" -> longType,
"avg_score" -> doubleType,
"count$0_score" -> longType,
"sum$0_score" -> doubleType,
"max_score" -> doubleType,
"cnt" -> longType
)
def createRowType(selectFields: String*): RelDataType = {
val builder = typeFactory.builder
selectFields.foreach { f =>
builder.add(f, types.getOrElse(f, throw new IllegalArgumentException(s"$f does not exist")))
}
builder.build()
}
val rowTypeOfCalc = createRowType("id", "name", "score", "age", "class")
val rexProgram = RexProgram.create(
studentFlinkLogicalScan.getRowType,
Array(0, 1, 2, 3, 6).map(i => RexInputRef.of(i, studentFlinkLogicalScan.getRowType)).toList,
null,
rowTypeOfCalc,
rexBuilder
)
val rowTypeOfWindowAgg = createRowType(
"id", "name", "score", "age", "class", "rn", "rk", "drk",
"count$0_score", "sum$0_score", "max_score", "cnt")
val flinkLogicalOverAgg = new FlinkLogicalOverAggregate(
cluster,
flinkLogicalTraits,
new FlinkLogicalCalc(cluster, flinkLogicalTraits, studentFlinkLogicalScan, rexProgram),
ImmutableList.of(),
rowTypeOfWindowAgg,
overAggGroups
)
val rowTypeOfWindowAggOutput = createRowType(
"id", "name", "score", "age", "class", "rn", "rk", "drk", "avg_score", "max_score", "cnt")
val projectProgram = RexProgram.create(
flinkLogicalOverAgg.getRowType,
(0 until flinkLogicalOverAgg.getRowType.getFieldCount).flatMap { i =>
if (i < 8 || i >= 10) {
Array[RexNode](RexInputRef.of(i, flinkLogicalOverAgg.getRowType))
} else if (i == 8) {
Array[RexNode](rexBuilder.makeCall(SqlStdOperatorTable.DIVIDE,
RexInputRef.of(8, flinkLogicalOverAgg.getRowType),
RexInputRef.of(9, flinkLogicalOverAgg.getRowType)))
} else {
Array.empty[RexNode]
}
}.toList,
null,
rowTypeOfWindowAggOutput,
rexBuilder
)
val flinkLogicalOverAggOutput = new FlinkLogicalCalc(
cluster,
flinkLogicalTraits,
flinkLogicalOverAgg,
projectProgram
)
val calc = new BatchExecCalc(
cluster, batchPhysicalTraits, studentBatchScan, rexProgram, rowTypeOfCalc)
val hash4 = FlinkRelDistribution.hash(Array(4), requireStrict = true)
val exchange1 = new BatchExecExchange(cluster, calc.getTraitSet.replace(hash4), calc, hash4)
// sort class, name
val collection1 = RelCollations.of(
FlinkRelOptUtil.ofRelFieldCollation(4), FlinkRelOptUtil.ofRelFieldCollation(1))
val newSortTrait1 = exchange1.getTraitSet.replace(collection1)
val sort1 = new BatchExecSort(cluster, newSortTrait1, exchange1,
newSortTrait1.getTrait(RelCollationTraitDef.INSTANCE))
val outputRowType1 = createRowType("id", "name", "score", "age", "class", "rn")
val innerWindowAgg1 = new BatchExecOverAggregate(
cluster,
relBuilder,
batchPhysicalTraits,
sort1,
outputRowType1,
sort1.getRowType,
Array(4),
Array(1),
Array(true),
Array(false),
Seq((overAggGroups(0), Seq(
(AggregateCall.create(SqlStdOperatorTable.ROW_NUMBER, false, ImmutableList.of(), -1,
longType, "rn"),
new RowNumberAggFunction())))),
flinkLogicalOverAgg
)
// sort class, score
val collation2 = RelCollations.of(
FlinkRelOptUtil.ofRelFieldCollation(4), FlinkRelOptUtil.ofRelFieldCollation(2))
val newSortTrait2 = innerWindowAgg1.getTraitSet.replace(collation2)
val sort2 = new BatchExecSort(cluster, newSortTrait2, innerWindowAgg1,
newSortTrait2.getTrait(RelCollationTraitDef.INSTANCE))
val outputRowType2 = createRowType(
"id", "name", "score", "age", "class", "rn", "rk", "drk", "count$0_score", "sum$0_score")
val innerWindowAgg2 = new BatchExecOverAggregate(
cluster,
relBuilder,
batchPhysicalTraits,
sort2,
outputRowType2,
sort2.getRowType,
Array(4),
Array(2),
Array(true),
Array(false),
Seq((overAggGroups(1), Seq(
(AggregateCall.create(SqlStdOperatorTable.RANK, false, ImmutableList.of(), -1, longType,
"rk"),
new RankAggFunction(Array(new VarCharType(VarCharType.MAX_LENGTH)))),
(AggregateCall.create(SqlStdOperatorTable.DENSE_RANK, false, ImmutableList.of(), -1,
longType, "drk"),
new DenseRankAggFunction(Array(new VarCharType(VarCharType.MAX_LENGTH)))),
(AggregateCall.create(SqlStdOperatorTable.COUNT, false,
ImmutableList.of(Integer.valueOf(2)), -1, longType, "count$0_socre"),
new CountAggFunction()),
(AggregateCall.create(SqlStdOperatorTable.SUM, false,
ImmutableList.of(Integer.valueOf(2)), -1, doubleType, "sum$0_score"),
new DoubleSumAggFunction())
))),
flinkLogicalOverAgg
)
val hash3 = FlinkRelDistribution.hash(Array(3), requireStrict = true)
val exchange2 = new BatchExecExchange(
cluster, innerWindowAgg2.getTraitSet.replace(hash3), innerWindowAgg2, hash3)
val outputRowType3 = createRowType(
"id", "name", "score", "age", "class", "rn", "rk", "drk",
"count$0_score", "sum$0_score", "max_score", "cnt")
val batchWindowAgg = new BatchExecOverAggregate(
cluster,
relBuilder,
batchPhysicalTraits,
exchange2,
outputRowType3,
exchange2.getRowType,
Array(3),
Array.empty,
Array.empty,
Array.empty,
Seq((overAggGroups(2), Seq(
(AggregateCall.create(SqlStdOperatorTable.MAX, false,
ImmutableList.of(Integer.valueOf(2)), -1, longType, "max_score"),
new CountAggFunction()),
(AggregateCall.create(SqlStdOperatorTable.COUNT, false,
ImmutableList.of(Integer.valueOf(0)), -1, doubleType, "cnt"),
new DoubleSumAggFunction())
))),
flinkLogicalOverAgg
)
val batchWindowAggOutput = new BatchExecCalc(
cluster,
batchPhysicalTraits,
batchWindowAgg,
projectProgram,
projectProgram.getOutputRowType
)
(flinkLogicalOverAggOutput, batchWindowAggOutput)
}
// equivalent SQL is
// select id, name, score, age, class,
// rank() over (partition by class order by score) as rk,
// dense_rank() over (partition by class order by score) as drk,
// avg(score) over (partition by class order by score) as avg_score
// from student
protected lazy val streamOverAgg: StreamPhysicalRel = {
val types = Map(
"id" -> longType,
"name" -> stringType,
"score" -> doubleType,
"age" -> intType,
"class" -> intType,
"rk" -> longType,
"drk" -> longType,
"avg_score" -> doubleType,
"count$0_score" -> longType,
"sum$0_score" -> doubleType
)
def createRowType(selectFields: String*): RelDataType = {
val builder = typeFactory.builder
selectFields.foreach { f =>
builder.add(f, types.getOrElse(f, throw new IllegalArgumentException(s"$f does not exist")))
}
builder.build()
}
val rowTypeOfCalc = createRowType("id", "name", "score", "age", "class")
val rexProgram = RexProgram.create(
studentFlinkLogicalScan.getRowType,
Array(0, 1, 2, 3, 6).map(i => RexInputRef.of(i, studentFlinkLogicalScan.getRowType)).toList,
null,
rowTypeOfCalc,
rexBuilder
)
val rowTypeOfWindowAgg = createRowType(
"id", "name", "score", "age", "class", "rk", "drk", "count$0_score", "sum$0_score")
val flinkLogicalOverAgg = new FlinkLogicalOverAggregate(
cluster,
flinkLogicalTraits,
new FlinkLogicalCalc(cluster, flinkLogicalTraits, studentFlinkLogicalScan, rexProgram),
ImmutableList.of(),
rowTypeOfWindowAgg,
util.Arrays.asList(overAggGroups.get(1))
)
val streamScan: StreamExecDataStreamScan =
createDataStreamScan(ImmutableList.of("student"), streamPhysicalTraits)
val calc = new StreamExecCalc(
cluster, streamPhysicalTraits, streamScan, rexProgram, rowTypeOfCalc)
val hash4 = FlinkRelDistribution.hash(Array(4), requireStrict = true)
val exchange = new StreamExecExchange(cluster, calc.getTraitSet.replace(hash4), calc, hash4)
val windowAgg = new StreamExecOverAggregate(
cluster,
streamPhysicalTraits,
exchange,
rowTypeOfWindowAgg,
exchange.getRowType,
flinkLogicalOverAgg
)
val rowTypeOfWindowAggOutput = createRowType(
"id", "name", "score", "age", "class", "rk", "drk", "avg_score")
val projectProgram = RexProgram.create(
flinkLogicalOverAgg.getRowType,
(0 until flinkLogicalOverAgg.getRowType.getFieldCount).flatMap { i =>
if (i < 7) {
Array[RexNode](RexInputRef.of(i, flinkLogicalOverAgg.getRowType))
} else if (i == 7) {
Array[RexNode](rexBuilder.makeCall(SqlStdOperatorTable.DIVIDE,
RexInputRef.of(7, flinkLogicalOverAgg.getRowType),
RexInputRef.of(8, flinkLogicalOverAgg.getRowType)))
} else {
Array.empty[RexNode]
}
}.toList,
null,
rowTypeOfWindowAggOutput,
rexBuilder
)
val streamWindowAggOutput = new StreamExecCalc(
cluster,
streamPhysicalTraits,
windowAgg,
projectProgram,
projectProgram.getOutputRowType
)
streamWindowAggOutput
}
// row_number() over(partition by class order by name) as rn,
// rank() over (partition by class order by score) as rk,
// dense_rank() over (partition by class order by score) as drk,
// avg(score) over (partition by class order by score) as avg_score,
// max(score) over (partition by age) as max_score,
// count(id) over (partition by age) as cnt
private lazy val overAggGroups = {
ImmutableList.of(
new Window.Group(
ImmutableBitSet.of(5),
true,
RexWindowBound.create(SqlWindow.createUnboundedPreceding(new SqlParserPos(0, 0)), null),
RexWindowBound.create(SqlWindow.createCurrentRow(new SqlParserPos(0, 0)), null),
RelCollationImpl.of(new RelFieldCollation(
1, RelFieldCollation.Direction.ASCENDING, RelFieldCollation.NullDirection.FIRST)),
ImmutableList.of(
new Window.RexWinAggCall(
SqlStdOperatorTable.ROW_NUMBER,
longType,
ImmutableList.of[RexNode](),
0,
false
)
)
),
new Window.Group(
ImmutableBitSet.of(5),
false,
RexWindowBound.create(SqlWindow.createUnboundedPreceding(new SqlParserPos(4, 15)), null),
RexWindowBound.create(SqlWindow.createCurrentRow(new SqlParserPos(0, 0)), null),
RelCollationImpl.of(new RelFieldCollation(
2, RelFieldCollation.Direction.ASCENDING, RelFieldCollation.NullDirection.FIRST)),
ImmutableList.of(
new Window.RexWinAggCall(
SqlStdOperatorTable.RANK,
longType,
ImmutableList.of[RexNode](),
1,
false
),
new Window.RexWinAggCall(
SqlStdOperatorTable.DENSE_RANK,
longType,
ImmutableList.of[RexNode](),
2,
false
),
new Window.RexWinAggCall(
SqlStdOperatorTable.COUNT,
longType,
util.Arrays.asList(new RexInputRef(2, longType)),
3,
false
),
new Window.RexWinAggCall(
SqlStdOperatorTable.SUM,
doubleType,
util.Arrays.asList(new RexInputRef(2, doubleType)),
4,
false
)
)
),
new Window.Group(
ImmutableBitSet.of(),
false,
RexWindowBound.create(SqlWindow.createUnboundedPreceding(new SqlParserPos(7, 19)), null),
RexWindowBound.create(SqlWindow.createUnboundedFollowing(new SqlParserPos(0, 0)), null),
RelCollations.EMPTY,
ImmutableList.of(
new Window.RexWinAggCall(
SqlStdOperatorTable.MAX,
doubleType,
util.Arrays.asList(new RexInputRef(2, doubleType)),
5,
false
),
new Window.RexWinAggCall(
SqlStdOperatorTable.COUNT,
longType,
util.Arrays.asList(new RexInputRef(0, longType)),
6,
false
)
)
)
)
}
protected lazy val flinkLogicalSnapshot: FlinkLogicalSnapshot = {
new FlinkLogicalSnapshot(
cluster,
flinkLogicalTraits,
studentFlinkLogicalScan,
relBuilder.call(FlinkSqlOperatorTable.PROCTIME))
}
// SELECT * FROM student AS T JOIN TemporalTable
// FOR SYSTEM_TIME AS OF T.proctime AS D ON T.a = D.id
protected lazy val (batchLookupJoin, streamLookupJoin) = {
val temporalTableSource = new TestTemporalTable
val batchSourceOp = new TableSourceQueryOperation[RowData](temporalTableSource, true)
val batchScan = relBuilder.queryOperation(batchSourceOp).build().asInstanceOf[TableScan]
val batchLookupJoin = new BatchExecLookupJoin(
cluster,
batchPhysicalTraits,
studentBatchScan,
batchScan.getTable,
None,
JoinInfo.of(ImmutableIntList.of(0), ImmutableIntList.of(0)),
JoinRelType.INNER
)
val streamSourceOp = new TableSourceQueryOperation[RowData](temporalTableSource, false)
val streamScan = relBuilder.queryOperation(streamSourceOp).build().asInstanceOf[TableScan]
val streamLookupJoin = new StreamExecLookupJoin(
cluster,
streamPhysicalTraits,
studentBatchScan,
streamScan.getTable,
None,
JoinInfo.of(ImmutableIntList.of(0), ImmutableIntList.of(0)),
JoinRelType.INNER
)
(batchLookupJoin, streamLookupJoin)
}
// select * from MyTable1 join MyTable4 on MyTable1.b = MyTable4.a
protected lazy val logicalInnerJoinOnUniqueKeys: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable4")
.join(JoinRelType.INNER,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 0)))
.build
// select * from MyTable1 join MyTable2 on MyTable1.a = MyTable2.a
protected lazy val logicalInnerJoinNotOnUniqueKeys: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.join(JoinRelType.INNER,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0)))
.build
// select * from MyTable1 join MyTable2 on MyTable1.b = MyTable2.b
protected lazy val logicalInnerJoinOnLHSUniqueKeys: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.join(JoinRelType.INNER,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)))
.build
// select * from MyTable2 join MyTable1 on MyTable2.b = MyTable1.b
protected lazy val logicalInnerJoinOnRHSUniqueKeys: RelNode = relBuilder
.scan("MyTable2")
.scan("MyTable1")
.join(JoinRelType.INNER,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)))
.build
// select * from MyTable1 join MyTable2 on MyTable1.b = MyTable2.b and MyTable1.a > MyTable2.a
protected lazy val logicalInnerJoinWithEquiAndNonEquiCond: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.join(JoinRelType.INNER, relBuilder.call(AND,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)),
relBuilder.call(GREATER_THAN, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0))))
.build
// select * from MyTable1 join MyTable2 on MyTable1.a > MyTable2.a
protected lazy val logicalInnerJoinWithoutEquiCond: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.join(JoinRelType.INNER,
relBuilder.call(GREATER_THAN, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0)))
.build
// select * from MyTable1 join MyTable2 on MyTable1.e = MyTable2.e
protected lazy val logicalInnerJoinOnDisjointKeys: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.join(JoinRelType.INNER,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 4), relBuilder.field(2, 1, 4)))
.build
// select * from MyTable1 left join MyTable4 on MyTable1.b = MyTable4.a
protected lazy val logicalLeftJoinOnUniqueKeys: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable4")
.join(JoinRelType.LEFT,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 0)))
.build
// select * from MyTable1 left join MyTable2 on MyTable1.a = MyTable2.a
protected lazy val logicalLeftJoinNotOnUniqueKeys: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.join(JoinRelType.LEFT,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0)))
.build
// select * from MyTable1 left join MyTable2 on MyTable1.b = MyTable2.b
protected lazy val logicalLeftJoinOnLHSUniqueKeys: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.join(JoinRelType.LEFT,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)))
.build
// select * from MyTable2 left join MyTable1 on MyTable2.b = MyTable1.b
protected lazy val logicalLeftJoinOnRHSUniqueKeys: RelNode = relBuilder
.scan("MyTable2")
.scan("MyTable1")
.join(JoinRelType.LEFT,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)))
.build
// select * from MyTable1 left join MyTable2 on
// MyTable1.b = MyTable2.b and MyTable1.a > MyTable2.a
protected lazy val logicalLeftJoinWithEquiAndNonEquiCond: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.join(JoinRelType.LEFT, relBuilder.call(AND,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)),
relBuilder.call(GREATER_THAN, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0))))
.build
// select * from MyTable1 left join MyTable2 on MyTable1.a > MyTable2.a
protected lazy val logicalLeftJoinWithoutEquiCond: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.join(JoinRelType.LEFT,
relBuilder.call(GREATER_THAN, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0)))
.build
// select * from MyTable1 left join MyTable2 on MyTable1.e = MyTable2.e
protected lazy val logicalLeftJoinOnDisjointKeys: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.join(JoinRelType.LEFT,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 4), relBuilder.field(2, 1, 4)))
.build
// select * from MyTable1 right join MyTable4 on MyTable1.b = MyTable4.a
protected lazy val logicalRightJoinOnUniqueKeys: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable4")
.join(JoinRelType.RIGHT,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 0)))
.build
// select * from MyTable1 right join MyTable2 on MyTable1.a = MyTable2.a
protected lazy val logicalRightJoinNotOnUniqueKeys: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.join(JoinRelType.RIGHT,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0)))
.build
// select * from MyTable1 right join MyTable2 on MyTable1.b = MyTable2.b
protected lazy val logicalRightJoinOnLHSUniqueKeys: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.join(JoinRelType.RIGHT,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)))
.build
// select * from MyTable2 right join MyTable1 on MyTable2.b = MyTable1.b
protected lazy val logicalRightJoinOnRHSUniqueKeys: RelNode = relBuilder
.scan("MyTable2")
.scan("MyTable1")
.join(JoinRelType.RIGHT,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)))
.build
// select * from MyTable1 right join MyTable2 on
// MyTable1.b = MyTable2.b and MyTable1.a > MyTable2.a
protected lazy val logicalRightJoinWithEquiAndNonEquiCond: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.join(JoinRelType.RIGHT, relBuilder.call(AND,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)),
relBuilder.call(GREATER_THAN, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0))))
.build
// select * from MyTable1 right join MyTable2 on MyTable1.a > MyTable2.a
protected lazy val logicalRightJoinWithoutEquiCond: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.join(JoinRelType.RIGHT,
relBuilder.call(GREATER_THAN, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0)))
.build
// select * from MyTable1 right join MyTable2 on MyTable1.e = MyTable2.e
protected lazy val logicalRightJoinOnDisjointKeys: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.join(JoinRelType.RIGHT,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 4), relBuilder.field(2, 1, 4)))
.build
// select * from MyTable1 full join MyTable4 on MyTable1.b = MyTable4.a
protected lazy val logicalFullJoinOnUniqueKeys: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable4")
.join(JoinRelType.FULL,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 0)))
.build
// select * from MyTable1 full join MyTable2 on MyTable1.a = MyTable2.a
protected lazy val logicalFullJoinNotOnUniqueKeys: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.join(JoinRelType.FULL,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0)))
.build
// select * from MyTable1 full join MyTable2 on MyTable1.b = MyTable2.b
protected lazy val logicalFullJoinOnLHSUniqueKeys: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.join(JoinRelType.FULL,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)))
.build
// select * from MyTable2 full join MyTable1 on MyTable2.b = MyTable1.b
protected lazy val logicalFullJoinOnRHSUniqueKeys: RelNode = relBuilder
.scan("MyTable2")
.scan("MyTable1")
.join(JoinRelType.FULL,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)))
.build
// select * from MyTable1 full join MyTable2 on MyTable1.b = MyTable2.b and MyTable1.a >
// MyTable2.a
protected lazy val logicalFullJoinWithEquiAndNonEquiCond: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.join(JoinRelType.FULL, relBuilder.call(AND,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)),
relBuilder.call(GREATER_THAN, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0))))
.build
// select * from MyTable1 full join MyTable2 on MyTable1.a > MyTable2.a
protected lazy val logicalFullJoinWithoutEquiCond: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.join(JoinRelType.FULL,
relBuilder.call(GREATER_THAN, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0)))
.build
// select * from MyTable1 full join MyTable2 on MyTable1.e = MyTable2.e
protected lazy val logicalFullJoinOnDisjointKeys: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.join(JoinRelType.FULL,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 4), relBuilder.field(2, 1, 4)))
.build
// select * from MyTable1 full join MyTable2 on true
protected lazy val logicalFullJoinWithoutCond: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.join(JoinRelType.FULL, relBuilder.literal(true))
.build
  // select * from MyTable1 where b in (select a from MyTable4)
protected lazy val logicalSemiJoinOnUniqueKeys: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable4")
.join(JoinRelType.SEMI,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 0)))
.build()
  // select * from MyTable1 where a in (select a from MyTable2)
protected lazy val logicalSemiJoinNotOnUniqueKeys: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.join(JoinRelType.SEMI,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0)))
.build()
  // select * from MyTable1 where b in (select b from MyTable2)
protected lazy val logicalSemiJoinOnLHSUniqueKeys: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.join(JoinRelType.SEMI,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)))
.build()
  // select * from MyTable2 where a in (select b from MyTable1)
protected lazy val logicalSemiJoinOnRHSUniqueKeys: RelNode = relBuilder
.scan("MyTable2")
.scan("MyTable1")
.join(JoinRelType.SEMI,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)))
.build()
  // select * from MyTable1 where b in (select b from MyTable2 where MyTable1.a > MyTable2.a)
protected lazy val logicalSemiJoinWithEquiAndNonEquiCond: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.join(JoinRelType.SEMI, relBuilder.call(AND,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)),
relBuilder.call(GREATER_THAN, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0))))
.build
  // select * from MyTable1 where exists (select * from MyTable2 where MyTable1.a > MyTable2.a)
protected lazy val logicalSemiJoinWithoutEquiCond: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.join(JoinRelType.SEMI,
relBuilder.call(GREATER_THAN, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0)))
.build()
// select * from MyTable1 where e in (select e from MyTable2)
protected lazy val logicalSemiJoinOnDisjointKeys: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.join(JoinRelType.SEMI,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 4), relBuilder.field(2, 1, 4)))
.build
  // select * from MyTable1 where not exists (select * from MyTable4 where MyTable1.b = MyTable4.a)
protected lazy val logicalAntiJoinOnUniqueKeys: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable4")
.join(JoinRelType.ANTI,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 0)))
.build()
  // select * from MyTable1 where not exists (select * from MyTable2 where MyTable1.a = MyTable2.a)
protected lazy val logicalAntiJoinNotOnUniqueKeys: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.join(JoinRelType.ANTI,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0)))
.build()
  // select * from MyTable1 where not exists (select * from MyTable2 where MyTable1.b = MyTable2.b)
protected lazy val logicalAntiJoinOnLHSUniqueKeys: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.join(JoinRelType.ANTI,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)))
.build()
  // select * from MyTable2 where not exists (select * from MyTable1 where MyTable1.b = MyTable2.b)
protected lazy val logicalAntiJoinOnRHSUniqueKeys: RelNode = relBuilder
.scan("MyTable2")
.scan("MyTable1")
.join(JoinRelType.ANTI,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)))
.build()
  // select * from MyTable1 where b not in (select b from MyTable2 where MyTable1.a = MyTable2.a)
// notes: the nullable of b is true
protected lazy val logicalAntiJoinWithEquiAndNonEquiCond: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.join(JoinRelType.ANTI, relBuilder.call(AND,
relBuilder.call(OR,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)),
relBuilder.isNull(
relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)))),
relBuilder.call(EQUALS, relBuilder.field(2, 0, 0), relBuilder.field(2, 1, 0))))
.build
  // select * from MyTable1 where b not in (select b from MyTable2)
// notes: the nullable of b is true
protected lazy val logicalAntiJoinWithoutEquiCond: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.join(JoinRelType.ANTI, relBuilder.call(OR,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)),
relBuilder.isNull(
relBuilder.call(EQUALS, relBuilder.field(2, 0, 1), relBuilder.field(2, 1, 1)))))
.build
// select * from MyTable1 where not exists (select e from MyTable2 where MyTable1.e = MyTable2.e)
protected lazy val logicalAntiJoinOnDisjointKeys: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.join(JoinRelType.ANTI,
relBuilder.call(EQUALS, relBuilder.field(2, 0, 4), relBuilder.field(2, 1, 4)))
.build
  // SELECT * FROM MyTable1 UNION ALL SELECT * FROM MyTable2
protected lazy val logicalUnionAll: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.union(true).build()
  // SELECT * FROM MyTable1 UNION SELECT * FROM MyTable2
protected lazy val logicalUnion: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.union(false).build()
  // SELECT * FROM MyTable1 INTERSECT ALL SELECT * FROM MyTable2
protected lazy val logicalIntersectAll: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.intersect(true).build()
  // SELECT * FROM MyTable1 INTERSECT SELECT * FROM MyTable2
protected lazy val logicalIntersect: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.intersect(false).build()
  // SELECT * FROM MyTable1 MINUS ALL SELECT * FROM MyTable2
protected lazy val logicalMinusAll: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.minus(true).build()
  // SELECT * FROM MyTable1 MINUS SELECT * FROM MyTable2
protected lazy val logicalMinus: RelNode = relBuilder
.scan("MyTable1")
.scan("MyTable2")
.minus(false).build()
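  // Creates a scan over the given table; the concrete scan node is chosen by the convention
  // carried in `traitSet` (Calcite NONE, Flink logical, batch physical or stream physical).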
protected def createDataStreamScan[T](
tableNames: util.List[String], traitSet: RelTraitSet): T = {
val table = relBuilder
.getRelOptSchema
.asInstanceOf[CalciteCatalogReader]
.getTable(tableNames)
.asInstanceOf[FlinkPreparingTableBase]
val conventionTrait = traitSet.getTrait(ConventionTraitDef.INSTANCE)
val scan = conventionTrait match {
case Convention.NONE =>
relBuilder.clear()
val scan = relBuilder.scan(tableNames).build()
scan.copy(traitSet, scan.getInputs)
case FlinkConventions.LOGICAL =>
new FlinkLogicalDataStreamTableScan(cluster, traitSet, table)
case FlinkConventions.BATCH_PHYSICAL =>
new BatchExecBoundedStreamScan(cluster, traitSet, table, table.getRowType)
case FlinkConventions.STREAM_PHYSICAL =>
new StreamExecDataStreamScan(cluster, traitSet, table, table.getRowType)
case _ => throw new TableException(s"Unsupported convention trait: $conventionTrait")
}
scan.asInstanceOf[T]
}
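  // Converts the given string values into RexLiterals matching the field types of `rowType`;
  // a null value becomes a null literal of the corresponding type.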
protected def createLiteralList(
rowType: RelDataType,
literalValues: Seq[String]): util.List[RexLiteral] = {
require(literalValues.length == rowType.getFieldCount)
val rexBuilder = relBuilder.getRexBuilder
literalValues.zipWithIndex.map {
case (v, index) =>
val fieldType = rowType.getFieldList.get(index).getType
if (v == null) {
rexBuilder.makeNullLiteral(fieldType)
} else {
fieldType.getSqlTypeName match {
case BIGINT => rexBuilder.makeLiteral(v.toLong, fieldType, true)
case INTEGER => rexBuilder.makeLiteral(v.toInt, fieldType, true)
case BOOLEAN => rexBuilder.makeLiteral(v.toBoolean)
case DATE => rexBuilder.makeDateLiteral(new DateString(v))
case TIME => rexBuilder.makeTimeLiteral(new TimeString(v), 0)
case TIMESTAMP => rexBuilder.makeTimestampLiteral(new TimestampString(v), 0)
case DOUBLE => rexBuilder.makeApproxLiteral(BigDecimal.valueOf(v.toDouble))
case FLOAT => rexBuilder.makeApproxLiteral(BigDecimal.valueOf(v.toFloat))
case VARCHAR => rexBuilder.makeLiteral(v)
case _ => throw new TableException(s"${fieldType.getSqlTypeName} is not supported!")
}
}.asInstanceOf[RexLiteral]
}.toList
}
protected def createLogicalCalc(
input: RelNode,
outputRowType: RelDataType,
projects: util.List[RexNode],
conditions: util.List[RexNode]): Calc = {
val predicate: RexNode = if (conditions == null || conditions.isEmpty) {
null
} else {
RexUtil.composeConjunction(rexBuilder, conditions, true)
}
val program = RexProgram.create(
input.getRowType,
projects,
predicate,
outputRowType,
rexBuilder)
FlinkLogicalCalc.create(input, program)
}
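  // Creates a literal RexNode of the given logical type; `isNullable` controls the type's
  // nullability and `allowCast` lets the builder wrap the value in a cast if needed.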
protected def makeLiteral(
value: Any,
internalType: LogicalType,
isNullable: Boolean = false,
allowCast: Boolean = true): RexNode = {
rexBuilder.makeLiteral(
value,
typeFactory.createFieldTypeFromLogicalType(internalType.copy(isNullable)),
allowCast
)
}
}
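// A simple single-input RelNode with a constant cost, used by the metadata handler tests.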
class TestRel(
cluster: RelOptCluster,
traits: RelTraitSet,
input: RelNode) extends SingleRel(cluster, traits, input) {
override def computeSelfCost(planner: RelOptPlanner, mq: RelMetadataQuery): RelOptCost = {
planner.getCostFactory.makeCost(1.0, 1.0, 1.0)
}
}
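// Registers Flink's default metadata provider with Calcite's thread-local RelMetadataQuery
// before any test in the suite runs.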
object FlinkRelMdHandlerTestBase {
@BeforeClass
def beforeAll(): Unit = {
RelMetadataQueryBase
.THREAD_PROVIDERS
.set(JaninoRelMetadataProvider.of(FlinkDefaultRelMetadataProvider.INSTANCE))
}
}
| hequn8128/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/metadata/FlinkRelMdHandlerTestBase.scala | Scala | apache-2.0 | 92,020 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.kernel.protocol.v5.content
import org.scalatest.{Matchers, FunSpec}
import play.api.libs.json._
class ErrorContentSpec extends FunSpec with Matchers {
val errorJson: JsValue = Json.parse("""
{
"ename": "<STRING>",
"evalue": "<STRING>",
"traceback": ["<STRING>"]
}
""")
val error = ErrorContent("<STRING>", "<STRING>", List("<STRING>"))
describe("ErrorContent") {
describe("#toTypeString") {
it("should return correct type") {
ErrorContent.toTypeString should be ("error")
}
}
describe("implicit conversions") {
it("should implicitly convert from valid json to a ErrorContent instance") {
// This is the least safe way to convert as an error is thrown if it fails
errorJson.as[ErrorContent] should be (error)
}
it("should also work with asOpt") {
// This is safer, but we lose the error information as it returns
// None if the conversion fails
val newCompleteRequest = errorJson.asOpt[ErrorContent]
newCompleteRequest.get should be (error)
}
it("should also work with validate") {
// This is the safest as it collects all error information (not just first error) and reports it
val CompleteRequestResults = errorJson.validate[ErrorContent]
CompleteRequestResults.fold(
(invalid: Seq[(JsPath, Seq[JsonValidationError])]) => println("Failed!"),
(valid: ErrorContent) => valid
) should be (error)
}
it("should implicitly convert from a ErrorContent instance to valid json") {
Json.toJson(error) should be (errorJson)
}
}
}
}
| lresende/incubator-toree | protocol/src/test/scala/org/apache/toree/kernel/protocol/v5/content/ErrorContentSpec.scala | Scala | apache-2.0 | 2,492 |
/*
* RichVector2iSpec.scala
*
* Copyright (c) 2013 Lonnie Pryor III
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fulcrum.math
import org.scalatest.FunSpec
import org.scalatest.matchers.ShouldMatchers
/**
* Test case for [[fulcrum.math.RichVector2i]].
*
* @author Lonnie Pryor III ([email protected])
*/
@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
class RichVector2iSpec extends FunSpec with ShouldMatchers {
describe("RichVector2i") {
it("should support unary operators from Int") {
val v = Vector2(1, 2)
~v should equal(Vector2(~1, ~2))
+v should equal(Vector2(+1, +2))
-v should equal(Vector2(-1, -2))
}
it("should support shift operators from Int") {
val v = Vector2(2, 4)
v << 1 should equal(Vector2(4, 8))
v << Vector2(1, 1) should equal(Vector2(4, 8))
v >> 1 should equal(Vector2(1, 2))
v >> Vector2(1, 1) should equal(Vector2(1, 2))
v >>> 1 should equal(Vector2(1, 2))
v >>> Vector2(1, 1) should equal(Vector2(1, 2))
}
it("should support comparison operators from Int") {
import Vector2._
val v = Vector2(1, 2)
v === 1 should equal(Vector2(true, false))
v === Vector2(1, 2) should equal(Vector2(true, true))
v === 2 should equal(Vector2(false, true))
v === Vector2(2, 1) should equal(Vector2(false, false))
v =!= 1 should equal(Vector2(false, true))
v =!= Vector2(1, 2) should equal(Vector2(false, false))
v =!= 2 should equal(Vector2(true, false))
v =!= Vector2(2, 1) should equal(Vector2(true, true))
v < 1 should equal(Vector2(false, false))
v < Vector2(1, 2) should equal(Vector2(false, false))
v < 3 should equal(Vector2(true, true))
v < Vector2(3, 4) should equal(Vector2(true, true))
v <= 1 should equal(Vector2(true, false))
v <= Vector2(1, 2) should equal(Vector2(true, true))
v <= 3 should equal(Vector2(true, true))
v <= Vector2(3, 4) should equal(Vector2(true, true))
v > 1 should equal(Vector2(false, true))
v > Vector2(1, 2) should equal(Vector2(false, false))
v > 0 should equal(Vector2(true, true))
v > Vector2(0, 1) should equal(Vector2(true, true))
v >= 1 should equal(Vector2(true, true))
v >= Vector2(1, 2) should equal(Vector2(true, true))
v >= 3 should equal(Vector2(false, false))
v >= Vector2(3, 4) should equal(Vector2(false, false))
}
it("should support bitwise operators from Int") {
val v = Vector2(0x0F, 0xF0)
v | 0xF0 should equal(Vector2(0xFF, 0xF0))
v | Vector2(0xF0, 0x0F) should equal(Vector2(0xFF, 0xFF))
v & 0xF0 should equal(Vector2(0x00, 0xF0))
v & Vector2(0xF0, 0x0F) should equal(Vector2(0x00, 0x00))
v ^ 0xF0 should equal(Vector2(0xFF, 0x00))
v ^ Vector2(0xF0, 0x0F) should equal(Vector2(0xFF, 0xFF))
}
it("should support arithemetic operators from Int") {
import Vector2._
val v1 = Vector2(1, 2)
val v2 = Vector2(3, 4)
v1 + 1 should equal(Vector2(2, 3))
v1 + v2 should equal(Vector2(4, 6))
v1 :+ 1 should equal(Vector2(2, 3))
v1 :+ v2 should equal(Vector2(4, 6))
v2 - 1 should equal(Vector2(2, 3))
v2 - v1 should equal(Vector2(2, 2))
v1 * 2 should equal(Vector2(2, 4))
v1 * v2 should equal(Vector2(3, 8))
v2 / 2 should equal(Vector2(1, 2))
v2 / v1 should equal(Vector2(3, 2))
v2 % 2 should equal(Vector2(1, 0))
v2 % v1 should equal(Vector2(0, 0))
}
it("should create immutable copies by converting elements") {
val v = Vector2(1, 0)
v.toVector2b should equal(immutable.Vector2(true, false))
v.toVector2f should equal(immutable.Vector2(1f, 0f))
}
}
} | lpryor/fulcrum | math/src/test/scala/fulcrum/math/RichVector2iSpec.scala | Scala | apache-2.0 | 4,292 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, ExprCode}
import org.apache.spark.sql.types.{DataType, IntegerType}
/**
* A test suite for testing [[ExpressionEvalHelper]].
*
* Yes, we should write test cases for test harnesses, in case
* they have behaviors that are easy to break.
*/
class ExpressionEvalHelperSuite extends SparkFunSuite with ExpressionEvalHelper {
test("SPARK-16489 checkEvaluation should fail if expression reuses variable names") {
val e = intercept[RuntimeException] { checkEvaluation(BadCodegenExpression(), 10) }
assert(e.getMessage.contains("some_variable"))
}
}
/**
* An expression that generates bad code (variable name "some_variable" is not unique across
 * instances of the expression).
*/
case class BadCodegenExpression() extends LeafExpression {
override def nullable: Boolean = false
override def eval(input: InternalRow): Any = 10
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
ev.copy(code =
s"""
|int some_variable = 11;
|int ${ev.value} = 10;
""".stripMargin)
}
override def dataType: DataType = IntegerType
}
| Panos-Bletsos/spark-cost-model-optimizer | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionEvalHelperSuite.scala | Scala | apache-2.0 | 2,113 |
package github.keylity
import org.specs2.mutable._
class MutableOnceSpec extends Specification {
"MutableOnce behavior" should {
"Create, mute once and twice like" in {
val id = new MutableOnce(0)
id() must_== 0
id.set(1)
id() must_== 1
id.set(2) must throwA[IllegalStateException]
}
}
}
| sorra/keylity | src/test/scala/github/keylity/MutableOnceSpec.scala | Scala | mit | 334 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.plans.logical
/**
* Estimates of various statistics. The default estimation logic simply lazily multiplies the
* corresponding statistic produced by the children. To override this behavior, override
* `statistics` and assign it an overridden version of `Statistics`.
*
 * That is, by default a node's statistics are just the lazy product of the corresponding
 * statistics produced by its children; to change this, override `statistics` and assign it
 * an overridden version of `Statistics`.
*
* '''NOTE''': concrete and/or overridden versions of statistics fields should pay attention to the
* performance of the implementations. The reason is that estimations might get triggered in
* performance-critical processes, such as query plan planning.
*
* Note that we are using a BigInt here since it is easy to overflow a 64-bit integer in
* cardinality estimation (e.g. cartesian joins).
*
* @param sizeInBytes Physical size in bytes. For leaf operators this defaults to 1, otherwise it
* defaults to the product of children's `sizeInBytes`.
*/
private[sql] case class Statistics(sizeInBytes: BigInt)
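// A minimal illustrative sketch (not part of Spark's API) of the default rule described
// above: a parent's size estimate is simply the product of its children's estimates.
// The object and method names here are hypothetical and exist only for illustration.
private[sql] object StatisticsExample {
  def defaultSizeInBytes(childStats: Seq[Statistics]): BigInt =
    childStats.map(_.sizeInBytes).product
}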
| tophua/spark1.52 | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/Statistics.scala | Scala | apache-2.0 | 1,961 |
package jsonslim
import org.scalatest.FunSpec
import org.json4s.JsonDSL._
import org.json4s.{JArray, JValue}
class SrcTest extends FunSpec {
describe("Src") {
it ("should apply to strings") {
assert(Src("[]") == Some(JArray(Nil)))
}
it ("should apply to jvalues") {
val value: JValue = ("foo" -> 1) ~ ("bar" -> "boom")
assert(Src(value) == Some(value))
}
}
}
| meetup/json-slim | src/test/scala/SrcTest.scala | Scala | mit | 399 |
package com.regblanc.sgl.board
package core
import sgl._
import geometry._
import scene._
import scene.ui._
import util._
trait ScreensComponent {
this: GraphicsProvider with InputProvider with SystemProvider with WindowProvider
with GameStateComponent with LoggingProvider with ViewportComponent =>
private implicit val LogTag = Logger.Tag("main-screen")
class BoardScreen extends GameScreen {
override def name: String = "board-screen"
val BoardSize: Int = 1
val WorldHeight: Int = 10
val WorldWidth: Float = Window.width*(WorldHeight/Window.height.toFloat)
val viewport = new Viewport(Window.width, Window.height)
viewport.setCamera(0, 0, WorldWidth, WorldHeight)
viewport.scalingStrategy = Viewport.Fit
val p = (0f, 0f)
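    // Pans the camera by half a world unit in the direction of the pressed arrow key.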
def handleEvent(e: Input.InputEvent): Unit = e match {
case Input.KeyDownEvent(Input.Keys.Down) =>
viewport.translateCamera(0, 0.5f)
case Input.KeyDownEvent(Input.Keys.Up) =>
viewport.translateCamera(0, -0.5f)
case Input.KeyDownEvent(Input.Keys.Left) =>
viewport.translateCamera(-0.5f, 0)
case Input.KeyDownEvent(Input.Keys.Right) =>
viewport.translateCamera(0.5f, 0)
case _ =>
}
Input.setEventProcessor(handleEvent)
override def update(dt: Long): Unit = { }
override def render(canvas: Graphics.Canvas): Unit = {
canvas.drawColor(Graphics.Color.Red)
viewport.withViewport(canvas) {
for(i <- 0 until 100) {
for(j <- 0 until 100) {
val color = if((i+j) % 2 == 0) Graphics.Color.White else Graphics.Color.Black
canvas.drawRect(j, i, 1f, 1f, Graphics.defaultPaint.withColor(color))
}
}
canvas.drawCircle(p._1 + 0.5f, p._2 + 0.5f, 0.5f, Graphics.defaultPaint.withColor(Graphics.Color.Green))
}
}
}
}
| regb/scala-game-library | examples/board/core/src/main/scala/MainScreen.scala | Scala | mit | 1,806 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.lang.{Byte => JByte}
import java.util.Properties
import kafka.network.SocketServer
import kafka.security.authorizer.AclEntry
import org.apache.kafka.common.message.{DescribeClusterRequestData, DescribeClusterResponseData}
import org.apache.kafka.common.protocol.ApiKeys
import org.apache.kafka.common.requests.{DescribeClusterRequest, DescribeClusterResponse}
import org.apache.kafka.common.resource.ResourceType
import org.apache.kafka.common.utils.Utils
import org.junit.jupiter.api.Assertions.assertEquals
import org.junit.jupiter.api.{BeforeEach, Test}
import scala.jdk.CollectionConverters._
class DescribeClusterRequestTest extends BaseRequestTest {
override def brokerPropertyOverrides(properties: Properties): Unit = {
properties.setProperty(KafkaConfig.OffsetsTopicPartitionsProp, "1")
properties.setProperty(KafkaConfig.DefaultReplicationFactorProp, "2")
properties.setProperty(KafkaConfig.RackProp, s"rack/${properties.getProperty(KafkaConfig.BrokerIdProp)}")
}
@BeforeEach
override def setUp(): Unit = {
doSetup(createOffsetsTopic = false)
}
@Test
def testDescribeClusterRequestIncludingClusterAuthorizedOperations(): Unit = {
testDescribeClusterRequest(true)
}
@Test
def testDescribeClusterRequestExcludingClusterAuthorizedOperations(): Unit = {
testDescribeClusterRequest(false)
}
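  // Sends DESCRIBE_CLUSTER requests for every supported API version and checks the returned
  // brokers, controller id, cluster id and (optionally) the cluster authorized operations.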
def testDescribeClusterRequest(includeClusterAuthorizedOperations: Boolean): Unit = {
val expectedBrokers = servers.map { server =>
new DescribeClusterResponseData.DescribeClusterBroker()
.setBrokerId(server.config.brokerId)
.setHost("localhost")
.setPort(server.socketServer.boundPort(listenerName))
.setRack(server.config.rack.orNull)
}.toSet
val expectedControllerId = servers.filter(_.kafkaController.isActive).last.config.brokerId
val expectedClusterId = servers.last.clusterId
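    // When requested, the broker encodes the supported cluster ACL operations as a 32-bit bit field;
    // otherwise it returns Int.MinValue to signal that the field was omitted.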
val expectedClusterAuthorizedOperations = if (includeClusterAuthorizedOperations) {
Utils.to32BitField(
AclEntry.supportedOperations(ResourceType.CLUSTER)
.map(_.code.asInstanceOf[JByte]).asJava)
} else {
Int.MinValue
}
for (version <- ApiKeys.DESCRIBE_CLUSTER.oldestVersion to ApiKeys.DESCRIBE_CLUSTER.latestVersion) {
val describeClusterRequest = new DescribeClusterRequest.Builder(new DescribeClusterRequestData()
.setIncludeClusterAuthorizedOperations(includeClusterAuthorizedOperations))
.build(version.toShort)
      val describeClusterResponse = sendDescribeClusterRequest(describeClusterRequest)
assertEquals(expectedControllerId, describeClusterResponse.data.controllerId)
assertEquals(expectedClusterId, describeClusterResponse.data.clusterId)
assertEquals(expectedClusterAuthorizedOperations, describeClusterResponse.data.clusterAuthorizedOperations)
assertEquals(expectedBrokers, describeClusterResponse.data.brokers.asScala.toSet)
}
}
  private def sendDescribeClusterRequest(request: DescribeClusterRequest, destination: Option[SocketServer] = None): DescribeClusterResponse = {
connectAndReceive[DescribeClusterResponse](request, destination = destination.getOrElse(anySocketServer))
}
}
| guozhangwang/kafka | core/src/test/scala/unit/kafka/server/DescribeClusterRequestTest.scala | Scala | apache-2.0 | 4,035 |
package ru.zconstz.shortener.service
import akka.actor.{ActorRef, Props}
import ru.zconstz.shortener.Boot
trait ServiceRefs {
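  // Each actor is created lazily, on first access, in the application-wide actor system (Boot.system).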
lazy val tokenActor: ActorRef = Boot.system.actorOf(Props[TokenActor])
lazy val linkActor: ActorRef = Boot.system.actorOf(Props[LinkActor])
lazy val clickActor: ActorRef = Boot.system.actorOf(Props[ClickActor])
lazy val folderActor: ActorRef = Boot.system.actorOf(Props[FolderActor])
}
| konstantin-zlobin/url-shortener | src/main/scala/ru/zconstz/shortener/service/ServiceRefs.scala | Scala | apache-2.0 | 423 |
class SimpleClass {
def foo = 239
}
| scala/scala | test/files/scalap/simpleClass.scala | Scala | apache-2.0 | 38 |
/*
* Copyright 2016 Carlo Micieli
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.hascalator
package control
import Prelude._
import scala.annotation.implicitNotFound
/** A functor with application: values can be lifted into the context `F`, and functions
 * that are themselves wrapped in `F` can be applied to wrapped values.
 *
 * @tparam F the type constructor made an instance of the Applicative type class
 * @author Carlo Micieli
 * @since 0.0.1
 */
@implicitNotFound("The type ${F} was not made instance of the Applicative type class")
trait Applicative[F[_]] extends Functor[F] { self =>
  /** Lifts a value into the applicative context.
   * @param x the value to lift
   * @tparam A the type of the value
   * @return the value wrapped in `F`
   */
def pure[A](x: A): F[A]
  /** Sequential application: applies a function wrapped in `F` to a value wrapped in `F`.
   *
   * @param x the lifted value
   * @param f the lifted function to apply
   * @tparam A the argument type
   * @tparam B the result type
   * @return the result of the application, wrapped in `F`
   */
def <*>[A, B](x: F[A])(f: F[A => B]): F[B]
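  // Note: these default implementations of *> and <* simply return one argument and discard the
  // other action entirely; a lawful instance would sequence both effects via <*>.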
def *>[A, B](fa: F[A])(fb: F[B]): F[B] = fb
def <*[A, B](fa: F[A])(fb: F[B]): F[A] = fa
trait ApplicativeLaws {
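    // Placeholder: the identity law (pure(identity) <*> v == v) is not actually verified yet.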
def identity[A](v: A): Boolean = {
false
}
}
}
object Applicative {
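  // Summons the implicit instance for F. Usage sketch, assuming an Applicative[Option]
  // instance is in scope:
  //   val two = Applicative[Option].pure(2)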
def apply[F[_]](implicit ev: Applicative[F]): Applicative[F] = ev
}
| CarloMicieli/hascalator | core/src/main/scala/io/hascalator/control/Applicative.scala | Scala | apache-2.0 | 1,438 |
package varys.util
/**
* Created by franklab on 15-4-3.
*/
import java.net.{DatagramPacket, InetAddress, DatagramSocket}
import akka.actor.{ActorRef, Props, Actor}
import varys.Logging
import scala.sys.process._
import varys.framework._
class DNBD (
val p: Int,
val eth: String)
extends Actor with Logging{
val port = p
val interface = eth
var isStart = false
var bandwidth: Int = 0
class sendTask(sender: ActorRef, host: String) extends Runnable {
val sender_ = sender
val host_ = host
override def run: Unit = {
val bn = send(host)
sender_ ! bn
}
}
class getRemainningBWTask(sender: ActorRef, isTx: Boolean) extends Runnable {
val sender_ = sender
override def run: Unit = {
val bd = new Bandwidth(interface)
if (isTx) {
val transRate = bd.readTx()
sender_ ! bandwidth - transRate
} else {
val transRate = bd.readRx()
sender_ ! bandwidth - transRate
}
}
}
override def receive = {
case GetBottleNeck(host) =>
//never block in actor !!!
val start = new Thread(new sendTask(sender, host))
      start.start()
case GetRemainingTX =>
val start = new Thread(new getRemainningBWTask(sender, true))
      start.start()
case GetRemainingRX =>
val start = new Thread(new getRemainningBWTask(sender, false))
      start.start()
case StartServer =>
if (!isStart) {
val start = new Thread(new Runnable {
override def run(): Unit = {
bind(port)
}
})
start.start()
isStart = true
}
case _ => logError("DNBD receive something wrong !!!")
}
override def preStart(): Unit = {
val bd = new Bandwidth(interface)
bandwidth = bd.getBW()
logInfo("DNBD bandwidth of host is: %d bps".format(bandwidth))
}
def send(host: String): Int = {
try {
val s = new DatagramSocket()
val addr = InetAddress.getByName(host)
//get remaining bw of this end host
val bd = new Bandwidth(interface)
val transRate = bd.readTx()
val data = (bandwidth - transRate).toString.getBytes
logInfo("DNBD TX bandwidth of Source: %d".format(bandwidth - transRate))
//println("client:")
//data.foreach(print)
val packet = new DatagramPacket(data, data.length, addr, port)
s.send(packet)
//wait for the return
val recvData = new Array[Byte](1024)
val recvPacket = new DatagramPacket(recvData, recvData.length)
s.receive(recvPacket)
val buf = new String(recvPacket.getData)
val bwPattern = "[0-9]+".r
//convert to int
val size = bwPattern.findFirstIn(buf).getOrElse(0).toString.toInt
logInfo("DNBD bottleneck of Network: %d bps".format(size))
return size
} catch {
case e: Exception => e.printStackTrace()
return 0
}
}
def bind(port: Int): Boolean = {
logInfo("DNBD Server is listening at %s : %d".format(interface, port))
val serverSock = new DatagramSocket(port)
val recvBuff = new Array[Byte](1024)
while (true) {
for (i <- 0 until recvBuff.length)
recvBuff(i) = 0
val recvPacket = new DatagramPacket(recvBuff, recvBuff.length)
serverSock.receive(recvPacket)
val buf = new String(recvPacket.getData)
val bwPattern = "[0-9]+".r
var size = bwPattern.findFirstIn(buf).getOrElse(0).toString.toInt
logInfo("DNBD destination receive bandwidth: %d".format(size))
val bd = new Bandwidth(interface)
val recvRate = bd.readRx();
val rxRate = bandwidth - recvRate;
logInfo("DNBD RX bandwidth of destination: %d".format(rxRate))
if (size > rxRate)
size = rxRate
//TODO it's better to add ip info in log
logInfo("DNBD the bottleneck of the link: %d".format(size))
//send the bottleneck back to the client
val clientAddr = recvPacket.getAddress;
val clientPort = recvPacket.getPort;
val sendPacket = new DatagramPacket(size.toString.getBytes, size.toString.getBytes.length, clientAddr, clientPort)
serverSock.send(sendPacket)
}
true
}
class Bandwidth(val interface: String) {
val eth = interface
def readRx(): Int = {
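      // Sample rx_bytes twice, 100 ms apart: the delta * 10 approximates bytes/s and * 8 converts to bits/s.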
var rx: Int = 0
val rx0Str = ("cat /sys/class/net/%s/statistics/rx_bytes".format(interface) !!)
val pattern = "[0-9]+".r
      //TODO this may not be safe here
val rx0 = pattern.findFirstIn(rx0Str).getOrElse(0).toString
//println(rx0)
//rx0Str.foreach(println)
Thread.sleep(100)
val rx1Str = ("cat /sys/class/net/%s/statistics/rx_bytes".format(interface) !!)
      //TODO this may not be safe here
val rx1 = pattern.findFirstIn(rx1Str).getOrElse(0).toString
//println(rx1Str)
rx = stringMinus(rx1, rx0) * 10
//println(rx)
return rx * 8
}
def readTx(): Int = {
var tx: Int = 0
val tx0Str = ("cat /sys/class/net/%s/statistics/tx_bytes".format(interface) !!)
val pattern = "[0-9]+".r
      //TODO this may not be safe here
val tx0 = pattern.findFirstIn(tx0Str).getOrElse(0).toString
Thread.sleep(100)
val tx1Str = ("cat /sys/class/net/%s/statistics/tx_bytes".format(interface) !!)
      //TODO this may not be safe here
val tx1 = pattern.findFirstIn(tx1Str).getOrElse(0).toString
//println(rx1Str)
tx = stringMinus(tx1, tx0) * 10
//println(tx)
return tx * 8
}
def getBW(): Int = {
//val res = "echo 05806056966" #| "sudo -S ethtool eth0" #| "grep Speed" !
val buffer = new StringBuffer()
val cmd = Seq("ethtool", eth)
val lines = cmd lines_! ProcessLogger(buffer append _)
//println(lines)
var bwStr = ""
for (s <- lines if s.contains("Speed")) bwStr = s
val bwPattern = "[0-9]+".r
val bw = bwPattern.findFirstIn(bwStr).getOrElse(0).toString.toInt
val ret = bw * 1024 * 1024
//println(ret)
ret
}
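    // Digit-by-digit subtraction of two numeric strings (presumably to avoid overflowing Int on
    // large byte counters); exact as long as both strings have the same number of digits.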
def stringMinus(l: String, r: String): Int = {
var res = 0
var size = l.size
if (r.size < size)
size = r.size
for (x <- size -1 to 0 by -1) {
res = res + (l.charAt(x) - r.charAt(x)) * (scala.math.pow(10, (size - 1 - x)).toInt)
}
res
}
}
}
| frankfzw/varys | core/src/main/scala/varys/util/DNBD.scala | Scala | apache-2.0 | 6,283 |
package uk.gov.gds.common.testutil
import uk.gov.gds.common.logging.Logging
import java.lang.ProcessBuilder
import collection.JavaConversions._
import sys.process.Process
trait PlayRunner extends LazyStop with Logging {
override protected def definitionOfSoonInSeconds = 10
val playAppName: String
val playAppPort: String
def projectRoot = "./"
def startCommand = List(projectRoot + "playctl.sh", playAppName, "start", playAppPort)
def stopCommand = List(projectRoot + "playctl.sh", playAppName, "stop")
override def doStart = {
val builder = new ProcessBuilder
logger.info("Running " + startCommand)
builder.command(startCommand)
val p = builder.start()
p.waitFor()
}
override def doStop = {
logger.info("Running " + projectRoot + "playctl.sh " + playAppName + " stop ")
Process(stopCommand).!
}
} | alphagov/gds-scala-common | scala-utils/src/main/scala/uk/gov/gds/common/testutil/PlayRunner.scala | Scala | mit | 858 |
package be.studiocredo
import be.studiocredo.aws.DownloadService
import be.studiocredo.account._
import be.studiocredo.reservations.ReservationEngineMonitorService
import com.google.inject.{AbstractModule, Singleton}
import net.codingwell.scalaguice.{ScalaModule, ScalaMultibinder}
import play.api.Play
object Modules {
class BaseModule extends AbstractModule with ScalaModule {
def configure() {
binder().disableCircularProxies()
bind[EventService].in[Singleton]
bind[UserService].in[Singleton]
bind[VenueService].in[Singleton]
bind[ShowService].in[Singleton]
bind[OrderService].in[Singleton]
bind[PreReservationService].in[Singleton]
bind[NotificationService].in[Singleton]
bind[PaymentService].in[Singleton]
bind[TicketService].in[Singleton]
bind[ReservationEngineMonitorService].asEagerSingleton()
bind[DownloadService].asEagerSingleton()
val accountStatementImportServiceUploadType = Play.current.configuration.getString(AccountStatementImportConfigurationKeys.uploadType)
if (accountStatementImportServiceUploadType.isDefined) {
accountStatementImportServiceUploadType match {
case Some("axa") => bind[TransactionImporter].to[AXATransactionImporter].asEagerSingleton()
case _ => bind[TransactionImporter].to[NullTransactionImporter].asEagerSingleton()
}
bind[AccountStatementImportService].to[UploadAccountStatementImportService].asEagerSingleton()
} else if (Play.current.configuration.getString(AccountStatementImportConfigurationKeys.codaboxClient).isDefined) {
bind[AccountStatementImportService].to[CodaboxAccountStatementImportService].asEagerSingleton()
bind[CodaboxSyncService].asEagerSingleton()
} else {
bind[AccountStatementImportService].to[NullAccountStatementImportService].asEagerSingleton()
}
bind[controllers.Application].in[Singleton]
bind[controllers.admin.EventDetails].in[Singleton]
bind[controllers.admin.Events].in[Singleton]
bind[controllers.admin.UserDetails].in[Singleton]
bind[controllers.admin.Venues].in[Singleton]
bind[controllers.admin.Orders].in[Singleton]
bind[controllers.Orders].in[Singleton]
bind[controllers.Events].in[Singleton]
}
}
class AuthModule extends AbstractModule with ScalaModule {
def configure() {
import be.studiocredo.auth._
binder().disableCircularProxies()
bind[AuthenticatorService].in[Singleton]
// bind[AuthTokenStore].to[CacheAuthTokenStore].in[Singleton]
bind[AuthTokenStore].to[DbAuthTokenStore].in[Singleton]
bind[IdentityService].in[Singleton]
bind[AuthTokenExpireService].asEagerSingleton()
// todo replace with method that doesn't suck
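      // Bind every long-running Service implementation into an injectable Set[Service] via a Guice multibinding.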
val multi = ScalaMultibinder.newSetBinder[Service](binder)
multi.addBinding().to[AuthTokenExpireService]
multi.addBinding().to[ReservationEngineMonitorService]
multi.addBinding().to[DownloadService]
multi.addBinding().to[AccountStatementImportService]
if (Play.current.configuration.getString(AccountStatementImportConfigurationKeys.codaboxClient).isDefined) {
multi.addBinding().to[CodaboxSyncService]
}
bind[controllers.auth.Auth].in[Singleton]
bind[controllers.auth.LoginPage].in[Singleton]
bind[controllers.auth.PasswordChange].in[Singleton]
bind[controllers.auth.SignUp].in[Singleton]
bind[controllers.auth.PasswordReset].in[Singleton]
}
}
}
| studiocredo/ticket-reservation | app/be/studiocredo/Modules.scala | Scala | apache-2.0 | 3,538 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.secondaryindex.command
import java.util
import scala.collection.JavaConverters._
import org.apache.log4j.Logger
import org.apache.spark.sql.{CarbonEnv, Row, SparkSession}
import org.apache.spark.sql.catalyst.analysis.NoSuchTableException
import org.apache.spark.sql.execution.command.{AlterTableModel, AtomicRunnableCommand}
import org.apache.spark.sql.hive.CarbonRelation
import org.apache.spark.sql.secondaryindex.events.{LoadTableSIPostExecutionEvent, LoadTableSIPreExecutionEvent}
import org.apache.spark.sql.secondaryindex.load.CarbonInternalLoaderUtil
import org.apache.spark.sql.secondaryindex.util.{CarbonInternalScalaUtil, SecondaryIndexUtil}
import org.apache.spark.sql.util.CarbonException
import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException
import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.locks.{CarbonLockFactory, LockUsage}
import org.apache.carbondata.core.metadata.{CarbonTableIdentifier, ColumnarFormatVersion}
import org.apache.carbondata.core.metadata.schema.table.{CarbonTable, TableInfo}
import org.apache.carbondata.core.statusmanager.{LoadMetadataDetails, SegmentStatus, SegmentStatusManager}
import org.apache.carbondata.core.util.CarbonUtil
import org.apache.carbondata.events.{OperationContext, OperationListenerBus}
case class SIRebuildSegmentCommand(
alterTableModel: AlterTableModel,
tableInfoOp: Option[TableInfo] = None,
operationContext: OperationContext = new OperationContext)
extends AtomicRunnableCommand {
val LOGGER: Logger = LogServiceFactory.getLogService(this.getClass.getName)
var indexTable: CarbonTable = _
override def processMetadata(sparkSession: SparkSession): Seq[Row] = {
val tableName = alterTableModel.tableName.toLowerCase
val dbName = alterTableModel.dbName.getOrElse(sparkSession.catalog.currentDatabase)
indexTable = if (tableInfoOp.isDefined) {
CarbonTable.buildFromTableInfo(tableInfoOp.get)
} else {
val relation = CarbonEnv.getInstance(sparkSession).carbonMetaStore
.lookupRelation(Option(dbName), tableName)(sparkSession).asInstanceOf[CarbonRelation]
relation.carbonTable
}
setAuditTable(indexTable)
if (!indexTable.getTableInfo.isTransactionalTable) {
throw new MalformedCarbonCommandException("Unsupported operation on non transactional table")
}
if (!indexTable.isIndexTable) {
throw new UnsupportedOperationException("Unsupported operation on carbon table")
}
val version = CarbonUtil.getFormatVersion(indexTable)
val isOlderVersion = version == ColumnarFormatVersion.V1 ||
version == ColumnarFormatVersion.V2
if (isOlderVersion) {
      throw new MalformedCarbonCommandException(
        "Unsupported rebuild operation on carbon table: merging data files is not supported on " +
        "V1 or V2 store segments")
}
// check if the list of given segments in the command are valid
val segmentIds: List[String] = {
if (alterTableModel.customSegmentIds.isDefined) {
alterTableModel.customSegmentIds.get
} else {
List.empty
}
}
if (segmentIds.nonEmpty) {
val segmentStatusManager = new SegmentStatusManager(indexTable.getAbsoluteTableIdentifier)
val validSegments = segmentStatusManager.getValidAndInvalidSegments.getValidSegments.asScala
.map(_.getSegmentNo)
segmentIds.foreach(segmentId =>
if (!validSegments.contains(segmentId)) {
throw new RuntimeException(s"Rebuild index by segment id is failed. " +
s"Invalid ID: $segmentId")
}
)
}
Seq.empty
}
override def processData(sparkSession: SparkSession): Seq[Row] = {
LOGGER.info( s"SI segment compaction request received for table " +
s"${ indexTable.getDatabaseName}.${indexTable.getTableName}")
val metaStore = CarbonEnv.getInstance(sparkSession)
.carbonMetaStore
val mainTable = metaStore
.lookupRelation(Some(indexTable.getDatabaseName),
CarbonInternalScalaUtil.getParentTableName(indexTable))(sparkSession)
.asInstanceOf[CarbonRelation]
.carbonTable
val lock = CarbonLockFactory.getCarbonLockObj(
mainTable.getAbsoluteTableIdentifier,
LockUsage.COMPACTION_LOCK)
var segmentList: List[String] = null
val segmentFileNameMap: java.util.Map[String, String] = new util.HashMap[String, String]()
var segmentIdToLoadStartTimeMapping: scala.collection.mutable.Map[String, java.lang.Long] =
scala.collection.mutable.Map()
var loadMetadataDetails: Array[LoadMetadataDetails] = null
try {
if (lock.lockWithRetries()) {
LOGGER.info("Acquired the compaction lock for table" +
s" ${mainTable.getDatabaseName}.${mainTable.getTableName}")
val operationContext = new OperationContext
val loadTableSIPreExecutionEvent: LoadTableSIPreExecutionEvent =
LoadTableSIPreExecutionEvent(sparkSession,
new CarbonTableIdentifier(indexTable.getDatabaseName, indexTable.getTableName, ""),
null,
indexTable)
OperationListenerBus.getInstance
.fireEvent(loadTableSIPreExecutionEvent, operationContext)
if (alterTableModel.customSegmentIds.isDefined) {
segmentList = alterTableModel.customSegmentIds.get
}
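        // Record the load start time of every main-table segment in scope (all segments when no custom list is given).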
SegmentStatusManager.readLoadMetadata(mainTable.getMetadataPath) collect {
case loadDetails if null == segmentList ||
segmentList.contains(loadDetails.getLoadName) =>
segmentFileNameMap
.put(loadDetails.getLoadName,
String.valueOf(loadDetails.getLoadStartTime))
}
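        // Rebuild only the index-table segments that are in scope and in SUCCESS or LOAD_PARTIAL_SUCCESS state.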
loadMetadataDetails = SegmentStatusManager
.readLoadMetadata(indexTable.getMetadataPath)
.filter(loadMetadataDetail =>
(null == segmentList || segmentList.contains(loadMetadataDetail.getLoadName)) &&
(loadMetadataDetail.getSegmentStatus ==
SegmentStatus.SUCCESS ||
loadMetadataDetail.getSegmentStatus ==
SegmentStatus.LOAD_PARTIAL_SUCCESS))
segmentIdToLoadStartTimeMapping = CarbonInternalLoaderUtil
.getSegmentToLoadStartTimeMapping(loadMetadataDetails)
.asScala
val carbonLoadModelForMergeDataFiles = SecondaryIndexUtil
.getCarbonLoadModel(indexTable,
loadMetadataDetails.toList.asJava,
System.currentTimeMillis(), CarbonInternalScalaUtil
.getCompressorForIndexTable(indexTable.getDatabaseName, indexTable.getTableName,
mainTable.getTableName)(sparkSession))
SecondaryIndexUtil.mergeDataFilesSISegments(segmentIdToLoadStartTimeMapping, indexTable,
loadMetadataDetails.toList.asJava, carbonLoadModelForMergeDataFiles,
isRebuildCommand = true)(sparkSession.sqlContext)
val loadTableSIPostExecutionEvent: LoadTableSIPostExecutionEvent =
LoadTableSIPostExecutionEvent(sparkSession,
indexTable.getCarbonTableIdentifier,
null,
indexTable)
OperationListenerBus.getInstance
.fireEvent(loadTableSIPostExecutionEvent, operationContext)
LOGGER.info(s"SI segment compaction request completed for table " +
s"${indexTable.getDatabaseName}.${indexTable.getTableName}")
} else {
LOGGER.error(s"Not able to acquire the compaction lock for table" +
s" ${indexTable.getDatabaseName}.${indexTable.getTableName}")
CarbonException.analysisException(
"Table is already locked for compaction. Please try after some time.")
}
} catch {
      case ex: NoSuchTableException =>
        throw ex
      case ex: Exception =>
        LOGGER.error(s"SI segment compaction request failed for table " +
          s"${indexTable.getDatabaseName}.${indexTable.getTableName}")
} finally {
lock.unlock()
}
Seq.empty
}
override protected def opName: String = "SI Compact/Rebuild within segment"
}
| jackylk/incubator-carbondata | integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/command/SIRebuildSegmentCommand.scala | Scala | apache-2.0 | 8,991 |
/*
* Copyright (c) 2021. StulSoft
*/
package com.stulsoft.areas
/**
* @author Yuriy Stul
*/
object ShapeArea {
def areaOf[A](a: A)(implicit shape: Area[A]): Double = shape.area(a)
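  // Usage sketch, assuming a hypothetical Circle type with an implicit Area[Circle] instance in scope:
  //   ShapeArea.areaOf(Circle(radius = 2.0))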
}
| ysden123/poc | cats-examples/first-steps/src/main/scala/com/stulsoft/areas/ShapeArea.scala | Scala | mit | 190 |
package com.outr.arango.api
import com.outr.arango.api.model._
import io.youi.client.HttpClient
import io.youi.http.HttpMethod
import io.youi.net._
import io.circe.Json
import scala.concurrent.{ExecutionContext, Future}
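/**
 * Thin wrappers over ArangoDB's `/_api/query/properties` endpoint.
 * Usage sketch, assuming an already configured `HttpClient` named `client` and an
 * implicit `ExecutionContext` in scope:
 * {{{
 *   APIQueryProperties.get(client).foreach(println)
 * }}}
 */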
object APIQueryProperties {
def get(client: HttpClient)(implicit ec: ExecutionContext): Future[Json] = client
.method(HttpMethod.Get)
.path(path"/_api/query/properties", append = true)
.call[Json]
def put(client: HttpClient, body: PutApiQueryProperties)(implicit ec: ExecutionContext): Future[Json] = client
.method(HttpMethod.Put)
.path(path"/_api/query/properties", append = true)
.restful[PutApiQueryProperties, Json](body)
} | outr/arangodb-scala | api/src/main/scala/com/outr/arango/api/APIQueryProperties.scala | Scala | mit | 689 |
/**
* Copyright (C) 2014 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.streaming.api.messaging
import com.stratio.streaming.commons.constants.ColumnType
class ColumnNameType(var columnName: String, var columnType: ColumnType)
| b-cuts/streaming-cep-engine | api/src/main/scala/com/stratio/streaming/api/messaging/ColumnNameType.scala | Scala | apache-2.0 | 798 |
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This file is part of Rudder.
*
* Rudder is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU General Public License version 3, the copyright holders add
* the following Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU General
* Public License version 3, when you create a Related Module, this
* Related Module is not considered as a part of the work and may be
* distributed under the license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* Rudder is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Rudder. If not, see <http://www.gnu.org/licenses/>.
*
*************************************************************************************
*/
package com.normation.rudder.web.snippet.administration
import net.liftweb._
import http._
import common._
import util.Helpers._
import js._
import JsCmds._
import JE._
import scala.xml.NodeSeq
import collection.mutable.Buffer
import com.normation.utils.NetUtils.isValidNetwork
import com.normation.rudder.domain.Constants
import com.normation.rudder.web.model.CurrentUser
import com.normation.rudder.services.servers.PolicyServerManagementService
import com.normation.rudder.batch.AsyncDeploymentAgent
import com.normation.rudder.domain.eventlog.UpdatePolicyServer
import com.normation.eventlog.EventLogDetails
import com.normation.rudder.batch.AutomaticStartDeployment
import com.normation.rudder.domain.eventlog.AuthorizedNetworkModification
import com.normation.rudder.repository.EventLogRepository
import com.normation.utils.StringUuidGenerator
import com.normation.eventlog.ModificationId
import bootstrap.liftweb.RudderConfig
import com.normation.inventory.domain.NodeId
class EditPolicyServerAllowedNetwork extends DispatchSnippet with Loggable {
private[this] val psService = RudderConfig.policyServerManagementService
private[this] val eventLogService = RudderConfig.eventLogRepository
private[this] val asyncDeploymentAgent = RudderConfig.asyncDeploymentAgent
private[this] val uuidGen = RudderConfig.stringUuidGenerator
private[this] val nodeInfoService = RudderConfig.nodeInfoService
/*
* We are forced to use that class to deals with multiple request
* not processed in order (ex: one request add an item, the second remove
* item at index 0, the third remove item at index 0. Now, any order for
* these requests have to be considered and lead to the same result.
*/
private[this] case class VH(id:Long = nextNum, var net : String = "") {
override def hashCode = id.hashCode
override def equals(x:Any) = x match {
case VH(i, _) => id == i
case _ => false
}
}
private[this] val policyServers = nodeInfoService.getAllSystemNodeIds()
// we need to store that out of the form, so that the changes are persisted at redraw
private[this] val allowedNetworksMap = scala.collection.mutable.Map[NodeId, Buffer[VH]]()
def dispatch = {
case "render" =>
policyServers match {
case e:EmptyBox => errorMessage(e)
case Full(seq) =>
// we need to order the seq to have root first
val sortedSeq = Constants.ROOT_POLICY_SERVER_ID +: seq.filter(x => x != Constants.ROOT_POLICY_SERVER_ID)
xml:NodeSeq => {
sortedSeq.foldLeft(NodeSeq.Empty)((result, id) => result++renderForm(id).apply(xml)) }
}
}
def errorMessage(b:EmptyBox) = {
val error = b ?~! "Error when processing allowed network"
logger.debug(error.messageChain, b)
"#allowedNetworksForm *" #> { (x:NodeSeq) =>
<div class="error">
        <p>An error occurred when trying to get the list of existing allowed networks</p>
{
b match {
case Failure(m,_,_) => <p>Error message was: {m}</p>
case _ => <p>No error message was left</p>
}
}
</div>
}
}
def renderForm(policyServerId: NodeId) : IdMemoizeTransform = SHtml.idMemoize { outerXml =>
val allowedNetworksFormId = "allowedNetworksForm" + policyServerId.value
val currentNets = psService.getAuthorizedNetworks(policyServerId)
val policyServerName = nodeInfoService.getNodeInfo(policyServerId) match {
case Full(Some(nodeInfo)) =>
<span>{nodeInfo.hostname}</span>
case eb: EmptyBox =>
val e = eb ?~! s"Could not get details for Policy Server ID ${policyServerId.value}"
logger.error(e.messageChain)
<span class="error">Unknown hostname</span>
case Full(None) =>
logger.error(s"Could not get details for Policy Server ID ${policyServerId.value}, the details were not found for that ID")
<span class="error">Unknown hostname</span>
}
val allowedNetworks = allowedNetworksMap.getOrElseUpdate(policyServerId,
Buffer() ++ currentNets.getOrElse(Nil).map(n => VH(net = n)))
// our process method returns a
// JsCmd which will be sent back to the browser
// as part of the response
def process(): JsCmd = {
//clear errors
S.clearCurrentNotices
val goodNets = Buffer[String]()
allowedNetworks.foreach { case v@VH(i,net) =>
      val netWithoutSpaces = net.replaceAll("""\s""", "")
if(netWithoutSpaces.length != 0) {
if(!isValidNetwork(netWithoutSpaces)) {
S.error("errornetwork_"+ i, "Bad format for given network")
} else {
goodNets += netWithoutSpaces
}
}
}
//if no errors, actually save
if(S.errors.isEmpty) {
val modId = ModificationId(uuidGen.newUuid)
(for {
currentNetworks <- psService.getAuthorizedNetworks(policyServerId) ?~! s"Error when getting the list of current authorized networks for policy server ${policyServerId.value}"
        changeNetwork <- psService.setAuthorizedNetworks(policyServerId, goodNets, modId, CurrentUser.getActor) ?~! s"Error when saving new allowed networks for policy server ${policyServerId.value}"
modifications = UpdatePolicyServer.buildDetails(AuthorizedNetworkModification(currentNetworks, goodNets))
eventSaved <- eventLogService.saveEventLog(modId,
UpdatePolicyServer(EventLogDetails(
modificationId = None
, principal = CurrentUser.getActor
, details = modifications
                                       , reason = None))) ?~! s"Unable to save the user event log for modification on authorized networks for policy server ${policyServerId.value}"
} yield {
}) match {
case Full(_) =>
asyncDeploymentAgent ! AutomaticStartDeployment(modId, CurrentUser.getActor)
Replace(allowedNetworksFormId, outerXml.applyAgain) &
successPopup
case e:EmptyBox => SetHtml(allowedNetworksFormId,errorMessage(e)(outerXml.applyAgain))
}
} else Noop
}
def delete(i:Long) : JsCmd = {
allowedNetworks -= VH(i)
allowedNetworksMap.put(policyServerId, allowedNetworks)
Replace(allowedNetworksFormId, outerXml.applyAgain)
}
def add() : JsCmd = {
allowedNetworks.append(VH())
allowedNetworksMap.put(policyServerId, allowedNetworks)
Replace(allowedNetworksFormId, outerXml.applyAgain)
}
//process the list of networks
"#allowedNetworksForm [id]" #> allowedNetworksFormId andThen
"#policyServerDetails" #> <h3>{"Allowed networks for policy server "}{policyServerName} {s"(Rudder ID: ${policyServerId.value})"}</h3> &
"#allowNetworkFields *" #> { (xml:NodeSeq) =>
allowedNetworks.flatMap { case VH(i,net) =>
val id = "network_"+ i
(
".deleteNetwork" #> SHtml.ajaxSubmit("-", () => delete(i)) &
"#errorNetworkField" #> <td id="errorNetworkField"><span class={"lift:Msg?errorClass=bs-text-danger;id=errornetwork_"+i}>[error]</span></td> &
".networkField [name]" #> id andThen
".networkField" #> SHtml.text(net, {x =>
allowedNetworks.find { case VH(y,_) => y==i }.foreach{ v => v.net = x }
}, "id" -> id)
)(xml)
}
} &
"#addNetworkButton" #> SHtml.ajaxSubmit("Add a network", add _) &
"#submitAllowedNetwork" #> {
SHtml.ajaxSubmit("Save changes", process _,("id","submitAllowedNetwork"), ("class","btn btn-default")) ++ Script(
OnLoad (
JsRaw("""$(".networkField").keydown( function(event) {
processKey(event , 'submitAllowedNetwork')
} );
""")
) )
}
}
///////////// success pop-up ///////////////
private[this] def successPopup : JsCmd = {
JsRaw(""" callPopupWithTimeout(200, "successConfirmationDialog")""")
}
}
| armeniaca/rudder | rudder-web/src/main/scala/com/normation/rudder/web/snippet/administration/EditPolicyServerAllowedNetwork.scala | Scala | gpl-3.0 | 9,760 |
package models
import slick.session.Database
import play.api.db.DB
import play.api.Application
import slick.driver.ExtendedProfile
trait SlickDriven {
val SLICK_DRIVER = "slick.db.driver"
val DEFAULT_SLICK_DRIVER = "scala.slick.driver.H2Driver"
var isSql = false
/**
* Loads the proper database driver based on play configuration
*/
def getDal(implicit app: Application): DAL = {
val driverClass = app.configuration.getString(SLICK_DRIVER).getOrElse(DEFAULT_SLICK_DRIVER)
val driver = singleton[ExtendedProfile](driverClass)
isSql = driverClass contains "SQL"
new DAL(driver)
}
def getDb(implicit app: Application) = {
Database.forDataSource(DB.getDataSource())
}
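  // Reflectively looks up the Scala singleton object (its MODULE$ field) for the given fully-qualified name.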
private def singleton[T](name: String)(implicit man: Manifest[T]): T =
Class.forName(name + "$").getField("MODULE$").get(man.runtimeClass).asInstanceOf[T]
} | soupytwist/knit | app/models/SlickDriven.scala | Scala | gpl-3.0 | 875 |
package me.eax.examples.thrift.tests
import java.io.{ByteArrayInputStream, ByteArrayOutputStream}
import java.nio.charset.StandardCharsets
import me.eax.examples.thrift.game._
import me.eax.examples.thrift.tests.gen._
import org.apache.thrift.protocol._
import org.apache.thrift.transport._
import org.scalatest._
import org.scalatest.prop._
class JSONProtocol extends FunSpec with Matchers with GeneratorDrivenPropertyChecks {
describe("Thrift") {
it("serializes and deserializes using TJSONProtocol") {
forAll { (data1: Hero) =>
val str = {
val out = new ByteArrayOutputStream()
data1.write(new TJSONProtocol(new TIOStreamTransport(out)))
new String(out.toByteArray, StandardCharsets.UTF_8)
}
val data2 = {
val stream = new ByteArrayInputStream(str.getBytes(StandardCharsets.UTF_8))
Hero.decode(new TJSONProtocol(new TIOStreamTransport(stream)))
}
data1 shouldBe data2
}
}
}
} | afiskon/scala-thrift-example | src/test/scala/me/eax/examples/thrift/tests/JSONProtocol.scala | Scala | mit | 997 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.process.query
import org.geotools.data.Query
import org.geotools.data.simple.SimpleFeatureCollection
import org.geotools.feature.DefaultFeatureCollection
import org.geotools.filter.text.ecql.ECQL
import org.junit.runner.RunWith
import org.locationtech.geomesa.accumulo.TestWithDataStore
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
import org.locationtech.geomesa.utils.geohash.VincentyModel
import org.locationtech.geomesa.utils.geotools.Conversions._
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.opengis.feature.simple.SimpleFeature
import org.specs2.runner.JUnitRunner
import scala.util.Random
@RunWith(classOf[JUnitRunner])
class KNearestNeighborSearchProcessTest extends TestWithDataStore {
sequential
override val spec: String = "label:String,*geom:Point:srid=4326"
override val dtgField: Option[String] = None
val knn = new KNearestNeighborSearchProcess()
val rng = new Random(0)
val centerLat = 38.149894
val centerLon = -79.073639
val width = 0.30
val uvaLawn = Seq(
ScalaSimpleFeature.create(sft, "rotunda", "cville", "POINT( -78.503547 38.035475 )"),
ScalaSimpleFeature.create(sft, "pavilion I", "cville", "POINT( -78.503923 38.035536 )"),
ScalaSimpleFeature.create(sft, "pavilion II", "cville", "POINT( -78.503109 38.035278 )"),
ScalaSimpleFeature.create(sft, "pavilion III", "cville", "POINT( -78.504059 38.035308 )"),
ScalaSimpleFeature.create(sft, "pavilion IV", "cville", "POINT( -78.503180 38.035039 )"),
ScalaSimpleFeature.create(sft, "pavilion V", "cville", "POINT( -78.504276 38.034971 )"),
ScalaSimpleFeature.create(sft, "pavilion VI", "cville", "POINT( -78.503424 38.034721 )"),
ScalaSimpleFeature.create(sft, "pavilion VII", "cville", "POINT( -78.504424 38.034628 )"),
ScalaSimpleFeature.create(sft, "pavilion VIII", "cville", "POINT( -78.503601 38.034343 )"),
ScalaSimpleFeature.create(sft, "pavilion IX", "cville", "POINT( -78.504617 38.034208 )"),
ScalaSimpleFeature.create(sft, "pavilion X", "cville", "POINT( -78.503833 38.033938 )"),
ScalaSimpleFeature.create(sft, "cabell", "cville", "POINT( -78.505152 38.032704 )"),
ScalaSimpleFeature.create(sft, "beams", "cville", "POINT( -78.510295 38.034283 )"),
ScalaSimpleFeature.create(sft, "mccormick", "cville", "POINT( -78.522288 38.032844 )"),
ScalaSimpleFeature.create(sft, "hep", "cville", "POINT( -78.520019 38.034511 )")
)
// random points about a central point
// note that these points will be uniform in cartesian space only
val distributedPoints = Seq.tabulate(1000) { i =>
val lon = centerLon + width * (rng.nextDouble() - 0.5)
val lat = centerLat + width * (rng.nextDouble() - 0.5)
ScalaSimpleFeature.create(sft, (100000 + i).toString, "cville", s"POINT($lon $lat)")
}
val diagonalFeatures = Seq.tabulate[SimpleFeature](90) { lat =>
ScalaSimpleFeature.create(sft, s"$lat", "diagonal", f"POINT($lat%d $lat%d)")
}
val polarFeatures = Seq.range(-180, 181).map { lon =>
ScalaSimpleFeature.create(sft, s"$lon", "polar", f"POINT($lon%d 89.9)")
}
step {
addFeatures(uvaLawn)
addFeatures(distributedPoints)
addFeatures(diagonalFeatures)
addFeatures(polarFeatures)
}
def collection(features: SimpleFeature*): SimpleFeatureCollection = {
val fc = new DefaultFeatureCollection()
features.foreach(fc.add)
fc
}
// generates a single SimpleFeature
def queryFeature(label: String, lon: Double, lat: Double): SimpleFeature =
ScalaSimpleFeature.create(sft, label, label, f"POINT($lon $lat)")
// generates a very loose query
def wideQuery: Query = {
val lat = 38.0
val lon = -78.50
val siteSize = 5.0
val filter = s"BBOX(geom, ${lon - siteSize}, ${lat - siteSize}, ${lon + siteSize}, ${lat + siteSize})"
new Query(sftName, ECQL.toFilter(filter))
}
"GeoMesaKNearestNeighborSearch" should {
"handle an empty query point collection" in {
val inputFeatures = collection()
val dataFeatures = ds.getFeatureSource(sftName).getFeatures()
val res = SelfClosingIterator(knn.execute(inputFeatures, dataFeatures, 100, 500d, 5000d).features()).toList
res must beEmpty
}
"handle non-point geometries in inputFeatures by ignoring them" in {
val sft = SimpleFeatureTypes.createType("lineStringKnn", "geom:LineString:srid=4326")
val inputFeatures = collection(ScalaSimpleFeature.create(sft, "route 29", "LINESTRING(-78.491 38.062, -78.474 38.082)"))
val dataFeatures = ds.getFeatureSource(sftName).getFeatures()
val res = SelfClosingIterator(knn.execute(inputFeatures, dataFeatures, 100, 500d, 5000d)).toList
res must beEmpty
}
"find nothing within 10km of a single query point " in {
val inputFeatures = collection(queryFeature("fan mountain", -78.692649, 37.878219))
val dataFeatures = ds.getFeatureSource(sftName).getFeatures()
val res = SelfClosingIterator(knn.execute(inputFeatures, dataFeatures, 5, 1000d, 10000d)).toList
res must beEmpty
}
"find 11 points within 400m of a point when k is set to 15 " in {
val inputFeatures = collection(queryFeature("madison", -78.502720, 38.036871))
val dataFeatures = ds.getFeatureSource(sftName).getFeatures()
val res = SelfClosingIterator(knn.execute(inputFeatures, dataFeatures, 15, 100d, 400d)).toList
res must containTheSameElementsAs(uvaLawn.take(11))
}
"find nearest features around Charlottesville" in {
val inputFeatures = collection(queryFeature("madison", -78.502720, 38.036871))
val dataFeatures = ds.getFeatureSource(sftName).getFeatures(wideQuery)
val res = SelfClosingIterator(knn.execute(inputFeatures, dataFeatures, 15, 500d, 2500d)).toList
res must containTheSameElementsAs(uvaLawn)
}
"find nearest features around Staunton" in {
val k = 10
val referenceFeature = queryFeature("blackfriars", -79.070569, 38.149185)
val inputFeatures = collection(referenceFeature)
val dataFeatures = ds.getFeatureSource(sftName).getFeatures(wideQuery)
val res = SelfClosingIterator(knn.execute(inputFeatures, dataFeatures, k, 5000d, 50000d)).toList
val directFeatures = SelfClosingIterator(ds.getFeatureSource(sftName).getFeatures().features).toList.sortBy { f =>
VincentyModel.getDistanceBetweenTwoPoints(referenceFeature.point, f.point).getDistanceInMeters
}
res must containTheSameElementsAs(directFeatures.take(k))
}
"handle three query points, one of which will return nothing" in {
val inputFeatures = collection(
queryFeature("madison", -78.502720, 38.036871),
queryFeature("fan mountain", -78.692649, 37.878219),
queryFeature("blackfriars", -79.070569, 38.149185)
)
val dataFeatures = ds.getFeatureSource(sftName).getFeatures()
val res = SelfClosingIterator(knn.execute(inputFeatures, dataFeatures, 5, 500d, 5000d)).toList
res must haveLength(10)
res must containAllOf(uvaLawn.take(5))
val directFeatures = collection(uvaLawn ++ distributedPoints: _*)
val direct = SelfClosingIterator(knn.execute(inputFeatures, directFeatures, 5, 500d, 5000d)).toList
res must containTheSameElementsAs(direct)
}
"find features close to the equator" in {
val k = 10
val inputFeatures = collection(queryFeature("", 0.1, 0.2))
val dataFeatures = ds.getFeatureSource(sftName).getFeatures()
val res = SelfClosingIterator(knn.execute(inputFeatures, dataFeatures, k, 1000000d, 2000000d)).toList
res.map(_.getID) must containTheSameElementsAs((0 until k).map(_.toString))
}
"find features close to Southwest Russia" in {
val k = 10
val inputFeatures = collection(queryFeature("", 45.1, 45.1))
val dataFeatures = ds.getFeatureSource(sftName).getFeatures()
val res = SelfClosingIterator(knn.execute(inputFeatures, dataFeatures, k, 500000d, 1000000d)).toList
res.map(_.getID) must containTheSameElementsAs((41 to 50).map(_.toString))
}
"find features close to the North Pole" in {
val k = 10
val inputFeatures = collection(queryFeature("", 89.9, 89.9))
val dataFeatures = ds.getFeatureSource(sftName).getFeatures(new Query(sftName, ECQL.toFilter("label = 'diagonal'")))
val res = SelfClosingIterator(knn.execute(inputFeatures, dataFeatures, k, 700000d, 2000000d)).toList
res.map(_.getID) must containTheSameElementsAs((80 to 89).map(_.toString))
}
"find northern features close to the North Pole" in {
val k = 10
val inputFeatures = collection(queryFeature("", 89.9, 89.9))
val dataFeatures = ds.getFeatureSource(sftName).getFeatures(new Query(sftName, ECQL.toFilter("label = 'polar'")))
val res = SelfClosingIterator(knn.execute(inputFeatures, dataFeatures, k, 100000d, 1000000d)).toList
res.map(_.getID) must containTheSameElementsAs((85 to 94).map(_.toString))
}
"find more things near the north polar region" in {
val k = 10
val inputFeatures = collection(queryFeature("", 0.0001, 89.9))
val dataFeatures = ds.getFeatureSource(sftName).getFeatures()
val res = SelfClosingIterator(knn.execute(inputFeatures, dataFeatures, k, 100000d, 1000000d)).toList
res.map(_.getID) must containTheSameElementsAs((-4 to 5).map(_.toString))
}
}
}
| elahrvivaz/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/test/scala/org/locationtech/geomesa/process/query/KNearestNeighborSearchProcessTest.scala | Scala | apache-2.0 | 9,951 |
package com.moolileo.meetup.websocket
import com.moolileo.meetup.model.MeetupRsvp
import org.apache.spark.Logging
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.receiver.Receiver
import org.json4s._
import org.json4s.jackson.JsonMethods._
import scalawebsocket.WebSocket
class WebSocketReader(url: String, storageLevel: StorageLevel)
extends Receiver[MeetupRsvp](storageLevel) with Logging
{
@volatile private var webSocket: WebSocket = _
def onStart() {
try{
logInfo("Connecting to WebSocket: " + url)
val newWebSocket = WebSocket().open(url).onTextMessage({ msg: String => parseJson(msg) })
setWebSocket(newWebSocket)
logInfo("Connected to: WebSocket" + url)
} catch {
case e: Exception => restart("Error starting WebSocket stream", e)
}
}
def onStop() {
setWebSocket(null)
logInfo("WebSocket receiver stopped")
}
private def setWebSocket(newWebSocket: WebSocket) = synchronized {
if (webSocket != null) {
webSocket.shutdown()
}
webSocket = newWebSocket
}
private def parseJson(jsonStr: String): Unit =
{
implicit lazy val formats = DefaultFormats
try {
      val json = parse(jsonStr)
val rsvp = json.extract[MeetupRsvp]
store(rsvp)
} catch {
case e: MappingException => logError("Unable to map JSON message to MeetupRsvp object:" + e.msg)
case e: Exception => logError("Unable to map JSON message to MeetupRsvp object")
}
}
}
| oscarruesga/utad-pebd-proyecto | src/main/scala/com/moolileo/meetup/websocket/WebSocketReader.scala | Scala | gpl-3.0 | 1,505 |
package edu.gemini.model.p1.immutable
import edu.gemini.model.p1.{mutable => M}
import scala.collection.JavaConverters._
object NiciBlueprintCoronagraphic {
def apply(m: M.NiciBlueprintCoronagraphic): NiciBlueprintCoronagraphic =
new NiciBlueprintCoronagraphic(m)
}
case class NiciBlueprintCoronagraphic(fpm: NiciFpm, dichroic: NiciDichroic, redFilters: List[NiciRedFilter], blueFilters: List[NiciBlueFilter]) extends NiciBlueprintBase {
def name: String = "NICI Coronagraphic %s %s%s".format(fpm.value, dichroic.value, formatFilters)
def this(m: M.NiciBlueprintCoronagraphic) = this(
m.getFpm,
m.getDichroic,
m.getRedFilter.asScala.toList,
m.getBlueFilter.asScala.toList
)
def mutable(n: Namer) = {
val m = Factory.createNiciBlueprintCoronagraphic
m.setId(n.nameOf(this))
m.setName(name)
m.setFpm(fpm)
m.setDichroic(dichroic)
m.getRedFilter.addAll(redFilters.asJava)
m.getBlueFilter.addAll(blueFilters.asJava)
m
}
def toChoice(n: Namer) = {
val m = Factory.createNiciBlueprintChoice
m.setCoronagraphic(mutable(n))
m
}
} | arturog8m/ocs | bundle/edu.gemini.model.p1/src/main/scala/edu/gemini/model/p1/immutable/NiciBlueprintCoronagraphic.scala | Scala | bsd-3-clause | 1,106 |
package infcalcs
import Tree._
import Orderings._
import scala.util.Random.nextInt
/**
* Structure for input-output (dose-response) data.
*
* Both the signal and response points are entered into Vectors whose
* elements are themselves Vectors of length N, where N is the number
* of distinct signal or response types of the system (i.e. given a
* channel which simultaneously takes signal A and signal B, the input
* Vector would have elements of length 2.)
*
* @param sig 2D vector of signal values
* @param resp 2D vector of response values
*/
class DRData(calcConfig: CalcConfig)
(val sig: Vector[NTuple[Double]], val resp: Vector[NTuple[Double]]) {
val numObs = sig.length
require(numObs == resp.length, "each signal must have a corresponding response")
require(checkSize(sig) == 1 && checkSize(resp) == 1,
"all signals and responses must have consistent dimensionality")
val zippedVals: Vector[Pair[NTuple[Double]]] = (sig, resp).zipped.toVector
lazy val sigVals = calcConfig.srParameters("signalValues")
lazy val respVals = calcConfig.srParameters("responseValues")
lazy val numUniqueSigVals = sigVals match {
case None => sig.toSet.size
case Some(x) => x.size
}
lazy val numUniqueRespVals = respVals match {
case None => resp.toSet.size
case Some(x) => x.size
}
val sigDim = dim(sig)
val respDim = dim(resp)
lazy val isEmpty: Boolean = numObs == 0
/** List of fractions for subsampling the data set */
val fracs = genSubFracs()
val repsPerFrac = 0 until calcConfig.numParameters("repsPerFraction").toInt
val fracList = (repsPerFrac.toList map (_ => 1.0)) ++ (for {
f <- fracs
x <- repsPerFrac
} yield f)
/**
* Generates fractions for subsampling the data which are uniformly distributed
* in inverse sample space
*/
private def genSubFracs() = {
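    // Spread the subsample sizes uniformly in inverse sample size, from the full data set down to
    // lowFraction * numObs, then convert each back to a fraction of numObs.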
val numFracs = calcConfig.numParameters("numFractions").toInt
val maxInvSample = 1.0 / (calcConfig.numParameters("lowFraction") * numObs)
val minInvSample = 1.0 / numObs
val incr = (maxInvSample - minInvSample) / numFracs.toDouble
val invs = (1 to numFracs) map (x => (x * incr) + minInvSample)
invs.toVector map (x => 1.0 / (numObs * x))
}
/**
* Confirms that each data point has the same dimensionality
*
* @param d Vector of n-tuple data points
* @return number of different dimensioned points (should be 1)
*/
private def checkSize(d: Vector[NTuple[Double]]): Int =
(d map (_.length)).toSet.size
/**
* Checks the dimensionality of the data assuming each point has
* the same number of dimensions (see [[checkSize]])
*
* @param d Vector of n-tuple data points
* @return number of dimensions
*/
private def dim(d: Vector[NTuple[Double]]): Int = d.head.length
/**
* Returns binary tree giving the values delimiting the bounds of each bin.
*
* @param v The list of doubles to be partitioned.
* @param numBins The number of bins to divide the list into.
* @return Binary tree of [[Bin]] instances.
*/
private def getBinDelims(v: Vector[Double], numBins: Int): Tree[Bin] = {
val delimList = CTBuild.partitionList(v, numBins)
buildTree(buildOrderedNodeList(delimList))
}
/**
* Calculates bin delimiters for arbitrary dimensioned data points
*
* @param values determines if explicit value sets are present for data
* @param data data points
* @param numBins number of bins per dimension
* @return Vector of trees ([[Tree]])
*/
private def delims(
values: Option[Vector[NTuple[Double]]],
data: Vector[NTuple[Double]],
numBins: NTuple[Int]): NTuple[Tree[Bin]] =
values match {
case None => {
val dt = data.transpose
((0 until dim(data)) map (x => getBinDelims(dt(x), numBins(x)))).toVector
}
case Some(v) => {
val valsByDim = v.transpose map (_.toSet.toVector.sorted)
val bins = valsByDim map (x => x.indices.toList map (y =>
Bin(y, List(x(y)), if (y == 0) Double.NegativeInfinity else x(y - 1))))
def toTree: List[Bin] => Tree[Bin] = buildOrderedNodeList[Bin] _ andThen buildTree[Bin] _
bins map toTree
}
}
/**
* Calculates key for vector of bin-delimiting trees
*
* @param trees vector of partition trees (corresponding to ordered pair data
* points)
* @return vector of bin index vectors
*
*/
private def keys(
trees: NTuple[Tree[Bin]]): Map[NTuple[Int], Int] = {
val dimLengths = trees map (_.toList.length)
val vtups = CTBuild.keyFromDimLengths(dimLengths)
(vtups.indices map (x => (vtups(x), x))).toMap
}
/**
* Generates a new [[DRData]] instance by resampling values with replacement
* @return [[DRData]]
*/
private def resample_with_replacement(): DRData = {
val (rsig, rresp) = ((0 until numObs) map (x => zippedVals(nextInt(numObs)))).toVector.unzip
new DRData(calcConfig)(rsig, rresp)
}
/**
* Delimiters for signal space
*
* @param numBins n-dimensional vector of bin numbers
* @return n-dimensional vector of bin-delimiting [[Tree]]s
*/
def sigDelims(numBins: NTuple[Int]): NTuple[Tree[Bin]] =
delims(sigVals, sig, numBins)
/**
* Delimiters for response space
*
* @param numBins n-dimensional vector of bin numbers
* @return n-dimensional vector of bin-delimiting [[Tree]]s
*/
def respDelims(numBins: NTuple[Int]): NTuple[Tree[Bin]] =
delims(respVals, resp, numBins)
/**
* Index key for signal space
*
* @param numBins n-dimensional vector of bin numbers
* @return mapping of n-dimensional indices to 1-dimensional indices
*/
def sigKey(numBins: NTuple[Int]): Map[NTuple[Int], Int] =
keys(sigDelims(numBins))
/**
* Index key for response space
*
* @param numBins n-dimensional vector of bin numbers
* @return mapping of n-dimensional indices to 1-dimensional indices
*/
def respKey(numBins: NTuple[Int]): Map[NTuple[Int], Int] =
keys(respDelims(numBins))
/**
   * Formats the data as tab-separated signal/response pairs, one observation per line
*/
override def toString() =
((0 until sig.length) map { x =>
      s"${sig(x)}\t${resp(x)}\n"
}).mkString
/**
* Writes data to file
*
* @param f file name
*/
def toFile(f: String) = {
val writer =
new java.io.BufferedWriter(new java.io.FileWriter(new java.io.File(f)))
for (i <- (0 until sig.length).toList) {
      writer.write(s"${sig(i).mkString(" ")}\t${resp(i).mkString(" ")}")
writer.newLine()
}
writer.flush()
writer.close()
}
/**
* Generate a bootstrap sample from a data set. Sample size is defined as a
* command line argument
*
* @return vector of [[DRData]] instances
*/
def bootstrap_sample(): Vector[DRData] =
((0 until calcConfig.numParameters("numForBootstrap").toInt) map (x =>
resample_with_replacement())).toVector
} | ryants/EstCC | src/main/scala/infcalcs/DRData.scala | Scala | mit | 6,918 |
/*
* Copyright 2011 javaBin
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package no.java.submitit.ems
import no.java.ems._
import no.java.ems.domain.{Event,Session,Person,Speaker => EmsSpeaker,EmailAddress,Binary => EmsBinary,ByteArrayBinary}
import _root_.java.io.InputStream
import scala.collection.JavaConversions._
import no.java.submitit.common.Implicits._
import no.java.submitit.common.{IOUtils, LoggHandling}
import no.java.submitit.model._
import no.java.submitit.common.IOUtils._
import org.joda.time.Interval
import org.joda.time.format.DateTimeFormat
import scala.xml.XML
class EmsConverter extends LoggHandling {
private def getStatus(state: Session.State) = state match {
case Session.State.Approved => Status.Approved
case Session.State.Pending => Status.Pending
case Session.State.Rejected => Status.NotApproved
case l => unknownEnumValue(l, Status.Pending)
}
def toPerson(speaker: Speaker): Person = {
val person = new Person(speaker.name)
person.setDescription(speaker.bio)
person.setEmailAddresses(new EmailAddress(speaker.email) :: Nil)
person
}
def updateSession(presentation: Presentation, session: Session) {
session.setTitle(presentation.title)
session.setLead(presentation.summary)
session.setBody(presentation.abstr)
session.setOutline(presentation.outline)
session.setEquipment(presentation.equipment)
session.setExpectedAudience(presentation.expectedAudience)
session.setFeedback(presentation.feedback)
session.setKeywords(presentation.keywords)
session.setSpeakers(presentation.speakers.map(speaker => toEmsSpeaker(speaker)))
val language = presentation.language match {
case Language.Norwegian => new domain.Language("no")
case Language.English => new domain.Language("en")
case l => unknownEnumValue(l, new domain.Language("no"))
}
val level = presentation.level match {
case Level.Beginner => Session.Level.Introductory
case Level.Intermediate => Session.Level.Intermediate
case Level.Advanced => Session.Level.Intermediate_Advanced
case Level.Hardcore => Session.Level.Advanced
case l => unknownEnumValue(l, Session.Level.Introductory)
}
val format = presentation.format match {
case PresentationFormat.Presentation => Session.Format.Presentation
case PresentationFormat.LightningTalk => Session.Format.Quickie
case l => unknownEnumValue(l, Session.Format.Presentation)
}
session.setLanguage(language)
session.setLevel(level)
session.setFormat(format)
val attachments = presentation.slideset.toList ::: presentation.pdfSlideset.toList
session.setAttachements(attachments.map(toEmsBinary))
}
def toPresentation(session: Session): Presentation = {
val pres = new Presentation()
pres.sessionId = session.getId
pres.title = session.getTitle
pres.abstr = session.getBody
pres.speakers = session.getSpeakers.toList.map(speaker => fromEmsSpeaker(speaker))
pres.summary = session.getLead
pres.outline = session.getOutline
pres.equipment = session.getEquipment
pres.expectedAudience = session.getExpectedAudience
pres.feedback = session.getFeedback
pres.keywords = session.getKeywords.toList
pres.room = if (session.getRoom != null) session.getRoom.getName else null
pres.timeslot = if (session.getTimeslot != null) formatInterval(session.getTimeslot) else null
def formatInterval(interval: Interval) = {
val dateFormatter = DateTimeFormat.shortDate();
val timeFormatter = DateTimeFormat.shortTime();
dateFormatter.print(interval.getStart()) + " " + timeFormatter.print(interval.getStart()) + " - " + timeFormatter.print(interval.getEnd());
}
pres.language = session.getLanguage.getIsoCode match {
case "no" => Language.Norwegian
case "en" => Language.English
case l => unknownEnumValue(l, Language.Norwegian)
}
pres.level = session.getLevel match {
case Session.Level.Introductory => Level.Beginner
case Session.Level.Intermediate => Level.Intermediate
case Session.Level.Intermediate_Advanced => Level.Advanced
case Session.Level.Advanced => Level.Hardcore
case l => unknownEnumValue(l, Level.Intermediate)
}
pres.format = session.getFormat match {
case Session.Format.Presentation => PresentationFormat.Presentation
case Session.Format.Quickie => PresentationFormat.LightningTalk
case l => unknownEnumValue(l, PresentationFormat.Presentation)
}
pres.status = getStatus(session.getState)
val attachments = session.getAttachements.toList.map(toBinary(_, false))
    attachments.foreach {
      case Some(b) if b.name.toLowerCase.endsWith(".pdf") => pres.pdfSlideset = Some(b)
      case Some(b) => pres.slideset = Some(b)
      case None =>
    }
pres
}
def toEmsSpeaker(speaker: Speaker): EmsSpeaker = {
val result = new EmsSpeaker(speaker.personId, speaker.name)
result.setDescription(speaker.bio)
if (speaker.picture.isDefined) result.setPhoto(toEmsBinary(speaker.picture.get))
result
}
def fromEmsSpeaker(speaker: EmsSpeaker): Speaker = {
val result = new Speaker
result.personId = speaker.getPersonId
result.name = speaker.getName
result.bio = speaker.getDescription
result.picture = toBinary(speaker.getPhoto, true)
result
}
def toEmsBinary(binary: Binary): EmsBinary = {
    // EMS always expects a content array to be present: only new binaries carry
    // their actual bytes here, already stored ones are sent with an empty placeholder.
    val content = if (binary.isNew) binary.content else new Array[Byte](0)
new ByteArrayBinary(binary.id, binary.name, binary.contentType, content)
}
def toBinary(binary: EmsBinary, fetchData: Boolean): Option[Binary] = {
if (binary != null) {
      val content: Option[InputStream] = if (fetchData) Some(binary.getDataStream) else None
Some(Binary(binary.getId, binary.getFileName, binary.getMimeType, content))
} else {
None
}
}
private def unknownEnumValue[T](v: Any, r: T): T = {
logger.error("Unknown enum value '" + v + "', defaulting to '" + r + "'")
r
}
}
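// A minimal usage sketch, assuming an EMS `Session` named `session` obtained
// from the EMS client elsewhere in the application:
//
//   val converter = new EmsConverter
//   val presentation = converter.toPresentation(session)  // EMS -> submitit model
//   converter.updateSession(presentation, session)        // submitit model -> EMS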
| javaBin/submitit | submitit-ems-client/src/main/scala/no/java/submitit/ems/EmsConverter.scala | Scala | mit | 6,737 |
import sbt._
import Keys._
object Common {
val localMavenPath = Path.userHome.absolutePath + "/.m2/repository"
val versions = Map[String, String](
"java" -> "1.8",
"akka" -> "2.3.3",
"config" -> "1.2.1",
"grizzled" -> "1.0.2",
"logback" -> "1.1.2",
"suiryc-scala" -> "0.0.2-SNAPSHOT",
"maven-compiler-plugin" -> "3.1",
"maven-surefire-plugin" -> "2.17",
"scala-maven-plugin" -> "3.1.6"
)
val settings: Seq[Setting[_]] =
org.scalastyle.sbt.ScalastylePlugin.Settings ++
Seq(
organization := "suiryc",
version := versions("suiryc-scala"),
scalaVersion := "2.11.1",
scalacOptions ++= Seq("-deprecation", "-feature", "-optimize", "-unchecked", "-Yinline-warnings"),
scalacOptions in (Compile, doc) ++= Seq("-diagrams", "-implicits"),
org.scalastyle.sbt.PluginKeys.config := file("project/scalastyle-config.xml"),
resolvers += "Local Maven Repository" at "file://" + localMavenPath,
publishMavenStyle := true,
publishTo := Some(Resolver.file("file", new File(localMavenPath)))
)
val pomExtra = (
<properties>
<encoding>UTF-8</encoding>
</properties>
<build>
<sourceDirectory>src/main/scala</sourceDirectory>
<testSourceDirectory>src/test/scala</testSourceDirectory>
<plugins>
<plugin>
<groupId>net.alchim31.maven</groupId>
<artifactId>scala-maven-plugin</artifactId>
<version>{ versions("scala-maven-plugin") }</version>
<configuration>
<args>
<arg>-deprecation</arg>
<arg>-feature</arg>
<arg>-Yinline-warnings</arg>
<arg>-optimize</arg>
<arg>-unchecked</arg>
</args>
</configuration>
<executions>
<execution>
<goals>
<goal>compile</goal>
<goal>testCompile</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>{ versions("maven-compiler-plugin") }</version>
<configuration>
<source>{ versions("java") }</source>
<target>{ versions("java") }</target>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>{ versions("maven-surefire-plugin") }</version>
<configuration>
<includes>
<include>**/*Suite.class</include>
</includes>
</configuration>
</plugin>
</plugins>
</build>
)
}
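// A minimal usage sketch, assuming a conventional multi-project build definition;
// the module name is hypothetical:
//
//   lazy val core = project.in(file("core"))
//     .settings(Common.settings: _*)
//     .settings(pomExtra := Common.pomExtra)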
| swhgoon/suiryc-scala | project/Common.scala | Scala | gpl-3.0 | 2,686 |
/*
* Copyright (C) FuseSource, Inc.
* http://fusesource.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.fusesource.fabric.api.monitor
import org.fusesource.fabric.service.JmxTemplateSupport
/**
 * <p>
 * A monitor that can be configured with sets of data sources to poll and
 * queried for views over the collected values.
 * </p>
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
trait Monitor {
/**
* Updates the monitor's configuration with the data sources that need
* to be monitored.
*/
  def configure(value: Traversable[MonitoredSetDTO]): Unit
  def close: Unit
  def fetch(fetch: FetchMonitoredViewDTO): Option[MonitoredViewDTO]
  def list: Array[MonitoredSetDTO]
  var poller_factories: Seq[PollerFactory]
}
| Jitendrakry/fuse | fabric/fabric-core/src/main/scala/org/fusesource/fabric/api/monitor/Monitor.scala | Scala | apache-2.0 | 1,163 |
package io.flow.build
case class Application(
organization: String,
application: String,
version: String
) {
val isLatest: Boolean = version == Application.Latest
val applicationVersionLabel: String = if (isLatest) {
s"$application:latest"
} else {
s"$application:$version"
}
val label: String = s"$organization/$applicationVersionLabel"
}
object Application {
val Latest = "latest"
def latest(organization: String, application: String): Application = {
Application(
organization = organization,
application = application,
version = Latest
)
}
def parse(value: String): Option[Application] = {
value.split("/").map(_.trim).toList match {
case org :: app :: Nil => {
app.split(":").map(_.trim).toList match {
case Nil => {
None
}
case name :: Nil => {
Some(Application(org, name, Latest))
}
case name :: version :: Nil => {
Some(Application(org, name, version))
}
case _ => {
None
}
}
}
case _ => {
None
}
}
}
}
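// A minimal usage sketch of the parsing rules above; the organization and
// application names are hypothetical.
object ApplicationParseExample extends App {
  println(Application.parse("flow/registry"))        // Some(Application(flow,registry,latest))
  println(Application.parse("flow/registry:0.1.23")) // Some(Application(flow,registry,0.1.23))
  println(Application.parse("no-slash-here"))        // None
}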
| flowcommerce/api-build | src/main/scala/io/flow/build/Application.scala | Scala | mit | 1,167 |
package fi.jori.slick.db
import scala.slick.driver.H2Driver.simple._
import java.sql.Date
object TaskExample extends App {
case class User(
name: String,
email: String
)
case class Task(
id: Int,
topic: String,
description: String,
started: Date,
finished: Option[Date],
user: String)
class Users(tag: Tag) extends Table[User](tag, "user") {
def name = column[String]("name")
def email = column[String]("email", O.PrimaryKey)
def * = (name,email) <> (User.tupled, User.unapply _)
}
val users = TableQuery[Users]
class Tasks(tag: Tag) extends Table[Task](tag, "task") {
def id = column[Int]("id", O.PrimaryKey)
def topic = column[String]("topic")
def description = column[String]("description")
def started = column[Date]("started")
def finished = column[Option[Date]]("finished", O.Nullable)
def user = column[String]("user")
def userEmail = foreignKey("user", user, users)(_.email)
def * = (id,topic,description,started,finished,user) <> (Task.tupled, Task.unapply _)
}
val tasks = TableQuery[Tasks]
Database.forURL("jdbc:h2:mem:test1", driver = "org.h2.Driver") withSession {
implicit session =>
(tasks.ddl ++ users.ddl).create
tasks.ddl.createStatements foreach(println)
users.ddl.createStatements foreach(println)
users += User("jori","[email protected]")
users += User("jori2","[email protected]")
tasks += Task(1,"otsikko","selostus",new Date(System.currentTimeMillis()),None,"[email protected]")
tasks += Task(2,"otsikko","selostus",new Date(System.currentTimeMillis()),None,"[email protected]")
tasks += Task(3,"otsikko","selostus",new Date(System.currentTimeMillis()-5000),Some(new Date(System.currentTimeMillis())),"[email protected]")
println("all tasks")
println(tasks.selectStatement)
tasks.foreach(println)
def finishTask(id: Int) = tasks filter(t => t.id === id) map (t => t.finished) update (Some(new Date(System.currentTimeMillis())))
val unfinished = tasks filter (t => t.finished.isNull)
val finished = tasks filter (t => t.finished.isNotNull)
val byUser = for {
u <- users if u.name === "jori2"
t <- tasks filter (t => t.user === u.email)
} yield t
println("unfinished tasks")
println(unfinished.selectStatement)
unfinished.foreach(println)
println("finished tasks")
println(finished.selectStatement)
finished.foreach(println)
println("by user jori2")
println(byUser.selectStatement)
byUser.foreach(println)
tasks += Task(4,"otsikko","selostus",new Date(System.currentTimeMillis()-25000),Some(new Date(System.currentTimeMillis())),"[email protected]")
println("two finished tasks")
println(finished.selectStatement)
finished.foreach(println)
finishTask(1)
      println((tasks filter (t => t.id === 1) map (t => t.finished)).updateStatement)
println("all but one tasks finished")
println(finished.selectStatement)
finished.foreach(println)
}
}
| jorilytter/slick-test | src/main/scala/fi/jori/slick/db/TaskExample.scala | Scala | mit | 3,169 |
package org.jetbrains.plugins.scala
package codeInsight.generation
import com.intellij.openapi.editor.{Editor, ScrollType}
import com.intellij.openapi.project.Project
import com.intellij.psi.codeStyle.CodeStyleManager
import com.intellij.psi.{PsiDocumentManager, PsiFile}
import org.jetbrains.plugins.scala.codeInsight.generation.GenerationUtil.classOrTraitAtCaret
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil.getCompanionModule
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScObject, ScTypeDefinition}
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory.{createNewLine, createObjectWithContext}
/**
* Nikolay.Tropin
* 8/17/13
*/
class ScalaGenerateCompanionObjectAction extends ScalaBaseGenerateAction(new ScalaGenerateCompanionObjectHandler)
class ScalaGenerateCompanionObjectHandler extends ScalaCodeInsightActionHandler {
import ScalaGenerateCompanionObjectHandler._
override def isValidFor(editor: Editor, file: PsiFile): Boolean =
super.isValidFor(editor, file) &&
classOrTraitAtCaret(editor, file).exists(canAddCompanionObject)
def invoke(project: Project, editor: Editor, file: PsiFile): Unit = {
for (clazz <- classOrTraitAtCaret(editor, file)) {
val obj = createCompanionObject(clazz)
val parent = clazz.getParent
val addedObj = parent.addAfter(obj, clazz)
parent.addAfter(createNewLine()(clazz.getManager), clazz)
val document = editor.getDocument
PsiDocumentManager.getInstance(project).doPostponedOperationsAndUnblockDocument(document)
val offset = addedObj.getTextRange.getStartOffset
val lineInside = document.getLineNumber(offset) + 1
CodeStyleManager.getInstance(project).adjustLineIndent(document, document.getLineStartOffset(lineInside))
editor.getCaretModel.moveToOffset(document.getLineEndOffset(lineInside))
editor.getScrollingModel.scrollToCaret(ScrollType.MAKE_VISIBLE)
}
}
}
object ScalaGenerateCompanionObjectHandler {
private def canAddCompanionObject(clazz: ScTypeDefinition): Boolean =
getCompanionModule(clazz).isEmpty
private def createCompanionObject(clazz: ScTypeDefinition): ScObject = {
if (canAddCompanionObject(clazz)) {
createObjectWithContext(s"object ${clazz.name} {\\n \\n}", clazz.getContext, clazz)
}
else throw new IllegalArgumentException("Cannot create companion object")
}
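  // For a definition such as `class Foo`, the template above yields the following
  // companion text; the handler then moves the caret onto the empty body line:
  //
  //   object Foo {
  //
  //   }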
}
| ilinum/intellij-scala | src/org/jetbrains/plugins/scala/codeInsight/generation/ScalaGenerateCompanionObjectAction.scala | Scala | apache-2.0 | 2,410 |
/* ___ _ ___ _ _ *\\
** / __| |/ (_) | | The SKilL Generator **
** \\__ \\ ' <| | | |__ (c) 2013-16 University of Stuttgart **
** |___/_|\\_\\_|_|____| see LICENSE **
\\* */
package de.ust.skill.generator.c.model
import de.ust.skill.generator.c.GeneralOutputMaker
/**
* @author Fabian Harth, Timm Felden
* @todo rename skill state to skill file
* @todo ensure 80 characters margin
*/
trait TypeDeclarationHeaderMaker extends GeneralOutputMaker {
abstract override def make {
super.make
val out = files.open(s"model/${prefix}type_declaration.h")
val prefixCapital = packagePrefix.toUpperCase
out.write(s"""#ifndef ${prefixCapital}TYPE_DECLARATION_H_
#define ${prefixCapital}TYPE_DECLARATION_H_
#include <glib.h>
#include "../model/${prefix}types.h"
struct ${prefix}type_declaration_struct;
typedef struct ${prefix}type_declaration_struct *${prefix}type_declaration;
/**
* Instances are not deleted directly, but just get their skill-id set to 0.
* Therefore, we need a cleanup function to set references deleted instances to
* null.
*/
typedef void ${prefix}cleanup_function(${prefix}skill_type instance);
/**
* This stores information about a user-defined type. There is no order defined
* on the fields here, as the order may vary between different binary files.
* Also, a binary file may add additional fields, which are not known by the
* binding.
*/
typedef struct ${prefix}type_declaration_struct {
char *name;
${prefix}cleanup_function *remove_null_references;
//! The super-type, if it has one.
${prefix}type_declaration super_type;
//! This is a mapping field-name -> field_information for all its fields.
GHashTable *fields;
//! The number of bytes of one instance of this type in memory.
int64_t size;
} ${prefix}type_declaration_struct;
${prefix}type_declaration ${prefix}type_declaration_new();
void ${prefix}type_declaration_destroy(${prefix}type_declaration instance);
#endif /* TYPE_DECLARATION_H */
""")
out.close()
}
}
| skill-lang/skill | src/main/scala/de/ust/skill/generator/c/model/TypeDeclarationHeaderMaker.scala | Scala | bsd-3-clause | 2,251 |
package play.api.db.evolutions
import javax.inject.{Inject, Singleton}
import play.api.{Configuration, Environment}
@Singleton
class SubFolderEvolutionsReader @Inject() (env: Environment, config: Configuration) extends EnvironmentEvolutionsReader(env) {
override def evolutions(db: String): Seq[Evolution] = {
config.getOptional[Seq[String]](s"evolutions.$db.folders")
.getOrElse(Nil)
.flatMap(folder => super.evolutions(s"$db/$folder"))
.zipWithIndex
.map {
case (ev, index) => ev.copy(revision = index)
}
}
}
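// A minimal configuration sketch (folder names are hypothetical): with
//
//   evolutions.default.folders = ["users", "billing"]
//
// in application.conf, this reader concatenates the evolutions found under
// evolutions/default/users and evolutions/default/billing (in that order) and
// renumbers their revisions by position in the combined sequence.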
| giabao/subfolder-evolutions | module/src/main/scala/play/api/db/evolutions/SubFolderEvolutionsReader.scala | Scala | apache-2.0 | 559 |
/*
* GESeq.scala
* (FScape)
*
* Copyright (c) 2001-2022 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Affero General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* [email protected]
*/
package de.sciss.fscape
package graph
package impl
import de.sciss.fscape.Graph.{ProductReader, RefMapIn}
import scala.collection.immutable.{IndexedSeq => Vec}
object GESeq extends ProductReader[GESeq] {
override def read(in: RefMapIn, key: String, arity: Int): GESeq = {
require (arity == 1)
val _in = in.readVec(in.readGE())
new GESeq(_in)
}
}
final case class GESeq(elems: Vec[GE]) extends GE {
private[fscape] def expand(implicit b: UGenGraph.Builder): UGenInLike =
UGenInGroup(elems.map(_.expand))
override def toString: String = elems.mkString("GESeq(", ",", ")")
}
| Sciss/FScape-next | core/shared/src/main/scala/de/sciss/fscape/graph/impl/GESeq.scala | Scala | agpl-3.0 | 881 |
package slick.jdbc
import scala.concurrent.ExecutionContext
import slick.SlickException
import slick.ast._
import slick.ast.TypeUtil._
import slick.basic.Capability
import slick.dbio._
import slick.compiler.{Phase, CompilerState}
import slick.jdbc.meta.MTable
import slick.lifted._
import slick.relational.RelationalCapabilities
import slick.sql.SqlCapabilities
import slick.util.MacroSupport.macroSupportInterpolation
/** Slick profile for Derby/JavaDB.
*
* This profile implements [[slick.jdbc.JdbcProfile]]
* ''without'' the following capabilities:
*
* <ul>
* <li>[[slick.relational.RelationalCapabilities.functionDatabase]]:
* <code>Functions.database</code> is not available in Derby. Slick
* will return an empty string instead.</li>
* <li>[[slick.relational.RelationalCapabilities.replace]],
* [[slick.relational.RelationalCapabilities.reverse]]:
* These String functions are not available in Derby.</li>
* <li>[[slick.relational.RelationalCapabilities.pagingNested]]:
* See <a href="https://issues.apache.org/jira/browse/DERBY-5911"
* target="_parent">DERBY-5911</a>.</li>
* <li>[[slick.jdbc.JdbcCapabilities.returnInsertOther]]:
* When returning columns from an INSERT operation, only a single column
* may be specified which must be the table's AutoInc column.</li>
* <li>[[slick.sql.SqlCapabilities.sequenceCurr]]:
* <code>Sequence.curr</code> to get the current value of a sequence is
* not supported by Derby. Trying to generate SQL code which uses this
* feature throws a SlickException.</li>
* <li>[[slick.sql.SqlCapabilities.sequenceCycle]]:
* Sequence cycling is supported but does not conform to SQL:2008
* semantics. Derby cycles back to the START value instead of MINVALUE or
* MAXVALUE.</li>
* <li>[[slick.relational.RelationalCapabilities.zip]]:
* Ordered sub-queries and window functions with orderings are currently
* not supported by Derby. These are required by <code>zip</code> and
* <code>zipWithIndex</code>. Trying to generate SQL code which uses this
* feature causes the DB to throw an exception. We do not prevent these
* queries from being generated because we expect future Derby versions to
* support them with the standard SQL:2003 syntax (see
* <a href="http://wiki.apache.org/db-derby/OLAPRowNumber" target="_parent"
* >http://wiki.apache.org/db-derby/OLAPRowNumber</a>).</li>
* <li>[[slick.relational.RelationalCapabilities.joinFull]]:
* Full outer joins are emulated because there is not native support
* for them.</li>
* <li>[[slick.jdbc.JdbcCapabilities.insertOrUpdate]]:
* InsertOrUpdate operations are emulated on the client side because there
* is no native support for them (but there is work in progress: see
* <a href="https://issues.apache.org/jira/browse/DERBY-3155"
* target="_parent" >DERBY-3155</a>).</li>
* <li>[[slick.jdbc.JdbcCapabilities.booleanMetaData]]:
* Derby <= 10.6 doesn't have booleans, so Slick maps to SMALLINT instead.
* Other jdbc drivers like MySQL map TINYINT(1) back to a Scala
* Boolean. Derby maps SMALLINT to an Integer and that's how it shows
* up in the jdbc meta data, thus the original type is lost.</li>
* <li>[[slick.jdbc.JdbcCapabilities.supportsByte]]:
* Derby doesn't have a corresponding type for Byte.
* SMALLINT is used instead and mapped to Short in the Slick model.</li>
* <li>[[slick.relational.RelationalCapabilities.repeat]]:
* There's not builtin string function repeat in Derby.
* <a href="https://db.apache.org/derby/docs/10.10/ref/rrefsqlj29026.html" target="_parent"
* >https://db.apache.org/derby/docs/10.10/ref/rrefsqlj29026.html</a></li>
* </ul>
*/
trait DerbyProfile extends JdbcProfile {
override protected def computeCapabilities: Set[Capability] = (super.computeCapabilities
- RelationalCapabilities.functionDatabase
- RelationalCapabilities.pagingNested
- JdbcCapabilities.returnInsertOther
- SqlCapabilities.sequenceCurr
// Cycling is broken in Derby. It cycles to the start value instead of min or max
- SqlCapabilities.sequenceCycle
- RelationalCapabilities.zip
- RelationalCapabilities.joinFull
- JdbcCapabilities.insertOrUpdate
- RelationalCapabilities.replace
- RelationalCapabilities.reverse
- JdbcCapabilities.booleanMetaData
- JdbcCapabilities.supportsByte
- RelationalCapabilities.repeat
)
class ModelBuilder(mTables: Seq[MTable], ignoreInvalidDefaults: Boolean)(implicit ec: ExecutionContext) extends JdbcModelBuilder(mTables, ignoreInvalidDefaults) {
override def createTableNamer(mTable: MTable): TableNamer = new TableNamer(mTable) {
override def schema = super.schema.filter(_ != "APP") // remove default schema
}
}
override def createModelBuilder(tables: Seq[MTable], ignoreInvalidDefaults: Boolean)(implicit ec: ExecutionContext): JdbcModelBuilder =
new ModelBuilder(tables, ignoreInvalidDefaults)
override def defaultTables(implicit ec: ExecutionContext): DBIO[Seq[MTable]] =
MTable.getTables(None, None, None, Some(Seq("TABLE")))
override protected def computeQueryCompiler = super.computeQueryCompiler + Phase.rewriteBooleans + Phase.specializeParameters
override val columnTypes = new JdbcTypes
override def createQueryBuilder(n: Node, state: CompilerState): QueryBuilder = new QueryBuilder(n, state)
override def createTableDDLBuilder(table: Table[_]): TableDDLBuilder = new TableDDLBuilder(table)
override def createColumnDDLBuilder(column: FieldSymbol, table: Table[_]): ColumnDDLBuilder = new ColumnDDLBuilder(column)
override def createSequenceDDLBuilder(seq: Sequence[_]): SequenceDDLBuilder[_] = new SequenceDDLBuilder(seq)
override def defaultSqlTypeName(tmd: JdbcType[_], sym: Option[FieldSymbol]): String = tmd.sqlType match {
case java.sql.Types.BOOLEAN => "SMALLINT"
/* Derby does not have a TINYINT type, so we use SMALLINT instead. */
case java.sql.Types.TINYINT => "SMALLINT"
case _ => super.defaultSqlTypeName(tmd, sym)
}
override val scalarFrom = Some("sysibm.sysdummy1")
class QueryBuilder(tree: Node, state: CompilerState) extends super.QueryBuilder(tree, state) {
override protected val concatOperator = Some("||")
override protected val supportsTuples = false
override protected val supportsLiteralGroupBy = true
override protected val quotedJdbcFns = Some(Vector(Library.User))
override protected def buildForUpdateClause(forUpdate: Boolean) = {
super.buildForUpdateClause(forUpdate)
if (forUpdate) {
b" with RS "
}
}
override def expr(c: Node, skipParens: Boolean = false): Unit = c match {
case Library.Cast(ch @ _*) =>
/* Work around DERBY-2072 by casting numeric values first to CHAR and
* then to VARCHAR. */
val (toVarchar, tn) = {
val tn =
(if(ch.length == 2) ch(1).asInstanceOf[LiteralNode].value.asInstanceOf[String]
else jdbcTypeFor(c.nodeType).sqlTypeName(None)).toLowerCase
if(tn == "varchar") (true, columnTypes.stringJdbcType.sqlTypeName(None))
else if(tn.startsWith("varchar")) (true, tn)
else (false, tn)
}
if(toVarchar && jdbcTypeFor(ch(0).nodeType).isInstanceOf[NumericTypedType])
b"trim(cast(cast(${ch(0)} as char(30)) as $tn))"
else b"cast(${ch(0)} as $tn)"
case Library.IfNull(l, r) =>
/* Derby does not support IFNULL so we use COALESCE instead,
* and it requires NULLs to be casted to a suitable type */
b"coalesce(cast($l as ${jdbcTypeFor(c.nodeType).sqlTypeName(None)}),!$r)"
case Library.SilentCast(LiteralNode(None)) :@ JdbcType(ti, _) if currentPart == SelectPart =>
// Cast NULL to the correct type
b"cast(null as ${ti.sqlTypeName(None)})"
case LiteralNode(None) :@ JdbcType(ti, _) if currentPart == SelectPart =>
// Cast NULL to the correct type
b"cast(null as ${ti.sqlTypeName(None)})"
case (c @ LiteralNode(v)) :@ JdbcType(ti, option) if currentPart == SelectPart =>
/* The Derby embedded driver has a bug (DERBY-4671) which results in a
* NullPointerException when using bind variables in a SELECT clause.
* This should be fixed in Derby 10.6.1.1. The workaround is to add an
* explicit type annotation (in the form of a CAST expression). */
if(c.volatileHint || !ti.hasLiteralForm) {
b"cast("
b +?= { (p, idx, param) => if(option) ti.setOption(v.asInstanceOf[Option[Any]], p, idx) else ti.setValue(v, p, idx) }
b" as ${ti.sqlTypeName(None)})"
} else super.expr(c, skipParens)
case Library.NextValue(SequenceNode(name)) => b"(next value for `$name)"
case Library.CurrentValue(_*) => throw new SlickException("Derby does not support CURRVAL")
case Union(left, right, all) =>
b"\{"
buildFrom(left, None, true)
if(all) b"\nunion all " else b"\nunion "
b"\["
buildFrom(right, None, true)
b"\]"
b"\}"
case _ => super.expr(c, skipParens)
}
}
class TableDDLBuilder(table: Table[_]) extends super.TableDDLBuilder(table) {
override protected def createIndex(idx: Index) = {
if(idx.unique) {
/* Create a UNIQUE CONSTRAINT (with an automatically generated backing
* index) because Derby does not allow a FOREIGN KEY CONSTRAINT to
* reference columns which have a UNIQUE INDEX but not a nominal UNIQUE
* CONSTRAINT. */
val sb = new StringBuilder append "ALTER TABLE " append quoteIdentifier(table.tableName) append " ADD "
sb append "CONSTRAINT " append quoteIdentifier(idx.name) append " UNIQUE("
addIndexColumnList(idx.on, sb, idx.table.tableName)
sb append ")"
sb.toString
} else super.createIndex(idx)
}
}
class ColumnDDLBuilder(column: FieldSymbol) extends super.ColumnDDLBuilder(column) {
override protected def appendOptions(sb: StringBuilder) {
if(defaultLiteral ne null) sb append " DEFAULT " append defaultLiteral
if(notNull) sb append " NOT NULL"
if(primaryKey) sb append " PRIMARY KEY"
if(autoIncrement) sb append " GENERATED BY DEFAULT AS IDENTITY"
      if(unique) sb append " UNIQUE"
}
}
class SequenceDDLBuilder[T](seq: Sequence[T]) extends super.SequenceDDLBuilder(seq) {
override def buildDDL: DDL = {
import seq.integral._
val increment = seq._increment.getOrElse(one)
val desc = increment < zero
val b = new StringBuilder append "CREATE SEQUENCE " append quoteIdentifier(seq.name)
/* Set the START value explicitly because it defaults to the data type's
* min/max value instead of the more conventional 1/-1. */
b append " START WITH " append seq._start.getOrElse(if(desc) -1 else 1)
seq._increment.foreach { b append " INCREMENT BY " append _ }
seq._maxValue.foreach { b append " MAXVALUE " append _ }
seq._minValue.foreach { b append " MINVALUE " append _ }
/* Cycling is supported but does not conform to SQL:2008 semantics. Derby
* cycles back to the START value instead of MINVALUE or MAXVALUE. No good
* workaround available AFAICT. */
if(seq._cycle) b append " CYCLE"
DDL(b.toString, "DROP SEQUENCE " + quoteIdentifier(seq.name))
}
}
class JdbcTypes extends super.JdbcTypes {
override val booleanJdbcType = new BooleanJdbcType
override val uuidJdbcType = new UUIDJdbcType
/* Derby does not have a proper BOOLEAN type. The suggested workaround is
* SMALLINT with constants 1 and 0 for TRUE and FALSE. */
class BooleanJdbcType extends super.BooleanJdbcType {
override def valueToSQLLiteral(value: Boolean) = if(value) "1" else "0"
}
class UUIDJdbcType extends super.UUIDJdbcType {
override def sqlType = java.sql.Types.BINARY
override def sqlTypeName(sym: Option[FieldSymbol]) = "CHAR(16) FOR BIT DATA"
}
}
}
object DerbyProfile extends DerbyProfile
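// A minimal usage sketch (connection settings are hypothetical):
//
//   import slick.jdbc.DerbyProfile.api._
//   val db = Database.forURL("jdbc:derby:memory:test;create=true",
//     driver = "org.apache.derby.jdbc.EmbeddedDriver")
//
// subject to the capability limitations documented on the trait above.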
| Asamsig/slick | slick/src/main/scala/slick/jdbc/DerbyProfile.scala | Scala | bsd-2-clause | 12,204 |
package com.ubirch.avatar.model.rest.device
import org.joda.time.DateTime
import org.json4s.JValue
/**
* we might use this draft in the future
*/
case class DeviceDraft(id: String,
name: Option[String],
hwType: Option[String],
hwId: Option[String],
syncState: Option[Int], // 0 = out of sync, 1 = in sync, 100 = unknown
tags: Option[Set[String]],
properties: Option[JValue],
subscriptions: Option[Set[String]],
config: Option[JValue],
avatar: Option[AvatarDraft],
created: Option[DateTime],
updated: Option[DateTime],
lastActive: Option[DateTime]
)
| ubirch/ubirch-avatar-service | model-rest/src/main/scala/com/ubirch/avatar/model/rest/device/DeviceDraft.scala | Scala | apache-2.0 | 853 |
package scodec
package protocols
package mpeg
package transport
import scodec.Codec
import scodec.codecs.uint
case class Pid(value: Int) {
require(value >= Pid.MinValue && value <= Pid.MaxValue)
}
object Pid {
val MinValue = 0
val MaxValue = 8191
implicit val codec: Codec[Pid] = uint(13).as[Pid]
}
case class PidStamped[+A](pid: Pid, value: A) {
def map[B](f: A => B): PidStamped[B] = copy(value = f(value))
}
object PidStamped {
/**
* Combinator that converts a `Transform[I, O]` in to a `Transform[PidStamped[I], PidStamped[O]]` such that
* pidstamps are preserved on elements that flow through the stream.
*/
def preserve[I, O](t: Transform[I, O]): Transform.Aux[t.S, PidStamped[I], PidStamped[O]] =
t.lens(_.value, (psi, o) => psi.copy(value = o))
}
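// A minimal usage sketch: round-tripping an arbitrary value through the 13-bit
// PID codec; values outside 0..8191 are rejected by the Pid constructor.
object PidCodecExample extends App {
  val encoded = Pid.codec.encode(Pid(0x1001)).require   // a 13-bit BitVector
  val decoded = Pid.codec.decode(encoded).require.value // Pid(4097)
  println(s"encoded $encoded, decoded back to $decoded")
}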
| scodec/scodec-protocols | src/main/scala/scodec/protocols/mpeg/transport/Pid.scala | Scala | bsd-3-clause | 789 |