code (string) | repo_name (string) | path (string) | language (1 class) | license (15 values) | size (int64)
---|---|---|---|---|---|
package fds.scala.cache
import com.zink.cache.{CacheFactory, Cache}
/**
* Created by nigel on 02/12/2015.
*/
object CacheClient {
// Connect to the cache and store a channel URL under the key "BBC1".
val cache: Cache = CacheFactory.connect("192.168.99.100")
cache.set("BBC1", "http://www.bbc.co.uk/iplayer/tv/bbc_one")
System.out.println(cache.get("BBC1"))
System.out.println(cache.get("BBC2")) // never set, so this lookup misses
// setnx only sets a value if the key is absent, so "BBC1" keeps its URL here.
cache.setnx("BBC1", "junk")
System.out.println(cache.get("BBC1"))
// Delete the key, confirm it is gone, then re-create it with setnx.
cache.del("BBC1")
System.out.println(cache.get("BBC1"))
cache.setnx("BBC1", "http://www.bbc.co.uk/iplayer/tv/bbc_one")
// Give the entry a time-to-live, then read it before and after a short sleep.
cache.expire("BBC1", 100)
System.out.println(cache.get("BBC1"))
Thread.sleep(100)
System.out.println(cache.get("BBC1"))
}
| Vigil365/fds-project | src/main/scala/fds/scala/cache/CacheClient.scala | Scala | cc0-1.0 | 693 |
/*
* Copyright 2016-2017 original author or authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package handlers
import javax.inject.Inject
import models.Results.StringListResult
import play.api.Logger
import tap.analysis.athanor.AthanorClient
import scala.concurrent.Future
/**
* Created by [email protected] on 19/9/17.
*/
class ExternalAnalysisHandler @Inject() (athanorClient: AthanorClient) {
val logger: Logger = Logger(this.getClass)
def analyseWithAthanor(text: String, grammar: Option[String], start: Long): Future[StringListResult] = {
val parameter = "?grammar=" + grammar.getOrElse("analytic")
logger.info(s"Creating request with parameter: $parameter")
athanorClient.process(text,parameter,start)
}
} | uts-cic/tap | src/main/scala/handlers/ExternalAnalysisHandler.scala | Scala | apache-2.0 | 1,264 |
package gapt.proofs.lk
import gapt.examples.tape
import gapt.expr.formula.hol.universalClosure
import gapt.proofs.SequentMatchers
import gapt.proofs.context.facet.ProofDefinitions
import gapt.proofs.context.facet.ProofNames
import gapt.proofs.lk.rules.ProofLink
import gapt.proofs.lk.transformations.makeTheoryAxiomsExplicit
import org.specs2.mutable.Specification
class makeTheoryAxiomsExplicitTest extends Specification with SequentMatchers {
"tape" in {
val ax =
for {
( _, ( lhs, seq ) ) <- tape.ctx.get[ProofNames].names
if tape.ctx.get[ProofDefinitions].find( lhs ).isEmpty
} yield universalClosure( seq.toDisjunction )
val withoutThAx = makeTheoryAxiomsExplicit( ax.toSeq: _* )( tape.proof )
withoutThAx.subProofs.filter { _.isInstanceOf[ProofLink] } must_== Set()
tape.ctx.check( withoutThAx )
// TODO: multiset equality
withoutThAx.conclusion must beSetEqual( ax ++: tape.proof.conclusion )
}
}
| gapt/gapt | tests/src/test/scala/gapt/proofs/lk/makeTheoryAxiomsExplicitTest.scala | Scala | gpl-3.0 | 964 |
package models.webservice
import models.admin.{AnswerTable, CategoryTable, QuestionTable}
import play.api.Play
import play.api.db.slick.DatabaseConfigProvider
import play.api.libs.json.Json
import slick.driver.JdbcProfile
import slick.driver.MySQLDriver.api._
import slick.lifted.{TableQuery, Tag}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import util.Extensions._
import scala.util.Success
/**
* Created by Murat.
*/
case class Round(id: Option[Long], gameId: Option[Long], categoryId: Option[Long],
quesOneId: Option[Long], quesTwoId: Option[Long], quesThreeId: Option[Long],
uoneAnsOneId: Option[Long], uoneAnsTwoId: Option[Long], uoneAnsThreeId: Option[Long],
utwoAnsOneId: Option[Long], utwoAnsTwoId: Option[Long], utwoAnsThreeId: Option[Long]){
def finished = uoneAnsOneId.isDefined && uoneAnsTwoId.isDefined && uoneAnsThreeId.isDefined &&
utwoAnsOneId.isDefined && utwoAnsTwoId.isDefined && utwoAnsThreeId.isDefined
def empty = uoneAnsOneId.isEmpty && uoneAnsTwoId.isEmpty && uoneAnsThreeId.isEmpty &&
utwoAnsOneId.isEmpty && utwoAnsTwoId.isEmpty && utwoAnsThreeId.isEmpty
def playerAnswers = Map(quesOneId -> uoneAnsOneId, quesTwoId -> uoneAnsTwoId, quesThreeId -> uoneAnsThreeId).withDefaultValue(None)
def opponentAnswers = Map(quesOneId -> utwoAnsOneId, quesTwoId -> utwoAnsTwoId, quesThreeId -> utwoAnsThreeId).withDefaultValue(None)
def userAnswers(game: Game) = if (game.userOneMove) opponentAnswers else playerAnswers
def questions = Set(quesOneId, quesTwoId, quesThreeId).flatten
def replyMove(count: Int):Boolean = {
if(count % 2 == 0) finished
else !finished
}
}
class RoundTable(tag: Tag) extends Table[Round](tag, "ROUND"){
def id = column[Option[Long]]("ID", O.PrimaryKey, O.AutoInc)
def gameId = column[Option[Long]]("GAME_ID")
def categoryId = column[Option[Long]]("CATEGORY_ID")
def quesOneId = column[Option[Long]]("QUES_ONE")
def quesTwoId = column[Option[Long]]("QUES_TWO")
def quesThreeId = column[Option[Long]]("QUES_THREE")
def uoneAnsOneId = column[Option[Long]]("UONE_ANSONE")
def uoneAnsTwoId = column[Option[Long]]("UONE_ANSTWO")
def uoneAnsThreeId = column[Option[Long]]("UONE_ANSTHREE")
def utwoAnsOneId = column[Option[Long]]("UTWO_ANSONE")
def utwoAnsTwoId = column[Option[Long]]("UTWO_ANSTWO")
def utwoAnsThreeId = column[Option[Long]]("UTWO_ANSTHREE")
override def * = (id, gameId, categoryId,
quesOneId, quesTwoId, quesThreeId,
uoneAnsOneId, uoneAnsTwoId, uoneAnsThreeId,
utwoAnsOneId, utwoAnsTwoId, utwoAnsThreeId) <> (Round.tupled, Round.unapply)
def game = foreignKey("GAME_ID_FK", gameId, TableQuery[GameTable])(_.id)
def category = foreignKey("CATEGORY_ID_FK", categoryId, TableQuery[CategoryTable])(_.id)
def quesOne = foreignKey("QUES_ONE_FK", quesOneId, TableQuery[QuestionTable])(_.id)
def quesTwo = foreignKey("QUES_TWO_FK", quesTwoId, TableQuery[QuestionTable])(_.id)
def quesThree = foreignKey("QUES_THREE_FK", quesThreeId, TableQuery[QuestionTable])(_.id)
def uoneAnsOne = foreignKey("UONE_ANSONE_FK", uoneAnsOneId, TableQuery[AnswerTable])(_.id)
def uoneAnsTwo = foreignKey("UONE_ANSTWO_FK", uoneAnsTwoId, TableQuery[AnswerTable])(_.id)
def uoneAnsThree = foreignKey("UONE_ANSTHREE_FK", uoneAnsThreeId, TableQuery[AnswerTable])(_.id)
def utwoAnsOne = foreignKey("UTWO_ANSONE_FK", utwoAnsOneId, TableQuery[AnswerTable])(_.id)
def utwoAnsTwo = foreignKey("UTWO_ANSTWO_FK", utwoAnsTwoId, TableQuery[AnswerTable])(_.id)
def utwoAnsThree = foreignKey("UTWO_ANSTHREE_FK", utwoAnsThreeId, TableQuery[AnswerTable])(_.id)
}
object RoundDAO{
val db = DatabaseConfigProvider.get[JdbcProfile](Play.current).db
def add(round: Round): Unit ={
db.run(ServiceTables.rounds.insertOrUpdate(round))
}
def roundNum(gameId: Option[Long]): Future[Int] = {
db.run(ServiceTables.rounds.filter(_.gameId === gameId).length.result)
}
def lastRound(gameId: Option[Long]): Future[Option[Round]] = {
db.run(ServiceTables.rounds.filter(_.gameId === gameId).sortBy(_.id.desc).result.headOption)
}
def newRound(gameId: Option[Long]): Future[Round] ={
val round = Round(None, gameId, None, None, None, None, None, None, None, None, None, None)
db.run(ServiceTables.rounds returning ServiceTables.rounds.map(_.id)
into ((user,id) => user.copy(id = id)) += round)
}
def submitRound(gr: GameRound, game: Game, userId: Option[Long]): Unit ={
val query = ServiceTables.rounds.filter(g => g.gameId === gr.gameId && g.id === gr.roundId)
val d = if (userId === game.userOneId) {
val updateQ = query.map(r => (r.categoryId, r.quesOneId, r.quesTwoId, r.quesThreeId,
r.uoneAnsOneId, r.uoneAnsTwoId, r.uoneAnsThreeId))
.update((gr.catId, gr.q1Id, gr.q2Id, gr.q3Id, gr.a1Id, gr.a2Id, gr.a3Id))
db.run(updateQ)
} else if (userId === game.userTwoId) {
val update = query.map(r => (r.categoryId, r.quesOneId, r.quesTwoId, r.quesThreeId,
r.utwoAnsOneId, r.utwoAnsTwoId, r.utwoAnsThreeId))
.update((gr.catId, gr.q1Id, gr.q2Id, gr.q3Id, gr.a1Id, gr.a2Id, gr.a3Id))
db.run(update)
} else Future.successful(-1)
d.andThen{
case Success(e) => //TODO: SIMPLIFY THIS
if(e != -1)
db.run(ServiceTables.rounds.filter(_.id === gr.roundId).result.headOption).map{
rOp => rOp.foreach{
r => if(!r.finished) GameDAO.toggleMove(game)
}
}
}
}
object Implicits{
implicit val roundFormat = Json.format[Round]
}
}
| mustafin/ent-quiz-server | modules/webservice/app/models/webservice/Round.scala | Scala | apache-2.0 | 5,683 |
package org.tearne.crosser.spike
import java.net.URL
import java.io.InputStreamReader
import java.io.BufferedReader
import scala.io.Source
import com.typesafe.config.ConfigFactory
object URLRequest extends App{
val stream = new URL("""http://crosser.callsar.com/api/backend/scheme/7?format=json""").openStream()
// val reader = new InputStreamReader(stream, "UTF-8")
val lines = Source.fromInputStream(stream).getLines
val conf = ConfigFactory.parseString(lines.next)
assert(!lines.hasNext)
} | tearne/Crosser | src/main/scala/org/tearne/crosser/spike/URLRequest.scala | Scala | apache-2.0 | 502 |
package zooowner
import zooowner.message._
import org.apache.zookeeper.ZooKeeper.States
import org.apache.zookeeper.Watcher.Event.KeeperState
import scala.concurrent.Future
import scala.concurrent.duration._
import ZKConnection._
trait ZKConnection {
/**
* Takes a function to be called on the client, taking care to ensure that it
* is invoked with an active instance of the ZooKeeper client.
*/
def apply[T](call: ZKClient => T): T
/**
* Active ZooKeeper client, through which all interactions with ZK are
* being performed.
*/
def client: ZKClient
/**
* Session credentials of connection.
*/
def session: Option[ZKSession]
/**
* Future which is resolved with active connection
* once is is initially established.
*/
def whenConnected: Future[ZKConnection]
/**
* Waits for the connection to be established within the given timeout.
*
* @param timeout Amount of time to wait for connection
*/
def awaitConnection(timeout: FiniteDuration): Unit
/**
* Future which is resolved with the session credentials of the current
* connection when it expires.
*/
def whenExpired: Future[ZKSession]
/**
* Waits for connection to expire.
*
* @param timeout Amount of time to wait for session expiration.
*/
def awaitExpiration(timeout: FiniteDuration): Unit
/**
* Tests whether the connection to ZooKeeper server is established.
*/
def isConnected: Boolean
/**
* Tests whether connection's session has expired.
*/
def isExpired: Boolean
/**
* Closes current connection and returns a new connection with the same
* arguments as this one.
*/
def recreate(): ZKConnection
/**
* Disconnects from ZooKeeper server.
*/
def close(): Unit
}
/**
* Connection session credentials, used to reestablish the session.
*/
case class ZKSession(
id: ZKSessionId,
password: ZKSessionPassword)
object ZKConnection {
/**
* `ZKConnection` encapsulates and maintaines connection to ZooKeeper.
*
* @param connectionString Connection string, consisting of comma separated
* host:port values.
* @param sessionTimeout Session timeout.
* @param connectionWatcher Hook function that will be called when the
* connection to the ZooKeeper server is established.
* @param session Optional session credentials used to reestablish a previous session.
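*
* A minimal usage sketch (the connection string and timeout below are
* illustrative values, not defaults):
* {{{
* import scala.concurrent.duration._
* val connection = ZKConnection("localhost:2181", 5.seconds)
* connection.awaitConnection(5.seconds)
* connection { client => /* interact with the underlying ZooKeeper client */ }
* connection.close()
* }}}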
*/
def apply(
connectionString: String,
sessionTimeout: FiniteDuration,
connectionWatcher: ZKConnectionWatcher = NoWatcher,
session: Option[ZKSession] = None): ZKConnection =
{
new impl.ZKConnectionImpl(
connectionString,
sessionTimeout,
connectionWatcher,
session)
}
}
object ZKConnectionWatcher {
def apply(reaction: ZKConnectionWatcher) = reaction
}
// vim: set ts=2 sw=2 et:
| ataraxer/zooowner | zooowner-core/src/main/scala/ZKConnection.scala | Scala | mit | 2,699 |
package com.github.sorhus.webalytics.cruft.redis
import akka.actor.ActorSystem
import com.github.sorhus.webalytics.akka.event._
import com.github.sorhus.webalytics.model._
import com.github.sorhus.webalytics.cruft.model._
import redis.RedisClient
import redis.commands.TransactionBuilder
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}
class RedisMetaDao(implicit akkaSystem: ActorSystem) extends MetaDao {
val r = "_" // reserved character
val elements = s"${r}elements$r"
val next_element = s"${r}next_element$r"
val redis: RedisClient = RedisClient()
val buckets = s"${r}buckets$r"
val dimensions = s"${r}dimensions$r"
def values(dimension: Dimension) = s"${r}values$r${dimension.d}$r"
override def addMeta(bucket: Bucket, element: Element) = {
val transaction: TransactionBuilder = redis.transaction()
element.e.foreach{case(dimension, vals) =>
transaction.sadd(dimensions, dimension.d)
vals.foreach{value =>
transaction.sadd(values(dimension), value.v)
}
}
transaction.exec()
}
override def getDocumentId(element_id: ElementId): Long = {
val result: Future[Long] = redis.hget(elements, element_id.e).flatMap {
case (Some(document_id)) => Future {
document_id.utf8String.toLong
}
case None => redis.incr(next_element).map { id: Long =>
redis.hset(elements, element_id.e, id)
id
}
}
Await.result(result, Duration.Inf)
}
def batchInsertDocumentIds(input: Map[String, Long]): Iterator[Future[Boolean]] = {
input.grouped(10000).map(batch => redis.hmset(elements, batch))
}
override def getDimensionValues(dimensions: List[Dimension]): List[(Dimension, Set[Value])] = {
def getValues(dimensions: List[Dimension]) = {
val transaction = redis.transaction()
val futures: List[(Dimension, Future[Set[Value]])] = dimensions.map{ dimension =>
dimension -> transaction.smembers(values(dimension)).map(seq => seq.map(_.utf8String).map(Value.apply).toSet)
}
transaction.exec()
futures.map{case(dimension, values) =>
dimension -> Await.result(values, Duration.Inf)
}
}
val input = dimensions match {
case Dimension("*") :: Nil =>
Await.result(redis.smembers(this.dimensions), Duration.Inf)
.map(_.utf8String)
.map(Dimension.apply)
.toList
case _ => dimensions
}
getValues(input)
}
}
| sorhus/webalytics | service/src/main/scala/com/github/sorhus/webalytics/cruft/redis/RedisMetaDao.scala | Scala | gpl-3.0 | 2,531 |
package com.michalplachta.shoesorter.api
import akka.actor.{ActorSystem, Props}
import com.michalplachta.shoesorter.DecidersGuardian
import com.typesafe.config.ConfigFactory
object SingleNodeApp extends App {
val config = ConfigFactory.load()
implicit val system = ActorSystem(config getString "application.name")
val decider = system.actorOf(Props[DecidersGuardian])
system.actorOf(Props(classOf[RestInterface], decider, 8080))
}
| miciek/akka-sharding-example | src/main/scala/com/michalplachta/shoesorter/api/SingleNodeApp.scala | Scala | mit | 442 |
package models
import java.util.concurrent.ConcurrentHashMap
case class Team(key: String, email: String)
object Teams {
import scala.collection.JavaConverters._
private val teams =
new ConcurrentHashMap[String, Team]().asScala
def list() = teams.values
def upsert(team: Team): Option[Team] =
teams.putIfAbsent(team.key, team)
def getByKey(key: String): Option[Team] = teams.get(key)
def delete(key: String): Option[Team] = teams.remove(key)
def clear(): Unit = teams.clear()
}
| ebowman/play-json-service-lib | play-2.4-example/app/models/Teams.scala | Scala | mit | 511 |
class A {
// First three compile.
def f1(x: Either[Int, String]) = x.right map (y => y)
def f2(x: Either[Int, String]) = for (y <- x.right) yield y
def f3(x: Either[Int, (String, Int)]) = x.right map { case (y1, y2) => (y1, y2) }
// Last one fails.
def f4(x: Either[Int, (String, Int)]) = for ((y1, y2) <- x.right) yield ((y1, y2))
/**
./a.scala:5: error: constructor cannot be instantiated to expected type;
found : (T1, T2)
required: Either[Nothing,(String, Int)]
def f4(x: Either[Int, (String, Int)]) = for ((y1, y2) <- x.right) yield ((y1, y2))
^
./a.scala:5: error: not found: value y1
def f4(x: Either[Int, (String, Int)]) = for ((y1, y2) <- x.right) yield ((y1, y2))
^
./a.scala:5: error: not found: value y2
def f4(x: Either[Int, (String, Int)]) = for ((y1, y2) <- x.right) yield ((y1, y2))
^
three errors found
**/
}
| felixmulder/scala | test/pending/pos/t5589.scala | Scala | bsd-3-clause | 1,065 |
/*
* Copyright 2019 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.scio.testing
import java.io.{InputStream, OutputStream}
import com.spotify.scio.coders.Coder
import com.spotify.scio.testing.CoderAssertions._
import org.apache.beam.sdk.coders.{AtomicCoder, StringUtf8Coder}
import org.scalatest.exceptions.TestFailedException
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
case class Foo(id: String)
class CoderAssertionsTest extends AnyFlatSpec with Matchers {
// A coder which roundtrips incorrectly
private def incorrectCoder: Coder[Foo] =
Coder.beam(new AtomicCoder[Foo] {
override def encode(value: Foo, outStream: OutputStream): Unit =
StringUtf8Coder.of().encode(value.id, outStream)
override def decode(inStream: InputStream): Foo =
Foo(StringUtf8Coder.of().decode(inStream) + "wrongBytes")
})
"CoderAssertions" should "support roundtrip" in {
Foo("bar") coderShould roundtrip()
an[TestFailedException] should be thrownBy {
implicit def coder: Coder[Foo] = incorrectCoder
Foo("baz") coderShould roundtrip()
}
}
it should "support fallback" in {
val str = "boom"
val cs: java.lang.CharSequence = str
cs coderShould fallback()
an[TestFailedException] should be thrownBy {
str coderShould fallback()
}
}
it should "support notFallback" in {
val str = "boom"
str coderShould notFallback()
an[TestFailedException] should be thrownBy {
val cs: java.lang.CharSequence = str
cs coderShould notFallback()
}
}
it should "support coderIsSerializable" in {
coderIsSerializable[Foo]
coderIsSerializable(Coder[Foo])
// Inner class's Coder is not serializable
case class InnerCaseClass(id: String)
an[TestFailedException] should be thrownBy {
coderIsSerializable[InnerCaseClass]
}
an[TestFailedException] should be thrownBy {
coderIsSerializable(Coder[InnerCaseClass])
}
}
}
| spotify/scio | scio-test/src/test/scala/com/spotify/scio/testing/CoderAssertionsTest.scala | Scala | apache-2.0 | 2,556 |
package com.bradbrok.filmomatic.state
import com.bradbrok.filmomatic.state.State._
import org.scalatest._
import scala.concurrent.duration._
import scala.language.postfixOps
class PlanSpec extends FlatSpec with Matchers {
"A balanced Plan" should "report itself as balanced" in {
val plan1 = Plan(stages = List(
Stage(bath = Some(Bath.A), steps = List(
Step(Idle, duration = 5 seconds),
Step(Fill, duration = 20 seconds),
Step(Settle, duration = 5 seconds, temperature = Some(47)),
Step(Agitate, duration = 5 seconds, temperature = Some(47)),
Step(Settle, duration = 5 seconds, temperature = Some(47)),
Step(Agitate, duration = 5 seconds, temperature = Some(47)),
Step(Settle, duration = 5 seconds),
Step(Reclaim, duration = 20 seconds),
Step(Idle, duration = 5 seconds)
))
))
plan1.isBalanced shouldBe true
}
"A plan with no steps" should "report that it has no steps" in {
val plan2 = Plan(stages = List(
Stage(bath = Some(Bath.A), steps = Nil)
))
plan2.isBalanced shouldBe true
}
"A plan that is imbalanced" should "report that it's imbalanced" in {
val plan3 = Plan(stages = List(
Stage(bath = Some(Bath.A), steps = List(
Step(Waste, duration = 5 seconds),
Step(Agitate, duration = 5 seconds),
Step(Idle, duration = 5 seconds)
))
))
plan3.isBalanced shouldBe false
}
}
| bradbrok/Film-O-Matic | core/src/test/scala/com/bradbrok/filmomatic/state/PlanSpec.scala | Scala | mit | 1,454 |
package uk.gov.dvla.vehicles.presentation.common.models
import play.api.data.Mapping
import play.api.data.Forms.{mapping, nonEmptyText}
import play.api.libs.json.Json
import play.api.data.validation.{ValidationError, Invalid, Valid, Constraint}
import uk.gov.dvla.vehicles.presentation.common.clientsidesession.CacheKey
case class ValtechSelectModel(selectedInList: String)
object ValtechSelectModel {
implicit val JsonFormat = Json.format[ValtechSelectModel]
final val ValtechSelectModelCacheKey = "valtechSelectModel"
implicit val Key = CacheKey[ValtechSelectModel](value = ValtechSelectModelCacheKey)
object Form {
final val SelectId = "demo_select"
final val FirstOption = "Option 1"
final val SecondOption = "Option 2"
final val DropDownOptions = Map(
FirstOption -> "This is the first option",
SecondOption -> "This is the second option"
)
final val Mapping = mapping(
SelectId -> dropDown(DropDownOptions)
)(ValtechSelectModel.apply)(ValtechSelectModel.unapply)
def dropDown(dropDownOptions: Map[String, String]): Mapping[String] = {
nonEmptyText(maxLength = 12) verifying validDropDown(dropDownOptions)
}
def validDropDown(dropDownOptions: Map[String, String]): Constraint[String] = Constraint[String]("constraint.validDropDown") { input =>
dropDownOptions.contains(input) match {
case true => Valid
case false => Invalid(ValidationError("error.dropDownInvalid"))
}
}
}
}
| dvla/vehicles-presentation-common | common-test/app/uk/gov/dvla/vehicles/presentation/common/models/ValtechSelectModel.scala | Scala | mit | 1,499 |
/* Copyright 2009-2016 EPFL, Lausanne */
package leon
package genc
package ir
import IRs.{ NIR, LIR }
import collection.mutable.{ Map => MutableMap, Set => MutableSet }
// Lift class types to their hierarchy top type in order to properly use tagged union.
final class ClassLifter(val ctx: LeonContext) extends Transformer(NIR, LIR) with MiniReporter {
import from._
type Env = Boolean // === lift flag
val Ø = false
private val lift = true
// We use a global database to ease the recursion. This works because ValDef's are unique.
private val valDB = MutableMap[ValDef, (to.ValDef, to.ClassType)]() // known runtime class type for values
private val arrayDB = MutableMap[ValDef, (to.ValDef, to.ClassType)]() // idem but for array elements
private val fieldValDB = MutableMap[(to.ClassDef, Id), to.ClassType]() // idem for class field type
private val fieldArrayDB = MutableMap[(to.ClassDef, Id), to.ClassType]() // idem for the elements of class fields that are arrays
private def isKnownValField(cd: to.ClassDef, fieldId: to.Id): Boolean = fieldValDB contains (cd -> fieldId)
private def isKnownArrayField(cd: to.ClassDef, fieldId: to.Id): Boolean = fieldArrayDB contains (cd -> fieldId)
// Lift context, params & return type
override def recImpl(fd: FunDef)(implicit env: Env): to.FunDef = {
val id = fd.id
val returnType = rec(fd.returnType)(lift)
val ctx = fd.ctx map lift
val params = fd.params map lift
// Handle recursive functions
val newer = to.FunDef(id, returnType, ctx, params, null)
registerFunction(fd, newer)
newer.body = rec(fd.body)(lift)
newer
}
// Lift fields types
override def recImpl(cd0: ClassDef, parent: Option[to.ClassDef])(implicit env: Env): to.ClassDef = {
val id = cd0.id
val isAbstract = cd0.isAbstract
val valFieldsToRegister = MutableSet[(Id, to.ClassType)]()
val arrayFieldsToRegister = MutableSet[(Id, to.ClassType)]() // for the element of the arrays
val fields = cd0.fields map { vd0 => // This is similar to lift(ValDef) but here we need to defer the registration
val vd = rec(vd0)(lift)
// "Pre"-register fields if class type or array type was lifted
val typ = rec(vd0.getType)(!lift)
typ match {
case ct @ to.ClassType(c) if c.hierarchyTop != c =>
valFieldsToRegister += (vd.id -> ct)
case to.ArrayType(ct @ to.ClassType(c)) if c.hierarchyTop != c =>
arrayFieldsToRegister += (vd.id -> ct)
case _ => ()
}
vd
}
val cd = to.ClassDef(id, parent, fields, isAbstract)
// Actually register the classes/arrays now that we have the corresponding ClassDef
valFieldsToRegister foreach { case (id, ct) =>
fieldValDB += (cd, id) -> ct
}
arrayFieldsToRegister foreach { case (id, ct) =>
fieldArrayDB += (cd, id) -> ct
}
cd
}
override def recImpl(e: Expr)(implicit env: Env): (to.Expr, Env) = e match {
case Decl(vd0) =>
val vd = lift(vd0)
to.Decl(vd) -> env
case DeclInit(vd0, value0) =>
val vd = lift(vd0)
val value = rec(value0)(lift)
to.DeclInit(vd, value) -> env
case FieldAccess(Castable(asa), fieldId) =>
to.FieldAccess(asa, fieldId) -> env
case App(fun0, ctx0, args0) =>
val fun = recCallable(fun0)
// Don't pass a casted object but the object itself
// (this can happen with pattern matching translation).
val ctx = ctx0 map removeTopCast
val args = args0 map removeTopCast
to.App(fun, ctx, args) -> env
case _ => super.recImpl(e)
}
override def rec(typ: Type)(implicit lift: Env): to.Type = typ match {
case ClassType(clazz) if lift => to.ClassType(rec(clazz.hierarchyTop))
case ArrayType(ArrayType(ClassType(_))) => fatalError("Multidimensional arrays of objects are not yet supported")
case typ => super.rec(typ)
}
private def removeTopCast(e: Expr): to.Expr = rec(e)(lift) match {
case to.AsA(expr, _) => expr
case e => e
}
private object Castable {
def unapply(e: Expr): Option[to.Expr] = e match {
case CastableImpl(asa, _) => Some(asa)
case _ => None
}
}
private object ClassTypedExpr {
def unapply(e: Expr): Option[(to.Expr, to.ClassDef)] = e.getType match {
case ClassType(cd) => Some(rec(e)(lift) -> rec(cd)(!lift))
case _ => None
}
}
// An expression can be safely cast to its known initial type (i.e. before lifting) when:
// - the vd referenced by a binding was registered with its unlifted type;
// - accessing a class field that was lifted, either by recursion or through an expression
// (e.g. function call) of a known concrete class type (before lifting);
// - accessing an element of an array that was lifted through a registered vd or a field
// access.
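// For example (illustrative names only): given `val c: Cat` where `Cat` is a subclass of the
// hierarchy top `Animal`, `c` is registered in valDB with its unlifted type `ClassType(Cat)`,
// so a later `c.name` is rewritten to `FieldAccess(AsA(c, Cat), name)` even though `c` is now
// declared with the lifted type `Animal`.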
private object CastableImpl {
def unapply(e0: Expr): Option[(to.AsA, to.ClassDef)] = e0 match {
case Binding(vd0) if valDB contains vd0 =>
val (vd, ct) = valDB(vd0)
val asa = to.AsA(to.Binding(vd), ct)
val cd = ct.clazz
Some(asa -> cd)
case FieldAccess(CastableImpl(asa1, cd1), fieldId) if isKnownValField(cd1, fieldId) =>
val ct2 = fieldValDB(cd1 -> fieldId)
val asa2 = to.AsA(to.FieldAccess(asa1, fieldId), ct2)
val cd2 = ct2.clazz
Some(asa2 -> cd2)
case FieldAccess(ClassTypedExpr(e, cd1), fieldId) if isKnownValField(cd1, fieldId) =>
val ct2 = fieldValDB(cd1 -> fieldId)
val asa2 = to.AsA(to.FieldAccess(e, fieldId), ct2)
val cd2 = ct2.clazz
Some(asa2 -> cd2)
case ArrayAccess(Binding(vd0), index0) if arrayDB contains vd0 =>
val (vd, ct) = arrayDB(vd0)
val asa = to.AsA(to.ArrayAccess(to.Binding(vd), rec(index0)(lift)), ct)
val cd = ct.clazz
Some(asa -> cd)
case ArrayAccess(FieldAccess(CastableImpl(asa1, cd1), fieldId), index0) if isKnownArrayField(cd1, fieldId) =>
val ct2 = fieldArrayDB(cd1 -> fieldId)
val asa2 = to.AsA(to.ArrayAccess(to.FieldAccess(asa1, fieldId), rec(index0)(lift)), ct2)
val cd2 = ct2.clazz
Some(asa2 -> cd2)
case ArrayAccess(FieldAccess(ClassTypedExpr(e, cd1), fieldId), index0) if isKnownArrayField(cd1, fieldId) =>
val ct2 = fieldArrayDB(cd1 -> fieldId)
val asa2 = to.AsA(to.ArrayAccess(to.FieldAccess(e, fieldId), rec(index0)(lift)), ct2)
val cd2 = ct2.clazz
Some(asa2 -> cd2)
case _ =>
None
}
}
private def lift(vd0: ValDef): to.ValDef = {
val vd = rec(vd0)(lift)
val typ = rec(vd0.getType)(!lift)
// Register val if class type or array type was lifted
typ match {
case ct @ to.ClassType(c) if c.hierarchyTop != c =>
valDB += vd0 -> (vd, ct)
case to.ArrayType(ct @ to.ClassType(c)) if c.hierarchyTop != c =>
arrayDB += vd0 -> (vd, ct)
case _ => ()
}
vd
}
}
| epfl-lara/leon | src/main/scala/leon/genc/ir/ClassLifter.scala | Scala | gpl-3.0 | 6,980 |
package com.avsystem.scex
package compiler
import java.{lang => jl, util => ju}
import com.avsystem.scex.util.MacroUtils
import scala.collection.mutable
import scala.reflect.internal.util._
import scala.reflect.io.AbstractFile
import scala.tools.nsc.Global
import scala.tools.nsc.plugins.Plugin
/**
* Created: 01-04-2014
* Author: ghik
*/
trait ScexGlobal extends Global with MacroUtils with SymbolErasures {
val universe: this.type = this
def loadAdditionalPlugins(): List[Plugin] = Nil
def parseExpression(code: String, template: Boolean) = {
val (wrappedCode, offset) = CodeGeneration.wrapForParsing(code, template)
val sourceFile = new BatchSourceFile("(for_parsing)", wrappedCode)
val unit = new CompilationUnit(sourceFile)
val PackageDef(_, List(ModuleDef(_, _, Template(_, _, List(_, expressionTree))))) = new syntaxAnalyzer.UnitParser(unit).parse()
moveTree(expressionTree, -offset)
}
def movePosition(pos: Position, offset: Int) = pos match {
case tp: TransparentPosition => new TransparentPosition(tp.source, tp.start + offset, tp.point + offset, tp.end + offset)
case rp: RangePosition => new RangePosition(rp.source, rp.start + offset, rp.point + offset, rp.end + offset)
case op: OffsetPosition => new OffsetPosition(op.source, op.point + offset)
case _ => pos
}
def moveTree(tree: Tree, offset: Int) = {
tree.foreach { t =>
t.setPos(movePosition(t.pos, offset))
}
tree
}
/**
* Locator with slightly modified inclusion check.
*
* @param pos the position that the located tree must include
*/
class Locator(pos: Position) extends Traverser {
var last: Tree = _
def locateIn(root: Tree): Tree = {
this.last = EmptyTree
traverse(root)
this.last
}
override def traverse(t: Tree) {
t match {
case tt: TypeTree if tt.original != null && includes(tt.pos, tt.original.pos) =>
traverse(tt.original)
case _ =>
if (includes(t.pos, pos)) {
if (!t.pos.isTransparent) last = t
super.traverse(t)
} else t match {
case mdef: MemberDef =>
traverseTrees(mdef.mods.annotations)
case _ =>
}
}
}
private def includes(pos1: Position, pos2: Position) =
(pos1 includes pos2) && pos1.end > pos2.start
}
override protected def loadRoughPluginsList() =
loadAdditionalPlugins() ::: super.loadRoughPluginsList()
// toplevel symbol dropping is implemented based on how it's done in the Scala Presentation Compiler
// (which happens e.g. when a source file is deleted)
private val toplevelSymbolsMap = new mutable.WeakHashMap[AbstractFile, mutable.Set[Symbol]]
override def registerTopLevelSym(sym: Symbol): Unit = {
toplevelSymbolsMap.getOrElseUpdate(sym.sourceFile, new mutable.HashSet) += sym
}
def forgetSymbolsFromSource(file: AbstractFile) = {
val symbols = toplevelSymbolsMap.get(file).map(_.toSet).getOrElse(Set.empty)
symbols.foreach { s =>
//like in: scala.tools.nsc.interactive.Global.filesDeleted
s.owner.info.decls unlink s
}
toplevelSymbolsMap.remove(file)
}
}
| pnf/scex | scex-core/src/main/scala/com/avsystem/scex/compiler/ScexGlobal.scala | Scala | apache-2.0 | 3,146 |
package ml.sparkling.graph.operators.algorithms.bfs
import java.util.Date
import ml.sparkling.graph.operators.algorithms.bfs.predicate.BFSPredicate
import ml.sparkling.graph.operators.algorithms.bfs.processor.BFSProcessor
import org.apache.spark.graphx.{Graph, VertexId}
import scala.reflect.ClassTag
/**
* Created by mth on 3/13/17.
*/
class BFSShortestPath[VD: ClassTag, ED, MD: ClassTag](vPredicate: BFSPredicate[VD, MD], processor: BFSProcessor[VD, ED, MD]) extends Serializable {
def computeSingleSelectedSourceBFS(graph: Graph[VD, ED], source: VertexId): Graph[VD, ED] = {
val initGraph = graph.mapVertices((vId, attr) => vPredicate.getInitialData(vId, attr)(source)).cache
val result = initGraph.ops.pregel[MD](processor.initialMessage)(
vPredicate.applyMessages,
processor.sendMessage,
processor.mergeMessages
)
initGraph.unpersist(false)
result
}
}
| sparkling-graph/sparkling-graph | operators/src/main/scala/ml/sparkling/graph/operators/algorithms/bfs/BFSShortestPath.scala | Scala | bsd-2-clause | 911 |
package edu.berkeley.nlp.entity.preprocess
import edu.berkeley.nlp.futile.LightRunner
import java.io.File
import edu.berkeley.nlp.futile.syntax.Trees.PennTreeReader
import edu.berkeley.nlp.futile.fig.basic.IOUtils
import edu.berkeley.nlp.entity.ConllDoc
import edu.berkeley.nlp.entity.DepConstTree
import scala.collection.mutable.ArrayBuffer
import edu.berkeley.nlp.entity.Chunk
import edu.berkeley.nlp.entity.ConllDocWriter
import edu.berkeley.nlp.entity.coref.OrderedClusteringBound
import edu.berkeley.nlp.entity.coref.Mention
import edu.berkeley.nlp.entity.coref.OrderedClustering
import edu.berkeley.nlp.futile.util.Logger
import edu.berkeley.nlp.futile.syntax.Tree
import scala.collection.JavaConverters._
import scala.collection.mutable.HashSet
import edu.berkeley.nlp.futile.syntax.Trees
/**
* Takes either a file or directory as input, reads in PTB parse trees (one per line per file),
* and writes the resulting CoNLL-format documents to a file or directory
*/
object PTBToConllMunger {
val input = ""
val output = ""
// Changes trees from having "Mr ." as two tokens to "Mr."; this appears to be
// a problem when using some tokenizers. Coref systems expect the latter.
val fixAbbrevs = false
val abbrevsToFix = new HashSet[String]() ++ Array[String]("Mr", "Mrs", "Ms", "Dr")
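// For example (illustrative tree): with fixAbbrevs = true, "(NP (NNP Mr) (. .) (NNP Smith))"
// becomes "(NP (NNP Mr.) (NNP Smith))": the "." is folded into the abbreviation token and its
// preterminal is dropped (see fixAbbrevs/transformFixAbbrevs below).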
def main(args: Array[String]) {
// LightRunner.initializeOutput(PTBToConllMunger.getClass());
LightRunner.populateScala(PTBToConllMunger.getClass(), args)
val inputFile = new File(input)
val outputFile = new File(output)
var outputWriter = if (outputFile.isDirectory) null else IOUtils.openOutHard(outputFile)
for (file <- (if (inputFile.isDirectory) inputFile.listFiles.toSeq else Seq(inputFile))) {
val doc = readParsesMakeDoc(file)
if (outputWriter == null) {
outputWriter = IOUtils.openOutHard(outputFile.getAbsolutePath + "/" + doc.docID)
ConllDocWriter.writeDoc(outputWriter, doc)
outputWriter.close
outputWriter = null
} else {
ConllDocWriter.writeDoc(outputWriter, doc)
}
}
if (outputWriter != null) {
outputWriter.close
}
}
def readParsesMakeDoc(file: File) = {
val inBuffer = IOUtils.openInHard(file)
val lineItr = IOUtils.lineIterator(inBuffer)
val words = new ArrayBuffer[Seq[String]]
val pos = new ArrayBuffer[Seq[String]]
val trees = new ArrayBuffer[DepConstTree]
while (lineItr.hasNext) {
val currLine = lineItr.next.trim
if (currLine != "" && currLine != "(())") {
val currParseRaw = PennTreeReader.parseHard(currLine, false)
val currParse = if (fixAbbrevs) {
fixAbbrevs(currParseRaw)
} else {
currParseRaw
}
val currDepConstTree = DepConstTree.apply(currParse)
words += currDepConstTree.words
pos += currDepConstTree.pos
trees += currDepConstTree
}
}
inBuffer.close()
val nerChunks = (0 until words.size).map(i => Seq[Chunk[String]]())
val corefChunks = (0 until words.size).map(i => Seq[Chunk[Int]]())
val speakers = (0 until words.size).map(i => (0 until words(i).size).map(j => "-"))
new ConllDoc(file.getName(), 0, words, pos, trees, nerChunks, corefChunks, speakers)
}
def fixAbbrevs(tree: Tree[String]): Tree[String] = {
val treeYield = tree.getYield().asScala
val abbrevIndices = new ArrayBuffer[Int]
for (abbrev <- abbrevsToFix) {
var startIdx = 0
var currIdx = treeYield.indexOf(abbrev, startIdx)
while (currIdx != -1) {
abbrevIndices += currIdx
startIdx = currIdx + 1
currIdx = treeYield.indexOf(abbrev, startIdx)
}
}
if (abbrevIndices.size == 0) {
tree
} else {
// The transformation could theoretically produce X over X, so strip such nodes afterwards
new Trees.XOverXRemover().transformTree(transformFixAbbrevs(tree, 0, treeYield.size, abbrevIndices.sorted))
}
}
/**
* Need to do two things to fix abbreviations: add . to the abbreviation and remove the . token
*/
def transformFixAbbrevs(tree: Tree[String], startIdx: Int, endIdx: Int, abbrevIndices: Seq[Int]): Tree[String] = {
// Leaf: fix the abbreviation label if necessary
if (tree.isLeaf()) {
if (abbrevIndices.contains(startIdx)) {
new Tree[String](tree.getLabel() + ".")
} else {
tree
}
} else {
// } else if (tree.isPreTerminal()) {
// new Tree[String](tree.getLabel(), transformFixAbbrevs(tree.getChildren().get(0), startIdx, endIdx, abbrevIndices)
// } else {
// Select things that either contain this index or the next (the .)
val matchingAbbrevIndices = abbrevIndices.filter(idx => startIdx <= idx + 1 && idx < endIdx)
if (matchingAbbrevIndices.size == 0) {
tree
} else {
val children = tree.getChildren().asScala
var currIdx = startIdx
val newChildren = new ArrayBuffer[Tree[String]]
for (child <- children) {
val childYield = child.getYield().asScala
val childYieldSize = childYield.size
val currEndIdx = currIdx + childYieldSize
// If this child only dominates the offending period
if (matchingAbbrevIndices.contains(currIdx - 1) && childYieldSize == 1 && childYield.head == ".") {
// Delete this child by doing nothing
} else {
// Otherwise proceed as normal
newChildren += transformFixAbbrevs(child, currIdx, currEndIdx, abbrevIndices)
}
currIdx += childYieldSize
}
new Tree[String](tree.getLabel(), newChildren.asJava)
}
}
}
} | gregdurrett/berkeley-entity | src/main/java/edu/berkeley/nlp/entity/preprocess/PTBToConllMunger.scala | Scala | gpl-3.0 | 5,626 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.io.{Externalizable, ObjectInput, ObjectOutput}
import java.sql.{Date, Timestamp}
import org.apache.spark.sql.catalyst.encoders.{OuterScopes, RowEncoder}
import org.apache.spark.sql.catalyst.util.sideBySide
import org.apache.spark.sql.execution.{LogicalRDD, RDDScanExec, SortExec}
import org.apache.spark.sql.execution.exchange.{BroadcastExchangeExec, ShuffleExchange}
import org.apache.spark.sql.execution.streaming.MemoryStream
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
class DatasetSuite extends QueryTest with SharedSQLContext {
import testImplicits._
private implicit val ordering = Ordering.by((c: ClassData) => c.a -> c.b)
test("checkAnswer should compare map correctly") {
val data = Seq((1, "2", Map(1 -> 2, 2 -> 1)))
checkAnswer(
data.toDF(),
Seq(Row(1, "2", Map(2 -> 1, 1 -> 2))))
}
test("toDS") {
val data = Seq(("a", 1), ("b", 2), ("c", 3))
checkDataset(
data.toDS(),
data: _*)
}
test("toDS with RDD") {
val ds = sparkContext.makeRDD(Seq("a", "b", "c"), 3).toDS()
checkDataset(
ds.mapPartitions(_ => Iterator(1)),
1, 1, 1)
}
test("emptyDataset") {
val ds = spark.emptyDataset[Int]
assert(ds.count() == 0L)
assert(ds.collect() sameElements Array.empty[Int])
}
test("range") {
assert(spark.range(10).map(_ + 1).reduce(_ + _) == 55)
assert(spark.range(10).map{ case i: java.lang.Long => i + 1 }.reduce(_ + _) == 55)
assert(spark.range(0, 10).map(_ + 1).reduce(_ + _) == 55)
assert(spark.range(0, 10).map{ case i: java.lang.Long => i + 1 }.reduce(_ + _) == 55)
assert(spark.range(0, 10, 1, 2).map(_ + 1).reduce(_ + _) == 55)
assert(spark.range(0, 10, 1, 2).map{ case i: java.lang.Long => i + 1 }.reduce(_ + _) == 55)
}
test("SPARK-12404: Datatype Helper Serializability") {
val ds = sparkContext.parallelize((
new Timestamp(0),
new Date(0),
java.math.BigDecimal.valueOf(1),
scala.math.BigDecimal(1)) :: Nil).toDS()
ds.collect()
}
test("collect, first, and take should use encoders for serialization") {
val item = NonSerializableCaseClass("abcd")
val ds = Seq(item).toDS()
assert(ds.collect().head == item)
assert(ds.collectAsList().get(0) == item)
assert(ds.first() == item)
assert(ds.take(1).head == item)
assert(ds.takeAsList(1).get(0) == item)
assert(ds.toLocalIterator().next() === item)
}
test("coalesce, repartition") {
val data = (1 to 100).map(i => ClassData(i.toString, i))
val ds = data.toDS()
intercept[IllegalArgumentException] {
ds.coalesce(0)
}
intercept[IllegalArgumentException] {
ds.repartition(0)
}
assert(ds.repartition(10).rdd.partitions.length == 10)
checkDatasetUnorderly(
ds.repartition(10),
data: _*)
assert(ds.coalesce(1).rdd.partitions.length == 1)
checkDatasetUnorderly(
ds.coalesce(1),
data: _*)
}
test("as tuple") {
val data = Seq(("a", 1), ("b", 2)).toDF("a", "b")
checkDataset(
data.as[(String, Int)],
("a", 1), ("b", 2))
}
test("as case class / collect") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDF("a", "b").as[ClassData]
checkDataset(
ds,
ClassData("a", 1), ClassData("b", 2), ClassData("c", 3))
assert(ds.collect().head == ClassData("a", 1))
}
test("as case class - reordered fields by name") {
val ds = Seq((1, "a"), (2, "b"), (3, "c")).toDF("b", "a").as[ClassData]
assert(ds.collect() === Array(ClassData("a", 1), ClassData("b", 2), ClassData("c", 3)))
}
test("as case class - take") {
val ds = Seq((1, "a"), (2, "b"), (3, "c")).toDF("b", "a").as[ClassData]
assert(ds.take(2) === Array(ClassData("a", 1), ClassData("b", 2)))
}
test("map") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkDataset(
ds.map(v => (v._1, v._2 + 1)),
("a", 2), ("b", 3), ("c", 4))
}
test("map with type change with the exact matched number of attributes") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkDataset(
ds.map(identity[(String, Int)])
.as[OtherTuple]
.map(identity[OtherTuple]),
OtherTuple("a", 1), OtherTuple("b", 2), OtherTuple("c", 3))
}
test("map with type change with less attributes") {
val ds = Seq(("a", 1, 3), ("b", 2, 4), ("c", 3, 5)).toDS()
checkDataset(
ds.as[OtherTuple]
.map(identity[OtherTuple]),
OtherTuple("a", 1), OtherTuple("b", 2), OtherTuple("c", 3))
}
test("map and group by with class data") {
// We inject a group by here to make sure this test case is future proof
// when we implement better pipelining and local execution mode.
val ds: Dataset[(ClassData, Long)] = Seq(ClassData("one", 1), ClassData("two", 2)).toDS()
.map(c => ClassData(c.a, c.b + 1))
.groupByKey(p => p).count()
checkDatasetUnorderly(
ds,
(ClassData("one", 2), 1L), (ClassData("two", 3), 1L))
}
test("select") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkDataset(
ds.select(expr("_2 + 1").as[Int]),
2, 3, 4)
}
test("SPARK-16853: select, case class and tuple") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkDataset(
ds.select(expr("struct(_2, _2)").as[(Int, Int)]): Dataset[(Int, Int)],
(1, 1), (2, 2), (3, 3))
checkDataset(
ds.select(expr("named_struct('a', _1, 'b', _2)").as[ClassData]): Dataset[ClassData],
ClassData("a", 1), ClassData("b", 2), ClassData("c", 3))
}
test("select 2") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkDataset(
ds.select(
expr("_1").as[String],
expr("_2").as[Int]) : Dataset[(String, Int)],
("a", 1), ("b", 2), ("c", 3))
}
test("select 2, primitive and tuple") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkDataset(
ds.select(
expr("_1").as[String],
expr("struct(_2, _2)").as[(Int, Int)]),
("a", (1, 1)), ("b", (2, 2)), ("c", (3, 3)))
}
test("select 2, primitive and class") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkDataset(
ds.select(
expr("_1").as[String],
expr("named_struct('a', _1, 'b', _2)").as[ClassData]),
("a", ClassData("a", 1)), ("b", ClassData("b", 2)), ("c", ClassData("c", 3)))
}
test("select 2, primitive and class, fields reordered") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkDataset(
ds.select(
expr("_1").as[String],
expr("named_struct('b', _2, 'a', _1)").as[ClassData]),
("a", ClassData("a", 1)), ("b", ClassData("b", 2)), ("c", ClassData("c", 3)))
}
test("filter") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkDataset(
ds.filter(_._1 == "b"),
("b", 2))
}
test("filter and then select") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
checkDataset(
ds.filter(_._1 == "b").select(expr("_1").as[String]),
"b")
}
test("SPARK-15632: typed filter should preserve the underlying logical schema") {
val ds = spark.range(10)
val ds2 = ds.filter(_ > 3)
assert(ds.schema.equals(ds2.schema))
}
test("foreach") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
val acc = sparkContext.longAccumulator
ds.foreach(v => acc.add(v._2))
assert(acc.value == 6)
}
test("foreachPartition") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
val acc = sparkContext.longAccumulator
ds.foreachPartition(_.foreach(v => acc.add(v._2)))
assert(acc.value == 6)
}
test("reduce") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
assert(ds.reduce((a, b) => ("sum", a._2 + b._2)) == ("sum", 6))
}
test("joinWith, flat schema") {
val ds1 = Seq(1, 2, 3).toDS().as("a")
val ds2 = Seq(1, 2).toDS().as("b")
checkDataset(
ds1.joinWith(ds2, $"a.value" === $"b.value", "inner"),
(1, 1), (2, 2))
}
test("joinWith tuple with primitive, expression") {
val ds1 = Seq(1, 1, 2).toDS()
val ds2 = Seq(("a", 1), ("b", 2)).toDS()
checkDataset(
ds1.joinWith(ds2, $"value" === $"_2"),
(1, ("a", 1)), (1, ("a", 1)), (2, ("b", 2)))
}
test("joinWith class with primitive, toDF") {
val ds1 = Seq(1, 1, 2).toDS()
val ds2 = Seq(ClassData("a", 1), ClassData("b", 2)).toDS()
checkAnswer(
ds1.joinWith(ds2, $"value" === $"b").toDF().select($"_1", $"_2.a", $"_2.b"),
Row(1, "a", 1) :: Row(1, "a", 1) :: Row(2, "b", 2) :: Nil)
}
test("multi-level joinWith") {
val ds1 = Seq(("a", 1), ("b", 2)).toDS().as("a")
val ds2 = Seq(("a", 1), ("b", 2)).toDS().as("b")
val ds3 = Seq(("a", 1), ("b", 2)).toDS().as("c")
checkDataset(
ds1.joinWith(ds2, $"a._2" === $"b._2").as("ab").joinWith(ds3, $"ab._1._2" === $"c._2"),
((("a", 1), ("a", 1)), ("a", 1)),
((("b", 2), ("b", 2)), ("b", 2)))
}
test("groupBy function, keys") {
val ds = Seq(("a", 1), ("b", 1)).toDS()
val grouped = ds.groupByKey(v => (1, v._2))
checkDatasetUnorderly(
grouped.keys,
(1, 1))
}
test("groupBy function, map") {
val ds = Seq(("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)).toDS()
val grouped = ds.groupByKey(v => (v._1, "word"))
val agged = grouped.mapGroups { case (g, iter) => (g._1, iter.map(_._2).sum) }
checkDatasetUnorderly(
agged,
("a", 30), ("b", 3), ("c", 1))
}
test("groupBy function, flatMap") {
val ds = Seq(("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)).toDS()
val grouped = ds.groupByKey(v => (v._1, "word"))
val agged = grouped.flatMapGroups { case (g, iter) =>
Iterator(g._1, iter.map(_._2).sum.toString)
}
checkDatasetUnorderly(
agged,
"a", "30", "b", "3", "c", "1")
}
test("groupBy function, mapValues, flatMap") {
val ds = Seq(("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)).toDS()
val keyValue = ds.groupByKey(_._1).mapValues(_._2)
val agged = keyValue.mapGroups { case (g, iter) => (g, iter.sum) }
checkDataset(agged, ("a", 30), ("b", 3), ("c", 1))
val keyValue1 = ds.groupByKey(t => (t._1, "key")).mapValues(t => (t._2, "value"))
val agged1 = keyValue1.mapGroups { case (g, iter) => (g._1, iter.map(_._1).sum) }
checkDataset(agged, ("a", 30), ("b", 3), ("c", 1))
}
test("groupBy function, reduce") {
val ds = Seq("abc", "xyz", "hello").toDS()
val agged = ds.groupByKey(_.length).reduceGroups(_ + _)
checkDatasetUnorderly(
agged,
3 -> "abcxyz", 5 -> "hello")
}
test("groupBy single field class, count") {
val ds = Seq("abc", "xyz", "hello").toDS()
val count = ds.groupByKey(s => Tuple1(s.length)).count()
checkDataset(
count,
(Tuple1(3), 2L), (Tuple1(5), 1L)
)
}
test("typed aggregation: expr") {
val ds = Seq(("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)).toDS()
checkDatasetUnorderly(
ds.groupByKey(_._1).agg(sum("_2").as[Long]),
("a", 30L), ("b", 3L), ("c", 1L))
}
test("typed aggregation: expr, expr") {
val ds = Seq(("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)).toDS()
checkDatasetUnorderly(
ds.groupByKey(_._1).agg(sum("_2").as[Long], sum($"_2" + 1).as[Long]),
("a", 30L, 32L), ("b", 3L, 5L), ("c", 1L, 2L))
}
test("typed aggregation: expr, expr, expr") {
val ds = Seq(("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)).toDS()
checkDatasetUnorderly(
ds.groupByKey(_._1).agg(sum("_2").as[Long], sum($"_2" + 1).as[Long], count("*")),
("a", 30L, 32L, 2L), ("b", 3L, 5L, 2L), ("c", 1L, 2L, 1L))
}
test("typed aggregation: expr, expr, expr, expr") {
val ds = Seq(("a", 10), ("a", 20), ("b", 1), ("b", 2), ("c", 1)).toDS()
checkDatasetUnorderly(
ds.groupByKey(_._1).agg(
sum("_2").as[Long],
sum($"_2" + 1).as[Long],
count("*").as[Long],
avg("_2").as[Double]),
("a", 30L, 32L, 2L, 15.0), ("b", 3L, 5L, 2L, 1.5), ("c", 1L, 2L, 1L, 1.0))
}
test("cogroup") {
val ds1 = Seq(1 -> "a", 3 -> "abc", 5 -> "hello", 3 -> "foo").toDS()
val ds2 = Seq(2 -> "q", 3 -> "w", 5 -> "e", 5 -> "r").toDS()
val cogrouped = ds1.groupByKey(_._1).cogroup(ds2.groupByKey(_._1)) { case (key, data1, data2) =>
Iterator(key -> (data1.map(_._2).mkString + "#" + data2.map(_._2).mkString))
}
checkDatasetUnorderly(
cogrouped,
1 -> "a#", 2 -> "#q", 3 -> "abcfoo#w", 5 -> "hello#er")
}
test("cogroup with complex data") {
val ds1 = Seq(1 -> ClassData("a", 1), 2 -> ClassData("b", 2)).toDS()
val ds2 = Seq(2 -> ClassData("c", 3), 3 -> ClassData("d", 4)).toDS()
val cogrouped = ds1.groupByKey(_._1).cogroup(ds2.groupByKey(_._1)) { case (key, data1, data2) =>
Iterator(key -> (data1.map(_._2.a).mkString + data2.map(_._2.a).mkString))
}
checkDatasetUnorderly(
cogrouped,
1 -> "a", 2 -> "bc", 3 -> "d")
}
test("sample with replacement") {
val n = 100
val data = sparkContext.parallelize(1 to n, 2).toDS()
checkDataset(
data.sample(withReplacement = true, 0.05, seed = 13),
5, 10, 52, 73)
}
test("sample without replacement") {
val n = 100
val data = sparkContext.parallelize(1 to n, 2).toDS()
checkDataset(
data.sample(withReplacement = false, 0.05, seed = 13),
3, 17, 27, 58, 62)
}
test("SPARK-16686: Dataset.sample with seed results shouldn't depend on downstream usage") {
val simpleUdf = udf((n: Int) => {
require(n != 1, "simpleUdf shouldn't see id=1!")
1
})
val df = Seq(
(0, "string0"),
(1, "string1"),
(2, "string2"),
(3, "string3"),
(4, "string4"),
(5, "string5"),
(6, "string6"),
(7, "string7"),
(8, "string8"),
(9, "string9")
).toDF("id", "stringData")
val sampleDF = df.sample(false, 0.7, 50)
// After sampling, sampleDF doesn't contain id=1.
assert(!sampleDF.select("id").collect.contains(1))
// simpleUdf should not encounter id=1.
checkAnswer(sampleDF.select(simpleUdf($"id")), List.fill(sampleDF.count.toInt)(Row(1)))
}
test("SPARK-11436: we should rebind right encoder when join 2 datasets") {
val ds1 = Seq("1", "2").toDS().as("a")
val ds2 = Seq(2, 3).toDS().as("b")
val joined = ds1.joinWith(ds2, $"a.value" === $"b.value")
checkDataset(joined, ("2", 2))
}
test("self join") {
val ds = Seq("1", "2").toDS().as("a")
val joined = ds.joinWith(ds, lit(true), "cross")
checkDataset(joined, ("1", "1"), ("1", "2"), ("2", "1"), ("2", "2"))
}
test("toString") {
val ds = Seq((1, 2)).toDS()
assert(ds.toString == "[_1: int, _2: int]")
}
test("Kryo encoder") {
implicit val kryoEncoder = Encoders.kryo[KryoData]
val ds = Seq(KryoData(1), KryoData(2)).toDS()
assert(ds.groupByKey(p => p).count().collect().toSet ==
Set((KryoData(1), 1L), (KryoData(2), 1L)))
}
test("Kryo encoder self join") {
implicit val kryoEncoder = Encoders.kryo[KryoData]
val ds = Seq(KryoData(1), KryoData(2)).toDS()
assert(ds.joinWith(ds, lit(true), "cross").collect().toSet ==
Set(
(KryoData(1), KryoData(1)),
(KryoData(1), KryoData(2)),
(KryoData(2), KryoData(1)),
(KryoData(2), KryoData(2))))
}
test("Kryo encoder: check the schema mismatch when converting DataFrame to Dataset") {
implicit val kryoEncoder = Encoders.kryo[KryoData]
val df = Seq((1)).toDF("a")
val e = intercept[AnalysisException] {
df.as[KryoData]
}.message
assert(e.contains("cannot cast IntegerType to BinaryType"))
}
test("Java encoder") {
implicit val kryoEncoder = Encoders.javaSerialization[JavaData]
val ds = Seq(JavaData(1), JavaData(2)).toDS()
assert(ds.groupByKey(p => p).count().collect().toSet ==
Set((JavaData(1), 1L), (JavaData(2), 1L)))
}
test("Java encoder self join") {
implicit val kryoEncoder = Encoders.javaSerialization[JavaData]
val ds = Seq(JavaData(1), JavaData(2)).toDS()
assert(ds.joinWith(ds, lit(true), "cross").collect().toSet ==
Set(
(JavaData(1), JavaData(1)),
(JavaData(1), JavaData(2)),
(JavaData(2), JavaData(1)),
(JavaData(2), JavaData(2))))
}
test("SPARK-14696: implicit encoders for boxed types") {
assert(spark.range(1).map { i => i : java.lang.Long }.head == 0L)
}
test("SPARK-11894: Incorrect results are returned when using null") {
val nullInt = null.asInstanceOf[java.lang.Integer]
val ds1 = Seq((nullInt, "1"), (new java.lang.Integer(22), "2")).toDS()
val ds2 = Seq((nullInt, "1"), (new java.lang.Integer(22), "2")).toDS()
checkDataset(
ds1.joinWith(ds2, lit(true), "cross"),
((nullInt, "1"), (nullInt, "1")),
((nullInt, "1"), (new java.lang.Integer(22), "2")),
((new java.lang.Integer(22), "2"), (nullInt, "1")),
((new java.lang.Integer(22), "2"), (new java.lang.Integer(22), "2")))
}
test("change encoder with compatible schema") {
val ds = Seq(2 -> 2.toByte, 3 -> 3.toByte).toDF("a", "b").as[ClassData]
assert(ds.collect().toSeq == Seq(ClassData("2", 2), ClassData("3", 3)))
}
test("verify mismatching field names fail with a good error") {
val ds = Seq(ClassData("a", 1)).toDS()
val e = intercept[AnalysisException] {
ds.as[ClassData2]
}
assert(e.getMessage.contains("cannot resolve '`c`' given input columns: [a, b]"), e.getMessage)
}
test("runtime nullability check") {
val schema = StructType(Seq(
StructField("f", StructType(Seq(
StructField("a", StringType, nullable = true),
StructField("b", IntegerType, nullable = true)
)), nullable = true)
))
def buildDataset(rows: Row*): Dataset[NestedStruct] = {
val rowRDD = spark.sparkContext.parallelize(rows)
spark.createDataFrame(rowRDD, schema).as[NestedStruct]
}
checkDataset(
buildDataset(Row(Row("hello", 1))),
NestedStruct(ClassData("hello", 1))
)
// Shouldn't throw runtime exception when parent object (`ClassData`) is null
assert(buildDataset(Row(null)).collect() === Array(NestedStruct(null)))
val message = intercept[RuntimeException] {
buildDataset(Row(Row("hello", null))).collect()
}.getMessage
assert(message.contains("Null value appeared in non-nullable field"))
}
test("SPARK-12478: top level null field") {
val ds0 = Seq(NestedStruct(null)).toDS()
checkDataset(ds0, NestedStruct(null))
checkAnswer(ds0.toDF(), Row(null))
val ds1 = Seq(DeepNestedStruct(NestedStruct(null))).toDS()
checkDataset(ds1, DeepNestedStruct(NestedStruct(null)))
checkAnswer(ds1.toDF(), Row(Row(null)))
}
test("support inner class in Dataset") {
val outer = new OuterClass
OuterScopes.addOuterScope(outer)
val ds = Seq(outer.InnerClass("1"), outer.InnerClass("2")).toDS()
checkDataset(ds.map(_.a), "1", "2")
}
test("grouping key and grouped value has field with same name") {
val ds = Seq(ClassData("a", 1), ClassData("a", 2)).toDS()
val agged = ds.groupByKey(d => ClassNullableData(d.a, null)).mapGroups {
case (key, values) => key.a + values.map(_.b).sum
}
checkDataset(agged, "a3")
}
test("cogroup's left and right side has field with same name") {
val left = Seq(ClassData("a", 1), ClassData("b", 2)).toDS()
val right = Seq(ClassNullableData("a", 3), ClassNullableData("b", 4)).toDS()
val cogrouped = left.groupByKey(_.a).cogroup(right.groupByKey(_.a)) {
case (key, lData, rData) => Iterator(key + lData.map(_.b).sum + rData.map(_.b.toInt).sum)
}
checkDataset(cogrouped, "a13", "b24")
}
test("give nice error message when the real number of fields doesn't match encoder schema") {
val ds = Seq(ClassData("a", 1), ClassData("b", 2)).toDS()
val message = intercept[AnalysisException] {
ds.as[(String, Int, Long)]
}.message
assert(message ==
"Try to map struct<a:string,b:int> to Tuple3, " +
"but failed as the number of fields does not line up.")
val message2 = intercept[AnalysisException] {
ds.as[Tuple1[String]]
}.message
assert(message2 ==
"Try to map struct<a:string,b:int> to Tuple1, " +
"but failed as the number of fields does not line up.")
}
test("SPARK-13440: Resolving option fields") {
val df = Seq(1, 2, 3).toDS()
val ds = df.as[Option[Int]]
checkDataset(
ds.filter(_ => true),
Some(1), Some(2), Some(3))
}
test("SPARK-13540 Dataset of nested class defined in Scala object") {
checkDataset(
Seq(OuterObject.InnerClass("foo")).toDS(),
OuterObject.InnerClass("foo"))
}
test("SPARK-14000: case class with tuple type field") {
checkDataset(
Seq(TupleClass((1, "a"))).toDS(),
TupleClass(1, "a")
)
}
test("isStreaming returns false for static Dataset") {
val data = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
assert(!data.isStreaming, "static Dataset returned true for 'isStreaming'.")
}
test("isStreaming returns true for streaming Dataset") {
val data = MemoryStream[Int].toDS()
assert(data.isStreaming, "streaming Dataset returned false for 'isStreaming'.")
}
test("isStreaming returns true after static and streaming Dataset join") {
val static = Seq(("a", 1), ("b", 2), ("c", 3)).toDF("a", "b")
val streaming = MemoryStream[Int].toDS().toDF("b")
val df = streaming.join(static, Seq("b"))
assert(df.isStreaming, "streaming Dataset returned false for 'isStreaming'.")
}
test("SPARK-14554: Dataset.map may generate wrong java code for wide table") {
val wideDF = spark.range(10).select(Seq.tabulate(1000) {i => ('id + i).as(s"c$i")} : _*)
// Make sure the generated code for this plan can compile and execute.
checkDataset(wideDF.map(_.getLong(0)), 0L until 10 : _*)
}
test("SPARK-14838: estimating sizeInBytes in operators with ObjectProducer shouldn't fail") {
val dataset = Seq(
(0, 3, 54f),
(0, 4, 44f),
(0, 5, 42f),
(1, 3, 39f),
(1, 5, 33f),
(1, 4, 26f),
(2, 3, 51f),
(2, 5, 45f),
(2, 4, 30f)
).toDF("user", "item", "rating")
val actual = dataset
.select("user", "item")
.as[(Int, Int)]
.groupByKey(_._1)
.mapGroups { case (src, ids) => (src, ids.map(_._2).toArray) }
.toDF("id", "actual")
dataset.join(actual, dataset("user") === actual("id")).collect()
}
test("SPARK-15097: implicits on dataset's spark can be imported") {
val dataset = Seq(1, 2, 3).toDS()
checkDataset(DatasetTransform.addOne(dataset), 2, 3, 4)
}
test("dataset.rdd with generic case class") {
val ds = Seq(Generic(1, 1.0), Generic(2, 2.0)).toDS()
val ds2 = ds.map(g => Generic(g.id, g.value))
assert(ds.rdd.map(r => r.id).count === 2)
assert(ds2.rdd.map(r => r.id).count === 2)
val ds3 = ds.map(g => new java.lang.Long(g.id))
assert(ds3.rdd.map(r => r).count === 2)
}
test("runtime null check for RowEncoder") {
val schema = new StructType().add("i", IntegerType, nullable = false)
val df = spark.range(10).map(l => {
if (l % 5 == 0) {
Row(null)
} else {
Row(l)
}
})(RowEncoder(schema))
val message = intercept[Exception] {
df.collect()
}.getMessage
assert(message.contains("The 0th field 'i' of input row cannot be null"))
}
test("row nullability mismatch") {
val schema = new StructType().add("a", StringType, true).add("b", StringType, false)
val rdd = spark.sparkContext.parallelize(Row(null, "123") :: Row("234", null) :: Nil)
val message = intercept[Exception] {
spark.createDataFrame(rdd, schema).collect()
}.getMessage
assert(message.contains("The 1th field 'b' of input row cannot be null"))
}
test("createTempView") {
val dataset = Seq(1, 2, 3).toDS()
dataset.createOrReplaceTempView("tempView")
// Overrides the existing temporary view with same name
// No exception should be thrown here.
dataset.createOrReplaceTempView("tempView")
// Throws AnalysisException if temp view with same name already exists
val e = intercept[AnalysisException](
dataset.createTempView("tempView"))
intercept[AnalysisException](dataset.createTempView("tempView"))
assert(e.message.contains("already exists"))
dataset.sparkSession.catalog.dropTempView("tempView")
}
test("SPARK-15381: physical object operator should define `reference` correctly") {
val df = Seq(1 -> 2).toDF("a", "b")
checkAnswer(df.map(row => row)(RowEncoder(df.schema)).select("b", "a"), Row(2, 1))
}
private def checkShowString[T](ds: Dataset[T], expected: String): Unit = {
val numRows = expected.split("\n").length - 4
val actual = ds.showString(numRows, truncate = 20)
if (expected != actual) {
fail(
"Dataset.showString() gives wrong result:\n\n" + sideBySide(
"== Expected ==\n" + expected,
"== Actual ==\n" + actual
).mkString("\n")
)
}
}
test("SPARK-15550 Dataset.show() should show contents of the underlying logical plan") {
val df = Seq((1, "foo", "extra"), (2, "bar", "extra")).toDF("b", "a", "c")
val ds = df.as[ClassData]
val expected =
"""+---+---+-----+
|| b| a| c|
|+---+---+-----+
|| 1|foo|extra|
|| 2|bar|extra|
|+---+---+-----+
|""".stripMargin
checkShowString(ds, expected)
}
test("SPARK-15550 Dataset.show() should show inner nested products as rows") {
val ds = Seq(
NestedStruct(ClassData("foo", 1)),
NestedStruct(ClassData("bar", 2))
).toDS()
val expected =
"""+-------+
|| f|
|+-------+
||[foo,1]|
||[bar,2]|
|+-------+
|""".stripMargin
checkShowString(ds, expected)
}
test(
"SPARK-15112: EmbedDeserializerInFilter should not optimize plan fragment that changes schema"
) {
val ds = Seq(1 -> "foo", 2 -> "bar").toDF("b", "a").as[ClassData]
assertResult(Seq(ClassData("foo", 1), ClassData("bar", 2))) {
ds.collect().toSeq
}
assertResult(Seq(ClassData("bar", 2))) {
ds.filter(_.b > 1).collect().toSeq
}
}
test("mapped dataset should resolve duplicated attributes for self join") {
val ds = Seq(1, 2, 3).toDS().map(_ + 1)
val ds1 = ds.as("d1")
val ds2 = ds.as("d2")
checkDatasetUnorderly(ds1.joinWith(ds2, $"d1.value" === $"d2.value"), (2, 2), (3, 3), (4, 4))
checkDatasetUnorderly(ds1.intersect(ds2), 2, 3, 4)
checkDatasetUnorderly(ds1.except(ds1))
}
test("SPARK-15441: Dataset outer join") {
val left = Seq(ClassData("a", 1), ClassData("b", 2)).toDS().as("left")
val right = Seq(ClassData("x", 2), ClassData("y", 3)).toDS().as("right")
val joined = left.joinWith(right, $"left.b" === $"right.b", "left")
val result = joined.collect().toSet
assert(result == Set(ClassData("a", 1) -> null, ClassData("b", 2) -> ClassData("x", 2)))
}
test("better error message when use java reserved keyword as field name") {
val e = intercept[UnsupportedOperationException] {
Seq(InvalidInJava(1)).toDS()
}
assert(e.getMessage.contains(
"`abstract` is a reserved keyword and cannot be used as field name"))
}
test("Dataset should support flat input object to be null") {
checkDataset(Seq("a", null).toDS(), "a", null)
}
test("Dataset should throw RuntimeException if top-level product input object is null") {
val e = intercept[RuntimeException](Seq(ClassData("a", 1), null).toDS())
assert(e.getMessage.contains("Null value appeared in non-nullable field"))
assert(e.getMessage.contains("top level Product input object"))
}
test("dropDuplicates") {
val ds = Seq(("a", 1), ("a", 2), ("b", 1), ("a", 1)).toDS()
checkDataset(
ds.dropDuplicates("_1"),
("a", 1), ("b", 1))
checkDataset(
ds.dropDuplicates("_2"),
("a", 1), ("a", 2))
checkDataset(
ds.dropDuplicates("_1", "_2"),
("a", 1), ("a", 2), ("b", 1))
}
test("dropDuplicates: columns with same column name") {
val ds1 = Seq(("a", 1), ("a", 2), ("b", 1), ("a", 1)).toDS()
val ds2 = Seq(("a", 1), ("a", 2), ("b", 1), ("a", 1)).toDS()
// The dataset joined has two columns of the same name "_2".
val joined = ds1.join(ds2, "_1").select(ds1("_2").as[Int], ds2("_2").as[Int])
checkDataset(
joined.dropDuplicates(),
(1, 2), (1, 1), (2, 1), (2, 2))
}
test("SPARK-16097: Encoders.tuple should handle null object correctly") {
val enc = Encoders.tuple(Encoders.tuple(Encoders.STRING, Encoders.STRING), Encoders.STRING)
val data = Seq((("a", "b"), "c"), (null, "d"))
val ds = spark.createDataset(data)(enc)
checkDataset(ds, (("a", "b"), "c"), (null, "d"))
}
test("SPARK-16995: flat mapping on Dataset containing a column created with lit/expr") {
val df = Seq("1").toDF("a")
import df.sparkSession.implicits._
checkDataset(
df.withColumn("b", lit(0)).as[ClassData]
.groupByKey(_.a).flatMapGroups { case (x, iter) => List[Int]() })
checkDataset(
df.withColumn("b", expr("0")).as[ClassData]
.groupByKey(_.a).flatMapGroups { case (x, iter) => List[Int]() })
}
test("SPARK-18125: Spark generated code causes CompileException") {
val data = Array(
Route("a", "b", 1),
Route("a", "b", 2),
Route("a", "c", 2),
Route("a", "d", 10),
Route("b", "a", 1),
Route("b", "a", 5),
Route("b", "c", 6))
val ds = sparkContext.parallelize(data).toDF.as[Route]
val grped = ds.map(r => GroupedRoutes(r.src, r.dest, Seq(r)))
.groupByKey(r => (r.src, r.dest))
.reduceGroups { (g1: GroupedRoutes, g2: GroupedRoutes) =>
GroupedRoutes(g1.src, g1.dest, g1.routes ++ g2.routes)
}.map(_._2)
val expected = Seq(
GroupedRoutes("a", "d", Seq(Route("a", "d", 10))),
GroupedRoutes("b", "c", Seq(Route("b", "c", 6))),
GroupedRoutes("a", "b", Seq(Route("a", "b", 1), Route("a", "b", 2))),
GroupedRoutes("b", "a", Seq(Route("b", "a", 1), Route("b", "a", 5))),
GroupedRoutes("a", "c", Seq(Route("a", "c", 2)))
)
implicit def ordering[GroupedRoutes]: Ordering[GroupedRoutes] = new Ordering[GroupedRoutes] {
override def compare(x: GroupedRoutes, y: GroupedRoutes): Int = {
x.toString.compareTo(y.toString)
}
}
checkDatasetUnorderly(grped, expected: _*)
}
test("SPARK-18189: Fix serialization issue in KeyValueGroupedDataset") {
val resultValue = 12345
val keyValueGrouped = Seq((1, 2), (3, 4)).toDS().groupByKey(_._1)
val mapGroups = keyValueGrouped.mapGroups((k, v) => (k, 1))
val broadcasted = spark.sparkContext.broadcast(resultValue)
// Using broadcast triggers serialization issue in KeyValueGroupedDataset
val dataset = mapGroups.map(_ => broadcasted.value)
assert(dataset.collect() sameElements Array(resultValue, resultValue))
}
Seq(true, false).foreach { eager =>
def testCheckpointing(testName: String)(f: => Unit): Unit = {
test(s"Dataset.checkpoint() - $testName (eager = $eager)") {
withTempDir { dir =>
val originalCheckpointDir = spark.sparkContext.checkpointDir
try {
spark.sparkContext.setCheckpointDir(dir.getCanonicalPath)
f
} finally {
// Since the original checkpointDir can be None, we need
// to set the variable directly.
spark.sparkContext.checkpointDir = originalCheckpointDir
}
}
}
}
testCheckpointing("basic") {
val ds = spark.range(10).repartition('id % 2).filter('id > 5).orderBy('id.desc)
val cp = ds.checkpoint(eager)
val logicalRDD = cp.logicalPlan match {
case plan: LogicalRDD => plan
case _ =>
val treeString = cp.logicalPlan.treeString(verbose = true)
fail(s"Expecting a LogicalRDD, but got\n$treeString")
}
val dsPhysicalPlan = ds.queryExecution.executedPlan
val cpPhysicalPlan = cp.queryExecution.executedPlan
assertResult(dsPhysicalPlan.outputPartitioning) { logicalRDD.outputPartitioning }
assertResult(dsPhysicalPlan.outputOrdering) { logicalRDD.outputOrdering }
assertResult(dsPhysicalPlan.outputPartitioning) { cpPhysicalPlan.outputPartitioning }
assertResult(dsPhysicalPlan.outputOrdering) { cpPhysicalPlan.outputOrdering }
// For a lazy checkpoint() call, the first check also materializes the checkpoint.
checkDataset(cp, (9L to 6L by -1L).map(java.lang.Long.valueOf): _*)
// Reads back from checkpointed data and check again.
checkDataset(cp, (9L to 6L by -1L).map(java.lang.Long.valueOf): _*)
}
testCheckpointing("should preserve partitioning information") {
val ds = spark.range(10).repartition('id % 2)
val cp = ds.checkpoint(eager)
val agg = cp.groupBy('id % 2).agg(count('id))
agg.queryExecution.executedPlan.collectFirst {
case ShuffleExchange(_, _: RDDScanExec, _) =>
case BroadcastExchangeExec(_, _: RDDScanExec) =>
}.foreach { _ =>
fail(
"No Exchange should be inserted above RDDScanExec since the checkpointed Dataset " +
"preserves partitioning information:\n\n" + agg.queryExecution
)
}
checkAnswer(agg, ds.groupBy('id % 2).agg(count('id)))
}
}
test("identity map for primitive arrays") {
val arrayByte = Array(1.toByte, 2.toByte, 3.toByte)
val arrayInt = Array(1, 2, 3)
val arrayLong = Array(1.toLong, 2.toLong, 3.toLong)
val arrayDouble = Array(1.1, 2.2, 3.3)
val arrayString = Array("a", "b", "c")
val dsByte = sparkContext.parallelize(Seq(arrayByte), 1).toDS.map(e => e)
val dsInt = sparkContext.parallelize(Seq(arrayInt), 1).toDS.map(e => e)
val dsLong = sparkContext.parallelize(Seq(arrayLong), 1).toDS.map(e => e)
val dsDouble = sparkContext.parallelize(Seq(arrayDouble), 1).toDS.map(e => e)
val dsString = sparkContext.parallelize(Seq(arrayString), 1).toDS.map(e => e)
checkDataset(dsByte, arrayByte)
checkDataset(dsInt, arrayInt)
checkDataset(dsLong, arrayLong)
checkDataset(dsDouble, arrayDouble)
checkDataset(dsString, arrayString)
}
test("SPARK-18251: the type of Dataset can't be Option of Product type") {
checkDataset(Seq(Some(1), None).toDS(), Some(1), None)
val e = intercept[UnsupportedOperationException] {
Seq(Some(1 -> "a"), None).toDS()
}
assert(e.getMessage.contains("Cannot create encoder for Option of Product type"))
}
test ("SPARK-17460: the sizeInBytes in Statistics shouldn't overflow to a negative number") {
// Since the sizeInBytes in Statistics could exceed the limit of an Int, we should use BigInt
// instead of Int for avoiding possible overflow.
val ds = (0 to 10000).map( i =>
(i, Seq((i, Seq((i, "This is really not that long of a string")))))).toDS()
val sizeInBytes = ds.logicalPlan.statistics.sizeInBytes
// sizeInBytes is 2404280404, before the fix, it overflows to a negative number
assert(sizeInBytes > 0)
}
test("SPARK-18717: code generation works for both scala.collection.Map" +
" and scala.collection.imutable.Map") {
val ds = Seq(WithImmutableMap("hi", Map(42L -> "foo"))).toDS
checkDataset(ds.map(t => t), WithImmutableMap("hi", Map(42L -> "foo")))
val ds2 = Seq(WithMap("hi", Map(42L -> "foo"))).toDS
checkDataset(ds2.map(t => t), WithMap("hi", Map(42L -> "foo")))
}
test("SPARK-20125: option of map") {
val ds = Seq(WithMapInOption(Some(Map(1 -> 1)))).toDS()
checkDataset(ds, WithMapInOption(Some(Map(1 -> 1))))
}
}
case class WithImmutableMap(id: String, map_test: scala.collection.immutable.Map[Long, String])
case class WithMap(id: String, map_test: scala.collection.Map[Long, String])
case class WithMapInOption(m: Option[scala.collection.Map[Int, Int]])
case class Generic[T](id: T, value: Double)
case class OtherTuple(_1: String, _2: Int)
case class TupleClass(data: (Int, String))
class OuterClass extends Serializable {
case class InnerClass(a: String)
}
object OuterObject {
case class InnerClass(a: String)
}
case class ClassData(a: String, b: Int)
case class ClassData2(c: String, d: Int)
case class ClassNullableData(a: String, b: Integer)
case class NestedStruct(f: ClassData)
case class DeepNestedStruct(f: NestedStruct)
case class InvalidInJava(`abstract`: Int)
/**
* A class used to test serialization using encoders. This class throws exceptions when using
* Java serialization -- so the only way it can be "serialized" is through our encoders.
*/
case class NonSerializableCaseClass(value: String) extends Externalizable {
override def readExternal(in: ObjectInput): Unit = {
throw new UnsupportedOperationException
}
override def writeExternal(out: ObjectOutput): Unit = {
throw new UnsupportedOperationException
}
}
/** Used to test Kryo encoder. */
class KryoData(val a: Int) {
override def equals(other: Any): Boolean = {
a == other.asInstanceOf[KryoData].a
}
override def hashCode: Int = a
override def toString: String = s"KryoData($a)"
}
object KryoData {
def apply(a: Int): KryoData = new KryoData(a)
}
/** Used to test Java encoder. */
class JavaData(val a: Int) extends Serializable {
override def equals(other: Any): Boolean = {
a == other.asInstanceOf[JavaData].a
}
override def hashCode: Int = a
override def toString: String = s"JavaData($a)"
}
object JavaData {
def apply(a: Int): JavaData = new JavaData(a)
}
/** Used to test importing dataset.spark.implicits._ */
object DatasetTransform {
def addOne(ds: Dataset[Int]): Dataset[Int] = {
import ds.sparkSession.implicits._
ds.map(_ + 1)
}
}
case class Route(src: String, dest: String, cost: Int)
case class GroupedRoutes(src: String, dest: String, routes: Seq[Route])
| u2009cf/spark-radar | sql/core/src/test/scala/org/apache/spark/sql/DatasetSuite.scala | Scala | apache-2.0 | 39,080 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import com.intel.analytics.bigdl.nn.abstractnn.TensorModule
import com.intel.analytics.bigdl.tensor.{DenseTensorApply, Tensor, TensorFunc6}
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import scala.reflect.ClassTag
/**
* This is a transfer layer which applies the hard shrinkage function
* element-wise to the input Tensor. The parameter lambda is set to 0.5
* by default
 *        ⎧ x, if x > lambda
 * f(x) = ⎨ x, if x < -lambda
 *        ⎩ 0, otherwise
 *
 * @param lambda a threshold value whose default value is 0.5
*/
@SerialVersionUID( 3551967457354343585L)
class HardShrink[T: ClassTag](lambda: Double = 0.5)
(implicit ev: TensorNumeric[T])
extends TensorModule[T] {
private val lam = ev.fromType[Double](lambda)
override def updateOutput(input: Tensor[T]): Tensor[T] = {
output.resizeAs(input)
output.map(input, (out, in) => {
if (ev.isGreater(in, lam) || ev.isGreater(ev.negative(lam), in)) {
in
} else {
ev.fromType[Int](0)
}
})
}
override def updateGradInput(input: Tensor[T], gradOutput: Tensor[T]): Tensor[T] = {
require(input.isSameSizeAs(gradOutput),
"Input should have the same size as gradOutput")
gradInput.resizeAs(input)
val func = new TensorFunc6[T] {
override def apply(data1: Array[T], offset1: Int, data2: Array[T],
offset2: Int, data3: Array[T], offset3: Int): Unit = {
if (ev.isGreater(data3(offset3), lam)
|| ev.isGreater(ev.negative(lam), data3(offset3))) {
data1(offset1) = data2(offset2)
} else {
data1(offset1) = ev.fromType[Double](0)
}
}
}
DenseTensorApply.apply3[T](gradInput, gradOutput, input, func)
gradInput
}
}
object HardShrink {
def apply[@specialized(Float, Double) T: ClassTag](
lambda: Double = 0.5)(implicit ev: TensorNumeric[T]) : HardShrink[T] = {
    new HardShrink[T](lambda)
}
}
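// Illustrative usage sketch (not part of the original file; the object name and sample values
// are hypothetical). It assumes the implicit TensorNumeric[Float] instance provided by
// com.intel.analytics.bigdl.numeric.NumericFloat and the standard Tensor/Storage factories.
object HardShrinkUsageSketch {
  import com.intel.analytics.bigdl.numeric.NumericFloat
  import com.intel.analytics.bigdl.tensor.{Storage, Tensor}

  def main(args: Array[String]): Unit = {
    val layer = HardShrink[Float](0.5)
    // Elements within [-lambda, lambda] are zeroed, the rest pass through unchanged.
    val input = Tensor[Float](Storage(Array(-1.0f, -0.3f, 0.2f, 0.9f)))
    val output = layer.forward(input) // expected elements: -1.0, 0.0, 0.0, 0.9
    println(output)
  }
}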
| psyyz10/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/nn/HardShrink.scala | Scala | apache-2.0 | 2,582 |
/**
* Copyright (c) 2007-2011 Eric Torreborre <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
* documentation files (the "Software"), to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all copies or substantial portions of
* the Software. Neither the name of specs nor the names of its contributors may be used to endorse or promote
* products derived from this software without specific prior written permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
* TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
* CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package org.specs.xml
import org.specs.collection.ExtendedIterable._
import org.specs.xml.NodeFunctions._
import scala.xml._
/**
* This class adds more methods to the NodeSeq class
*/
class ExtendedNodeSeq(ns: NodeSeq) {
def ==/(n: NodeSeq): Boolean = NodeFunctions.isEqualIgnoringSpace(ns, n)
def isEqualIgnoringSpace(n: NodeSeq): Boolean = NodeFunctions.isEqualIgnoringSpace(ns, n)
def isEqualIgnoringSpaceOrdered(n: NodeSeq): Boolean = NodeFunctions.isEqualIgnoringSpaceOrdered(ns, n)
}
/**
* This class adds more methods to the Node class
*/
class ExtendedNode(n: Node) {
/**
* @returns true if the Node represents some empty text (containing spaces or newlines)
*/
def isSpaceNode: Boolean = NodeFunctions.isSpaceNode(n)
}
/**
* This object provides implicit methods to extend Node and NodeSeq objects
*/
object ExtendedNode {
implicit def toExtendedNodeSeq(n: NodeSeq) = new ExtendedNodeSeq(n)
implicit def toExtendedNode(n: Node) = new ExtendedNode(n)
}
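// Illustrative usage sketch (hypothetical object name, not part of the original file):
// comparing XML while ignoring whitespace-only text nodes, via the implicit conversions above.
object ExtendedNodeUsageSketch {
  import ExtendedNode._

  // true: the spaces around <b/> on the right-hand side are whitespace-only text nodes and are ignored
  def demo: Boolean = <a><b/></a> ==/ <a> <b/> </a>
}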
/**
* This object provides useful functions for Nodes and NodeSeqs
*/
object NodeFunctions {
/**
* @returns true if the Node represents some empty text (containing spaces or newlines)
*/
def isSpaceNode(n1: Node): Boolean = n1 match {
case g: Group => false
    case _ => n1.label.equals("#PCDATA") && n1.text.matches("\\s*")
}
/**
* Alias for isEqualIgnoringSpace
*/
def ==/(node: NodeSeq, n: NodeSeq): Boolean = isEqualIgnoringSpace(node, n)
/**
   * @returns true if the two NodeSeqs are equal, ignoring whitespace-only text nodes and
   * requiring child nodes to appear in the same order
*/
def isEqualIgnoringSpaceOrdered(node: NodeSeq, n: NodeSeq): Boolean = {
def sameOrder(nodes1: NodeSeq, nodes2: NodeSeq) = nodes1.isSimilar(nodes2, isEqualIgnoringSpace _)
isEqualIgnoringSpace(node, n, sameOrder(_, _))
}
/**
   * Compares two NodeSeqs, ignoring whitespace-only text nodes. This version doesn't check
   * whether the nodes are in the same order.
   * @returns true if the two NodeSeqs are equal, ignoring whitespace-only text nodes
*/
def isEqualIgnoringSpace(node: NodeSeq, n: NodeSeq): Boolean = {
def sameAs(nodes1: NodeSeq, nodes2: NodeSeq) = nodes1.sameElementsAs(nodes2.toSeq, isEqualIgnoringSpace _)
isEqualIgnoringSpace(node, n, sameAs(_, _))
}
def isEqualIgnoringSpace(node: NodeSeq, n: NodeSeq, iterableComparison: Function2[NodeSeq, NodeSeq, Boolean]): Boolean = {
(node, n) match {
case (null, other) => other == null
case (other, null) => other == null
case (n1: Text, n2:Text) => n1.text.trim == n2.text.trim
case (n1: Text, n2:Atom[_]) => n1.text.trim == n2.text.trim
case (n1: Atom[_], n2:Text) => n1.text.trim == n2.text.trim
case (n1: Node, n2:Node) => (isSpaceNode(n1) && isSpaceNode(n2)) ||
n1.prefix == n2.prefix &&
attributesSet(n1) == attributesSet(n2) &&
n1.label == n2.label &&
iterableComparison(n1.child.filter(!isSpaceNode(_)), n2.child.filter(!isSpaceNode(_)))
case (n1: NodeSeq, n2: NodeSeq) => iterableComparison(n1.filter(!isSpaceNode(_)), n2.filter(!isSpaceNode(_)))
}
}
/** @return the set of attributes as a set of key/value */
private def attributesSet(n: Node): Set[(String, String)] = n.attributes.toSet.map((n:MetaData) => (n.key, n.value.mkString(",")))
def reduce[T](list: Seq[T], f: T => NodeSeq): NodeSeq = {
if (list.isEmpty)
NodeSeq.Empty
else if (list.size == 1)
f(list(0))
else
f(list(0)) ++ reduce(list.drop(1), f)
}
/** reduce a list with a function and an init NodeSeq value. */
def fold[T](initValue: NodeSeq)(list: Iterable[T], f: T => NodeSeq): NodeSeq = {
list.foldLeft(initValue)( (res, value) => res ++ f(value))
}
} | yyuu/specs | src/main/scala/org/specs/xml/ExtendedNode.scala | Scala | mit | 5,208 |
package com.rcirka.play.dynamodb.dao
import com.rcirka.play.dynamodb.requests.CreateTableRequest
import com.rcirka.play.dynamodb.results.DescribeTableResult
import com.rcirka.play.dynamodb.{DynamoDbWebService, DynamoDBClient}
import play.api.libs.json._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import retry._
import CreateTableRequest._
class GlobalDynamoDao(val client: DynamoDBClient) {
val webService = DynamoDbWebService(client)
// TODO: Move to central location
def listTables() : Future[Seq[String]] = {
webService.post("DynamoDB_20120810.ListTables", Json.obj()).map{ result =>
      (result.json \ "TableNames").as[Seq[String]]
}
}
def deleteTable(tableName: String) : Future[Unit] = {
webService.post("DynamoDB_20120810.DeleteTable", Json.obj("TableName" -> tableName)).map(x => ())
}
def createTable(tableName: String) : Future[Unit] = {
val json = Json.obj(
"KeySchema" -> Json.arr(
Json.obj(
"AttributeName" -> "id",
"KeyType" -> "HASH"
)
),
"AttributeDefinitions" -> Json.arr(
Json.obj(
"AttributeName" -> "id",
"AttributeType" -> "S"
)
),
"ProvisionedThroughput" -> Json.obj(
"ReadCapacityUnits" -> 5,
"WriteCapacityUnits" -> 5
),
"TableName" -> tableName
)
webService.post("DynamoDB_20120810.CreateTable", json).map(x => ())
}
def tableExists(tableName: String) : Future[Boolean] = {
webService.post("DynamoDB_20120810.ListTables", Json.obj("TableName" -> tableName)).map { result =>
val json = result.json.as[JsObject]
(json \\ "TableStatus").asOpt[String].exists(_.contains("ACTIVE", "CREATING", "UPDATING"))
}
}
def createTableIfMissing(tableName: String) : Unit = {
tableExists(tableName).map { result =>
println(s"RESULT - $result")
if (!result) createTable(tableName) else ()
}.onComplete {
case _ => ()
}
}
def createTable(request: CreateTableRequest) : Future[Unit] = {
webService.post("DynamoDB_20120810.CreateTable", Json.toJson(request)).map(x => ())
}
/**
   * The returned Future completes once the table status has turned ACTIVE.
   * @param request the create-table request
* @return
*/
def createTableOnComplete(request: CreateTableRequest) : Future[Unit] = {
Await.result(createTable(request), 10 seconds)
implicit val success = Success[DescribeTableResult](_.Table.TableStatus.exists(_ == "ACTIVE"))
val policy = retry.Pause(60, 1 second)
val future = policy { () =>
println("--- Waiting for create table ---")
describeTable(request.tableName)
}
val result = Await.result(future, 10 seconds)
    Future.successful(())
}
def describeTable(tableName: String) : Future[DescribeTableResult] = {
webService.post("DynamoDB_20120810.DescribeTable", Json.obj("TableName" -> tableName)).map(_.json.as[DescribeTableResult])
}
}
| rcirka/Play-DynamoDB | src/main/scala/com/rcirka/play/dynamodb/dao/GlobalDynamoDao.scala | Scala | mit | 3,013 |
package com.softwaremill.bootzooka.dao.sql
import com.softwaremill.bootzooka.dao.DatabaseConfig
import com.typesafe.config.ConfigFactory
object H2BrowserConsole extends App {
val config = new DatabaseConfig {
def rootConfig = ConfigFactory.load()
}
new Thread(new Runnable {
def run() = new org.h2.tools.Console().runTool("-url", SqlDatabase.embeddedConnectionStringFromConfig(config))
}).start()
println("The console is now running in the background.")
}
| umitunal/bootzooka | backend/src/main/scala/com/softwaremill/bootzooka/dao/sql/H2BrowserConsole.scala | Scala | apache-2.0 | 478 |
package filodb.query.exec.aggregator
import filodb.core.query.{MutableRowReader, RangeVector, RangeVectorKey, ResultSchema, TransientHistRow}
import filodb.memory.format.RowReader
object HistSumRowAggregator extends RowAggregator {
import filodb.memory.format.{vectors => bv}
class HistSumHolder(var timestamp: Long = 0L,
var h: bv.MutableHistogram = bv.Histogram.empty) extends AggregateHolder {
val row = new TransientHistRow()
def toRowReader: MutableRowReader = { row.setValues(timestamp, h); row }
def resetToZero(): Unit = h = bv.Histogram.empty
}
type AggHolderType = HistSumHolder
def zero: HistSumHolder = new HistSumHolder
def newRowToMapInto: MutableRowReader = new TransientHistRow()
def map(rvk: RangeVectorKey, item: RowReader, mapInto: MutableRowReader): RowReader = item
def reduceAggregate(acc: HistSumHolder, aggRes: RowReader): HistSumHolder = {
acc.timestamp = aggRes.getLong(0)
val newHist = aggRes.getHistogram(1)
acc.h match {
// sum is mutable histogram, copy to be sure it's our own copy
case hist if hist.numBuckets == 0 => acc.h = bv.MutableHistogram(newHist)
case h if newHist.numBuckets > 0 => acc.h.add(newHist.asInstanceOf[bv.HistogramWithBuckets])
case h =>
}
acc
}
def present(aggRangeVector: RangeVector, limit: Int): Seq[RangeVector] = Seq(aggRangeVector)
def reductionSchema(source: ResultSchema): ResultSchema = source
def presentationSchema(reductionSchema: ResultSchema): ResultSchema = reductionSchema
} | tuplejump/FiloDB | query/src/main/scala/filodb/query/exec/aggregator/HistSumRowAggregator.scala | Scala | apache-2.0 | 1,576 |
package persistent.script
import scala.slick.codegen.SourceCodeGenerator
/**
* Created by yangguo on 14-11-3.
*/
object AutoGen {
def main(args:Array[String]) :Unit={
SourceCodeGenerator.main(Array(
"scala.slick.driver.MySQLDriver",
"com.mysql.jdbc.Driver",
"jdbc:mysql://localhost:3306/crazycat",
"src/main/scala/",
"cn.changhong.persistent.Tables",
"yangguo",
"123456"
))
}
}
| guoyang2011/myfinagle | chcare_back/src/main/scala/cn/changhong/persistent/script/AutoGen.scala | Scala | apache-2.0 | 437 |
// Wei Chen - HDBSCAN Test
// 2016-11-12
import com.scalaml.TestData._
import com.scalaml.general.MatrixFunc._
import com.scalaml.algorithm.HDBSCAN
import org.scalatest.funsuite.AnyFunSuite
class HDBSCANSuite extends AnyFunSuite {
val hdbscan = new HDBSCAN()
test("HDBSCAN Test : Clustering Tiny Data") {
assert(hdbscan.clear())
assert(hdbscan.config(Map("k" -> 2, "limit" -> 2)))
val result = hdbscan.cluster(UNLABELED_TINY_DATA)
assert(arrayequal(result, LABEL_TINY_DATA))
}
test("HDBSCAN Test : Clustering Small Data") {
assert(hdbscan.clear())
assert(hdbscan.config(Map("k" -> 2, "limit" -> 2)))
val result = hdbscan.cluster(UNLABELED_SMALL_DATA)
assert(arrayequal(result, LABEL_SMALL_DATA))
}
test("HDBSCAN Test : Clustering Large Data") {
assert(hdbscan.clear())
assert(hdbscan.config(Map("k" -> 2, "limit" -> 2)))
val result = hdbscan.cluster(UNLABELED_LARGE_DATA)
assert(arrayequal(result, LABEL_LARGE_DATA))
}
test("HDBSCAN Test : Invalid Config") {
assert(hdbscan.clear())
assert(!hdbscan.config(Map("limit" -> "test")))
}
}
| Wei-1/Scala-Machine-Learning | src/test/scala/algorithm/clustering/HDBSCANTest.scala | Scala | mit | 1,189 |
import scala.quoted.*
class A[+X[_], -Y]
class P[T]
class B extends A[P, String]
inline def test(): Unit = ${ testExpr }
def testExpr(using Quotes): Expr[Unit] = {
import quotes.reflect.*
val t = TypeRepr.of[B]
val baseTypes = t.baseClasses.map(b => t.baseType(b))
'{
println(${Expr(baseTypes.map(_.show).mkString("\n"))})
}
}
| dotty-staging/dotty | tests/run-macros/i8514b/Macro_1.scala | Scala | apache-2.0 | 346 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka
import java.util.Properties
import joptsimple.OptionParser
import kafka.utils.Implicits._
import kafka.server.{KafkaServer, KafkaServerStartable}
import kafka.utils.{CommandLineUtils, Exit, Logging}
import org.apache.kafka.common.utils.{Java, LoggingSignalHandler, OperatingSystem, Utils}
import scala.jdk.CollectionConverters._
object Kafka extends Logging {
def getPropsFromArgs(args: Array[String]): Properties = {
val optionParser = new OptionParser(false)
val overrideOpt = optionParser.accepts("override", "Optional property that should override values set in server.properties file")
.withRequiredArg()
.ofType(classOf[String])
    // This is just to make the parameter show up in the help output; we are not actually using it, due to the
    // fact that this class ignores the first parameter, which is interpreted as positional and mandatory
    // but would not be mandatory if --version is specified.
    // This is a bit of an ugly crutch till we get a chance to rework the entire command line parsing.
optionParser.accepts("version", "Print version information and exit.")
if (args.length == 0 || args.contains("--help")) {
CommandLineUtils.printUsageAndDie(optionParser, "USAGE: java [options] %s server.properties [--override property=value]*".format(classOf[KafkaServer].getSimpleName()))
}
if (args.contains("--version")) {
CommandLineUtils.printVersionAndDie()
}
val props = Utils.loadProps(args(0))
if (args.length > 1) {
val options = optionParser.parse(args.slice(1, args.length): _*)
if (options.nonOptionArguments().size() > 0) {
CommandLineUtils.printUsageAndDie(optionParser, "Found non argument parameters: " + options.nonOptionArguments().toArray.mkString(","))
}
props ++= CommandLineUtils.parseKeyValueArgs(options.valuesOf(overrideOpt).asScala)
}
props
}
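  // Illustrative sketch (values are hypothetical) of how the merge behaves: properties are
  // loaded from the given file first, then each --override property=value takes precedence.
  //   getPropsFromArgs(Array("config/server.properties",
  //     "--override", "broker.id=1", "--override", "log.dirs=/tmp/kafka-logs"))
  //   // yields props with broker.id = 1 and log.dirs = /tmp/kafka-logs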
def main(args: Array[String]): Unit = {
try {
val serverProps = getPropsFromArgs(args)
val kafkaServerStartable = KafkaServerStartable.fromProps(serverProps)
try {
if (!OperatingSystem.IS_WINDOWS && !Java.isIbmJdk)
new LoggingSignalHandler().register()
} catch {
case e: ReflectiveOperationException =>
warn("Failed to register optional signal handler that logs a message when the process is terminated " +
s"by a signal. Reason for registration failure is: $e", e)
}
// attach shutdown handler to catch terminating signals as well as normal termination
Exit.addShutdownHook("kafka-shutdown-hook", kafkaServerStartable.shutdown)
kafkaServerStartable.startup()
kafkaServerStartable.awaitShutdown()
}
catch {
case e: Throwable =>
fatal("Exiting Kafka due to fatal exception", e)
Exit.exit(1)
}
Exit.exit(0)
}
}
| sslavic/kafka | core/src/main/scala/kafka/Kafka.scala | Scala | apache-2.0 | 3,675 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples.mllib
import scopt.OptionParser
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.SparkContext._
import org.apache.spark.mllib.linalg.Vector
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.tree.{DecisionTree, impurity}
import org.apache.spark.mllib.tree.configuration.{Algo, Strategy}
import org.apache.spark.mllib.tree.configuration.Algo._
import org.apache.spark.mllib.tree.model.DecisionTreeModel
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.rdd.RDD
/**
* An example runner for decision tree. Run with
* {{{
* ./bin/spark-example org.apache.spark.examples.mllib.DecisionTreeRunner [options]
* }}}
* If you use it as a template to create your own app, please use `spark-submit` to submit your app.
*/
object DecisionTreeRunner {
object ImpurityType extends Enumeration {
type ImpurityType = Value
val Gini, Entropy, Variance = Value
}
import ImpurityType._
case class Params(
input: String = null,
algo: Algo = Classification,
maxDepth: Int = 5,
impurity: ImpurityType = Gini,
maxBins: Int = 100)
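  // Illustrative invocation sketch (jar and data paths are hypothetical); the input file must
  // hold labeled points in the dense "label,f0 f1 f2 ..." format expected by loadLabeledData:
  //   bin/spark-submit --class org.apache.spark.examples.mllib.DecisionTreeRunner \
  //     examples.jar --algo Classification --impurity Gini --maxDepth 5 --maxBins 100 \
  //     hdfs://.../labeled_points.txt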
def main(args: Array[String]) {
val defaultParams = Params()
val parser = new OptionParser[Params]("DecisionTreeRunner") {
head("DecisionTreeRunner: an example decision tree app.")
opt[String]("algo")
.text(s"algorithm (${Algo.values.mkString(",")}), default: ${defaultParams.algo}")
.action((x, c) => c.copy(algo = Algo.withName(x)))
opt[String]("impurity")
.text(s"impurity type (${ImpurityType.values.mkString(",")}), " +
s"default: ${defaultParams.impurity}")
.action((x, c) => c.copy(impurity = ImpurityType.withName(x)))
opt[Int]("maxDepth")
.text(s"max depth of the tree, default: ${defaultParams.maxDepth}")
.action((x, c) => c.copy(maxDepth = x))
opt[Int]("maxBins")
.text(s"max number of bins, default: ${defaultParams.maxBins}")
.action((x, c) => c.copy(maxBins = x))
arg[String]("<input>")
.text("input paths to labeled examples in dense format (label,f0 f1 f2 ...)")
.required()
.action((x, c) => c.copy(input = x))
checkConfig { params =>
if (params.algo == Classification &&
(params.impurity == Gini || params.impurity == Entropy)) {
success
} else if (params.algo == Regression && params.impurity == Variance) {
success
} else {
failure(s"Algo ${params.algo} is not compatible with impurity ${params.impurity}.")
}
}
}
parser.parse(args, defaultParams).map { params =>
run(params)
}.getOrElse {
sys.exit(1)
}
}
def run(params: Params) {
val conf = new SparkConf().setAppName("DecisionTreeRunner")
val sc = new SparkContext(conf)
// Load training data and cache it.
val examples = MLUtils.loadLabeledData(sc, params.input).cache()
val splits = examples.randomSplit(Array(0.8, 0.2))
val training = splits(0).cache()
val test = splits(1).cache()
val numTraining = training.count()
val numTest = test.count()
println(s"numTraining = $numTraining, numTest = $numTest.")
examples.unpersist(blocking = false)
val impurityCalculator = params.impurity match {
case Gini => impurity.Gini
case Entropy => impurity.Entropy
case Variance => impurity.Variance
}
val strategy = new Strategy(params.algo, impurityCalculator, params.maxDepth, params.maxBins)
val model = DecisionTree.train(training, strategy)
if (params.algo == Classification) {
val accuracy = accuracyScore(model, test)
println(s"Test accuracy = $accuracy.")
}
if (params.algo == Regression) {
val mse = meanSquaredError(model, test)
println(s"Test mean squared error = $mse.")
}
sc.stop()
}
/**
* Calculates the classifier accuracy.
*/
private def accuracyScore(
model: DecisionTreeModel,
data: RDD[LabeledPoint],
threshold: Double = 0.5): Double = {
def predictedValue(features: Vector): Double = {
if (model.predict(features) < threshold) 0.0 else 1.0
}
val correctCount = data.filter(y => predictedValue(y.features) == y.label).count()
val count = data.count()
correctCount.toDouble / count
}
/**
* Calculates the mean squared error for regression.
*/
private def meanSquaredError(tree: DecisionTreeModel, data: RDD[LabeledPoint]): Double = {
data.map { y =>
val err = tree.predict(y.features) - y.label
err * err
}.mean()
}
}
| adobe-research/spark-cluster-deployment | initial-deployment-puppet/modules/spark/files/spark/examples/src/main/scala/org/apache/spark/examples/mllib/DecisionTreeRunner.scala | Scala | apache-2.0 | 5,464 |
package xyz.ariwaranosai.tidori.dom
import org.scalajs.dom.html.{BR, Span}
import scalatags.JsDom.all._
/**
* Created by ariwaranosai on 2017/1/9.
*
*/
object DomElements {
val cursor: Span = span(`class`:="typed_cursor")("|").render
def htmlBR: BR = br().render
}
| ariwaranosai/tidori | src/main/scala/xyz/ariwaranosai/tidori/dom/DomElements.scala | Scala | mit | 279 |
package commons.validations.constraints
case object PrefixOrSuffixWithWhiteSpaces extends Constraint
| Dasiu/play-framework-test-project | app/commons/validations/constraints/PrefixOrSuffixWithWhiteSpaces.scala | Scala | mit | 102 |
/*
* Copyright (c) 2014 Snowplow Analytics Ltd.
* All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache
* License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied.
*
* See the Apache License Version 2.0 for the specific language
* governing permissions and limitations there under.
*/
// TODO make this a package object
package com.snowplowanalytics.snowplow.storage.kinesis
// Amazon
import com.amazonaws.services.kinesis.connectors.elasticsearch.ElasticsearchObject
// Scalaz
import scalaz._
import Scalaz._
package object elasticsearch {
/**
* The original tab separated enriched event together with
* a validated ElasticsearchObject created from it (or list of errors
* if the creation process failed)
* Can't use NonEmptyList as it isn't serializable
*/
type ValidatedRecord = (String, Validation[List[String], JsonRecord])
type EmitterInput = (String, Validation[List[String], ElasticsearchObject])
}
| mdavid/lessig-bigdata | lib/snowplow/4-storage/kinesis-elasticsearch-sink/src/main/scala/com.snowplowanalytics.snowplow.storage.kinesis/package.scala | Scala | mit | 1,368 |
package chana
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.headers.RawHeader
import akka.http.scaladsl.server.Directives
import akka.stream.ActorMaterializer
import akka.util.Timeout
import chana.rest.RestRouteAkka
import scala.concurrent.duration._
/**
* Chana REST service
*/
object ChanaAkkaHttp extends scala.App {
implicit val system = ActorSystem("ChanaSystem")
implicit val materializer = ActorMaterializer()
implicit val dispatcher = system.dispatcher
val route = Directives.respondWithHeader(RawHeader("Access-Control-Allow-Origin", "*")) {
new ChanaRouteAkka(system).route
}
val webConfig = system.settings.config.getConfig("chana.web")
val source = Http().bind(interface = webConfig.getString("interface"), port = webConfig.getInt("port"))
source.runForeach { conn =>
conn.handleWith(route)
}
}
class ChanaRouteAkka(val system: ActorSystem) extends RestRouteAkka with Directives {
val readTimeout: Timeout = 5.seconds
val writeTimeout: Timeout = 5.seconds
val route = ping ~ restApi
}
| matthewtt/chana | src/main/scala/chana/ChanaAkkaHttp.scala | Scala | apache-2.0 | 1,085 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.orc
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hive.ql.io.orc.{OrcFile, Reader}
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
import org.apache.spark.sql.types.StructType
private[orc] object OrcFileOperator extends Logging {
/**
* Retrieves an ORC file reader from a given path. The path can point to either a directory or a
* single ORC file. If it points to a directory, it picks any non-empty ORC file within that
* directory.
*
* The reader returned by this method is mainly used for two purposes:
*
* 1. Retrieving file metadata (schema and compression codecs, etc.)
* 2. Read the actual file content (in this case, the given path should point to the target file)
*
   * @note As recorded by SPARK-8501, ORC writes an empty schema (<code>struct<></code>) to an
* ORC file if the file contains zero rows. This is OK for Hive since the schema of the
* table is managed by metastore. But this becomes a problem when reading ORC files
* directly from HDFS via Spark SQL, because we have to discover the schema from raw ORC
* files. So this method always tries to find an ORC file whose schema is non-empty, and
* create the result reader from that file. If no such file is found, it returns `None`.
* @todo Needs to consider all files when schema evolution is taken into account.
*/
def getFileReader(basePath: String, config: Option[Configuration] = None): Option[Reader] = {
def isWithNonEmptySchema(path: Path, reader: Reader): Boolean = {
reader.getObjectInspector match {
case oi: StructObjectInspector if oi.getAllStructFieldRefs.size() == 0 =>
logInfo(
s"ORC file $path has empty schema, it probably contains no rows. " +
"Trying to read another ORC file to figure out the schema.")
false
case _ => true
}
}
val conf = config.getOrElse(new Configuration)
val fs = {
val hdfsPath = new Path(basePath)
hdfsPath.getFileSystem(conf)
}
listOrcFiles(basePath, conf).iterator.map { path =>
path -> OrcFile.createReader(fs, path)
}.collectFirst {
case (path, reader) if isWithNonEmptySchema(path, reader) => reader
}
}
def readSchema(paths: Seq[String], conf: Option[Configuration]): Option[StructType] = {
// Take the first file where we can open a valid reader if we can find one. Otherwise just
// return None to indicate we can't infer the schema.
paths.flatMap(getFileReader(_, conf)).headOption.map { reader =>
val readerInspector = reader.getObjectInspector.asInstanceOf[StructObjectInspector]
val schema = readerInspector.getTypeName
logDebug(s"Reading schema from file $paths, got Hive schema string: $schema")
CatalystSqlParser.parseDataType(schema).asInstanceOf[StructType]
}
}
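  // Illustrative usage sketch (the path is hypothetical): infer the Catalyst schema directly
  // from ORC files. Per the note on getFileReader above, the first file with a non-empty
  // schema is used, and None is returned when every file has an empty schema.
  //   val inferred: Option[StructType] =
  //     OrcFileOperator.readSchema(Seq("/warehouse/t/part-00000.orc"), Some(new Configuration()))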
def getObjectInspector(
path: String, conf: Option[Configuration]): Option[StructObjectInspector] = {
getFileReader(path, conf).map(_.getObjectInspector.asInstanceOf[StructObjectInspector])
}
def listOrcFiles(pathStr: String, conf: Configuration): Seq[Path] = {
// TODO: Check if the paths coming in are already qualified and simplify.
val origPath = new Path(pathStr)
val fs = origPath.getFileSystem(conf)
val paths = SparkHadoopUtil.get.listLeafStatuses(fs, origPath)
.filterNot(_.isDirectory)
.map(_.getPath)
.filterNot(_.getName.startsWith("_"))
.filterNot(_.getName.startsWith("."))
paths
}
}
| gioenn/xSpark | sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileOperator.scala | Scala | apache-2.0 | 4,599 |
/*
* Copyright (C) 2009-2018 Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.mvc
import java.io.IOException
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Source
import akka.util.ByteString
import org.specs2.mutable.Specification
import org.specs2.specification.AfterAll
import play.core.test.FakeRequest
import play.api.http.ParserConfiguration
import scala.concurrent.{ Await, Future }
import scala.concurrent.duration.Duration
class RawBodyParserSpec extends Specification with AfterAll {
implicit val system = ActorSystem("raw-body-parser-spec")
implicit val materializer = ActorMaterializer()
def afterAll(): Unit = {
materializer.shutdown()
system.terminate()
}
val config = ParserConfiguration()
val parse = PlayBodyParsers()
def parse(body: ByteString, memoryThreshold: Int = config.maxMemoryBuffer, maxLength: Long = config.maxDiskBuffer)(parser: BodyParser[RawBuffer] = parse.raw(memoryThreshold, maxLength)): Either[Result, RawBuffer] = {
val request = FakeRequest(method = "GET", "/x")
Await.result(parser(request).run(Source.single(body)), Duration.Inf)
}
"Raw Body Parser" should {
"parse a strict body" >> {
val body = ByteString("lorem ipsum")
// Feed a strict element rather than a singleton source, strict element triggers
// fast path with zero materialization.
Await.result(parse.raw.apply(FakeRequest()).run(body), Duration.Inf) must beRight.like {
case rawBuffer => rawBuffer.asBytes() must beSome.like {
case outBytes => outBytes mustEqual body
}
}
}
"parse a simple body" >> {
val body = ByteString("lorem ipsum")
"successfully" in {
parse(body)() must beRight.like {
case rawBuffer => rawBuffer.asBytes() must beSome.like {
case outBytes => outBytes mustEqual body
}
}
}
"using a future" in {
import scala.concurrent.ExecutionContext.Implicits.global
parse(body)(parse.flatten(Future.successful(parse.raw()))) must beRight.like {
case rawBuffer => rawBuffer.asBytes() must beSome.like {
case outBytes =>
outBytes mustEqual body
}
}
}
}
"close the raw buffer after parsing the body" in {
val body = ByteString("lorem ipsum")
parse(body, memoryThreshold = 1)() must beRight.like {
case rawBuffer =>
rawBuffer.push(ByteString("This fails because the stream was closed!")) must throwA[IOException]
}
}
"fail to parse longer than allowed body" in {
val msg = ByteString("lorem ipsum")
parse(msg, maxLength = 1)() must beLeft
}
}
}
| Shenker93/playframework | framework/src/play/src/test/scala/play/api/mvc/RawBodyParserSpec.scala | Scala | apache-2.0 | 2,753 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.json
import java.io._
import java.nio.charset.{Charset, StandardCharsets, UnsupportedCharsetException}
import java.nio.file.Files
import java.sql.{Date, Timestamp}
import java.util.Locale
import com.fasterxml.jackson.core.JsonFactory
import org.apache.hadoop.fs.{Path, PathFilter}
import org.apache.hadoop.io.SequenceFile.CompressionType
import org.apache.hadoop.io.compress.GzipCodec
import org.apache.spark.{SparkException, TestUtils}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{functions => F, _}
import org.apache.spark.sql.catalyst.json.{CreateJacksonParser, JacksonParser, JSONOptions}
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.execution.ExternalRDD
import org.apache.spark.sql.execution.datasources.DataSource
import org.apache.spark.sql.execution.datasources.json.JsonInferSchema.compatibleType
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
class TestFileFilter extends PathFilter {
override def accept(path: Path): Boolean = path.getParent.getName != "p=2"
}
class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
import testImplicits._
def testFile(fileName: String): String = {
Thread.currentThread().getContextClassLoader.getResource(fileName).toString
}
test("Type promotion") {
def checkTypePromotion(expected: Any, actual: Any) {
assert(expected.getClass == actual.getClass,
s"Failed to promote ${actual.getClass} to ${expected.getClass}.")
assert(expected == actual,
s"Promoted value ${actual}(${actual.getClass}) does not equal the expected value " +
s"${expected}(${expected.getClass}).")
}
val factory = new JsonFactory()
def enforceCorrectType(value: Any, dataType: DataType): Any = {
val writer = new StringWriter()
Utils.tryWithResource(factory.createGenerator(writer)) { generator =>
generator.writeObject(value)
generator.flush()
}
val dummyOption = new JSONOptions(Map.empty[String, String], "GMT")
val dummySchema = StructType(Seq.empty)
val parser = new JacksonParser(dummySchema, dummyOption)
Utils.tryWithResource(factory.createParser(writer.toString)) { jsonParser =>
jsonParser.nextToken()
val converter = parser.makeConverter(dataType)
converter.apply(jsonParser)
}
}
val intNumber: Int = 2147483647
checkTypePromotion(intNumber, enforceCorrectType(intNumber, IntegerType))
checkTypePromotion(intNumber.toLong, enforceCorrectType(intNumber, LongType))
checkTypePromotion(intNumber.toDouble, enforceCorrectType(intNumber, DoubleType))
checkTypePromotion(
Decimal(intNumber), enforceCorrectType(intNumber, DecimalType.SYSTEM_DEFAULT))
val longNumber: Long = 9223372036854775807L
checkTypePromotion(longNumber, enforceCorrectType(longNumber, LongType))
checkTypePromotion(longNumber.toDouble, enforceCorrectType(longNumber, DoubleType))
checkTypePromotion(
Decimal(longNumber), enforceCorrectType(longNumber, DecimalType.SYSTEM_DEFAULT))
val doubleNumber: Double = 1.7976931348623157E308d
checkTypePromotion(doubleNumber.toDouble, enforceCorrectType(doubleNumber, DoubleType))
checkTypePromotion(DateTimeUtils.fromJavaTimestamp(new Timestamp(intNumber * 1000L)),
enforceCorrectType(intNumber, TimestampType))
checkTypePromotion(DateTimeUtils.fromJavaTimestamp(new Timestamp(intNumber.toLong * 1000L)),
enforceCorrectType(intNumber.toLong, TimestampType))
val strTime = "2014-09-30 12:34:56"
checkTypePromotion(DateTimeUtils.fromJavaTimestamp(Timestamp.valueOf(strTime)),
enforceCorrectType(strTime, TimestampType))
val strDate = "2014-10-15"
checkTypePromotion(
DateTimeUtils.fromJavaDate(Date.valueOf(strDate)), enforceCorrectType(strDate, DateType))
val ISO8601Time1 = "1970-01-01T01:00:01.0Z"
val ISO8601Time2 = "1970-01-01T02:00:01-01:00"
checkTypePromotion(DateTimeUtils.fromJavaTimestamp(new Timestamp(3601000)),
enforceCorrectType(ISO8601Time1, TimestampType))
checkTypePromotion(DateTimeUtils.fromJavaTimestamp(new Timestamp(10801000)),
enforceCorrectType(ISO8601Time2, TimestampType))
val ISO8601Date = "1970-01-01"
checkTypePromotion(DateTimeUtils.millisToDays(32400000),
enforceCorrectType(ISO8601Date, DateType))
}
test("Get compatible type") {
def checkDataType(t1: DataType, t2: DataType, expected: DataType) {
var actual = compatibleType(t1, t2)
assert(actual == expected,
s"Expected $expected as the most general data type for $t1 and $t2, found $actual")
actual = compatibleType(t2, t1)
assert(actual == expected,
s"Expected $expected as the most general data type for $t1 and $t2, found $actual")
}
// NullType
checkDataType(NullType, BooleanType, BooleanType)
checkDataType(NullType, IntegerType, IntegerType)
checkDataType(NullType, LongType, LongType)
checkDataType(NullType, DoubleType, DoubleType)
checkDataType(NullType, DecimalType.SYSTEM_DEFAULT, DecimalType.SYSTEM_DEFAULT)
checkDataType(NullType, StringType, StringType)
checkDataType(NullType, ArrayType(IntegerType), ArrayType(IntegerType))
checkDataType(NullType, StructType(Nil), StructType(Nil))
checkDataType(NullType, NullType, NullType)
// BooleanType
checkDataType(BooleanType, BooleanType, BooleanType)
checkDataType(BooleanType, IntegerType, StringType)
checkDataType(BooleanType, LongType, StringType)
checkDataType(BooleanType, DoubleType, StringType)
checkDataType(BooleanType, DecimalType.SYSTEM_DEFAULT, StringType)
checkDataType(BooleanType, StringType, StringType)
checkDataType(BooleanType, ArrayType(IntegerType), StringType)
checkDataType(BooleanType, StructType(Nil), StringType)
// IntegerType
checkDataType(IntegerType, IntegerType, IntegerType)
checkDataType(IntegerType, LongType, LongType)
checkDataType(IntegerType, DoubleType, DoubleType)
checkDataType(IntegerType, DecimalType.SYSTEM_DEFAULT, DecimalType.SYSTEM_DEFAULT)
checkDataType(IntegerType, StringType, StringType)
checkDataType(IntegerType, ArrayType(IntegerType), StringType)
checkDataType(IntegerType, StructType(Nil), StringType)
// LongType
checkDataType(LongType, LongType, LongType)
checkDataType(LongType, DoubleType, DoubleType)
checkDataType(LongType, DecimalType.SYSTEM_DEFAULT, DecimalType.SYSTEM_DEFAULT)
checkDataType(LongType, StringType, StringType)
checkDataType(LongType, ArrayType(IntegerType), StringType)
checkDataType(LongType, StructType(Nil), StringType)
// DoubleType
checkDataType(DoubleType, DoubleType, DoubleType)
checkDataType(DoubleType, DecimalType.SYSTEM_DEFAULT, DoubleType)
checkDataType(DoubleType, StringType, StringType)
checkDataType(DoubleType, ArrayType(IntegerType), StringType)
checkDataType(DoubleType, StructType(Nil), StringType)
// DecimalType
checkDataType(DecimalType.SYSTEM_DEFAULT, DecimalType.SYSTEM_DEFAULT,
DecimalType.SYSTEM_DEFAULT)
checkDataType(DecimalType.SYSTEM_DEFAULT, StringType, StringType)
checkDataType(DecimalType.SYSTEM_DEFAULT, ArrayType(IntegerType), StringType)
checkDataType(DecimalType.SYSTEM_DEFAULT, StructType(Nil), StringType)
// StringType
checkDataType(StringType, StringType, StringType)
checkDataType(StringType, ArrayType(IntegerType), StringType)
checkDataType(StringType, StructType(Nil), StringType)
// ArrayType
checkDataType(ArrayType(IntegerType), ArrayType(IntegerType), ArrayType(IntegerType))
checkDataType(ArrayType(IntegerType), ArrayType(LongType), ArrayType(LongType))
checkDataType(ArrayType(IntegerType), ArrayType(StringType), ArrayType(StringType))
checkDataType(ArrayType(IntegerType), StructType(Nil), StringType)
checkDataType(
ArrayType(IntegerType, true), ArrayType(IntegerType), ArrayType(IntegerType, true))
checkDataType(
ArrayType(IntegerType, true), ArrayType(IntegerType, false), ArrayType(IntegerType, true))
checkDataType(
ArrayType(IntegerType, true), ArrayType(IntegerType, true), ArrayType(IntegerType, true))
checkDataType(
ArrayType(IntegerType, false), ArrayType(IntegerType), ArrayType(IntegerType, true))
checkDataType(
ArrayType(IntegerType, false), ArrayType(IntegerType, false), ArrayType(IntegerType, false))
checkDataType(
ArrayType(IntegerType, false), ArrayType(IntegerType, true), ArrayType(IntegerType, true))
// StructType
checkDataType(StructType(Nil), StructType(Nil), StructType(Nil))
checkDataType(
StructType(StructField("f1", IntegerType, true) :: Nil),
StructType(StructField("f1", IntegerType, true) :: Nil),
StructType(StructField("f1", IntegerType, true) :: Nil))
checkDataType(
StructType(StructField("f1", IntegerType, true) :: Nil),
StructType(Nil),
StructType(StructField("f1", IntegerType, true) :: Nil))
checkDataType(
StructType(
StructField("f1", IntegerType, true) ::
StructField("f2", IntegerType, true) :: Nil),
StructType(StructField("f1", LongType, true) :: Nil),
StructType(
StructField("f1", LongType, true) ::
StructField("f2", IntegerType, true) :: Nil))
checkDataType(
StructType(
StructField("f1", IntegerType, true) :: Nil),
StructType(
StructField("f2", IntegerType, true) :: Nil),
StructType(
StructField("f1", IntegerType, true) ::
StructField("f2", IntegerType, true) :: Nil))
checkDataType(
StructType(
StructField("f1", IntegerType, true) :: Nil),
DecimalType.SYSTEM_DEFAULT,
StringType)
}
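  // A rough summary of the pairwise widening exercised above (a reading of the assertions, not
  // an authoritative statement of the inference rules): NullType is absorbed by any other type,
  // integral and floating-point types widen towards LongType, DoubleType or DecimalType, and
  // incompatible pairs (e.g. Boolean vs. Integer, Struct vs. Array) fall back to StringType.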
test("Complex field and type inferring with null in sampling") {
val jsonDF = spark.read.json(jsonNullStruct)
val expectedSchema = StructType(
StructField("headers", StructType(
StructField("Charset", StringType, true) ::
StructField("Host", StringType, true) :: Nil)
, true) ::
StructField("ip", StringType, true) ::
StructField("nullstr", StringType, true):: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select nullstr, headers.Host from jsonTable"),
Seq(Row("", "1.abc.com"), Row("", null), Row("", null), Row(null, null))
)
}
test("Primitive field and type inferring") {
val jsonDF = spark.read.json(primitiveFieldAndType)
val expectedSchema = StructType(
StructField("bigInteger", DecimalType(20, 0), true) ::
StructField("boolean", BooleanType, true) ::
StructField("double", DoubleType, true) ::
StructField("integer", LongType, true) ::
StructField("long", LongType, true) ::
StructField("null", StringType, true) ::
StructField("string", StringType, true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
Row(new java.math.BigDecimal("92233720368547758070"),
true,
1.7976931348623157E308,
10,
21474836470L,
null,
"this is a simple string.")
)
}
test("Complex field and type inferring") {
val jsonDF = spark.read.json(complexFieldAndType1)
val expectedSchema = StructType(
StructField("arrayOfArray1", ArrayType(ArrayType(StringType, true), true), true) ::
StructField("arrayOfArray2", ArrayType(ArrayType(DoubleType, true), true), true) ::
StructField("arrayOfBigInteger", ArrayType(DecimalType(21, 0), true), true) ::
StructField("arrayOfBoolean", ArrayType(BooleanType, true), true) ::
StructField("arrayOfDouble", ArrayType(DoubleType, true), true) ::
StructField("arrayOfInteger", ArrayType(LongType, true), true) ::
StructField("arrayOfLong", ArrayType(LongType, true), true) ::
StructField("arrayOfNull", ArrayType(StringType, true), true) ::
StructField("arrayOfString", ArrayType(StringType, true), true) ::
StructField("arrayOfStruct", ArrayType(
StructType(
StructField("field1", BooleanType, true) ::
StructField("field2", StringType, true) ::
StructField("field3", StringType, true) :: Nil), true), true) ::
StructField("struct", StructType(
StructField("field1", BooleanType, true) ::
StructField("field2", DecimalType(20, 0), true) :: Nil), true) ::
StructField("structWithArrayFields", StructType(
StructField("field1", ArrayType(LongType, true), true) ::
StructField("field2", ArrayType(StringType, true), true) :: Nil), true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
// Access elements of a primitive array.
checkAnswer(
sql("select arrayOfString[0], arrayOfString[1], arrayOfString[2] from jsonTable"),
Row("str1", "str2", null)
)
// Access an array of null values.
checkAnswer(
sql("select arrayOfNull from jsonTable"),
Row(Seq(null, null, null, null))
)
// Access elements of a BigInteger array (we use DecimalType internally).
checkAnswer(
sql("select arrayOfBigInteger[0], arrayOfBigInteger[1], arrayOfBigInteger[2] from jsonTable"),
Row(new java.math.BigDecimal("922337203685477580700"),
new java.math.BigDecimal("-922337203685477580800"), null)
)
// Access elements of an array of arrays.
checkAnswer(
sql("select arrayOfArray1[0], arrayOfArray1[1] from jsonTable"),
Row(Seq("1", "2", "3"), Seq("str1", "str2"))
)
// Access elements of an array of arrays.
checkAnswer(
sql("select arrayOfArray2[0], arrayOfArray2[1] from jsonTable"),
Row(Seq(1.0, 2.0, 3.0), Seq(1.1, 2.1, 3.1))
)
    // Access elements of an array nested inside a field of type ArrayType(ArrayType).
checkAnswer(
sql("select arrayOfArray1[1][1], arrayOfArray2[1][1] from jsonTable"),
Row("str2", 2.1)
)
// Access elements of an array of structs.
checkAnswer(
sql("select arrayOfStruct[0], arrayOfStruct[1], arrayOfStruct[2], arrayOfStruct[3] " +
"from jsonTable"),
Row(
Row(true, "str1", null),
Row(false, null, null),
Row(null, null, null),
null)
)
// Access a struct and fields inside of it.
checkAnswer(
sql("select struct, struct.field1, struct.field2 from jsonTable"),
Row(
Row(true, new java.math.BigDecimal("92233720368547758070")),
true,
new java.math.BigDecimal("92233720368547758070")) :: Nil
)
// Access an array field of a struct.
checkAnswer(
sql("select structWithArrayFields.field1, structWithArrayFields.field2 from jsonTable"),
Row(Seq(4, 5, 6), Seq("str1", "str2"))
)
// Access elements of an array field of a struct.
checkAnswer(
sql("select structWithArrayFields.field1[1], structWithArrayFields.field2[3] from jsonTable"),
Row(5, null)
)
}
test("GetField operation on complex data type") {
val jsonDF = spark.read.json(complexFieldAndType1)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select arrayOfStruct[0].field1, arrayOfStruct[0].field2 from jsonTable"),
Row(true, "str1")
)
// Getting all values of a specific field from an array of structs.
checkAnswer(
sql("select arrayOfStruct.field1, arrayOfStruct.field2 from jsonTable"),
Row(Seq(true, false, null), Seq("str1", null, null))
)
}
test("Type conflict in primitive field values") {
val jsonDF = spark.read.json(primitiveFieldValueTypeConflict)
val expectedSchema = StructType(
StructField("num_bool", StringType, true) ::
StructField("num_num_1", LongType, true) ::
StructField("num_num_2", DoubleType, true) ::
StructField("num_num_3", DoubleType, true) ::
StructField("num_str", StringType, true) ::
StructField("str_bool", StringType, true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
Row("true", 11L, null, 1.1, "13.1", "str1") ::
Row("12", null, 21474836470.9, null, null, "true") ::
Row("false", 21474836470L, 92233720368547758070d, 100, "str1", "false") ::
Row(null, 21474836570L, 1.1, 21474836470L, "92233720368547758070", null) :: Nil
)
// Number and Boolean conflict: resolve the type as number in this query.
checkAnswer(
sql("select num_bool - 10 from jsonTable where num_bool > 11"),
Row(2)
)
// Widening to LongType
checkAnswer(
sql("select num_num_1 - 100 from jsonTable where num_num_1 > 11"),
Row(21474836370L) :: Row(21474836470L) :: Nil
)
checkAnswer(
sql("select num_num_1 - 100 from jsonTable where num_num_1 > 10"),
Row(-89) :: Row(21474836370L) :: Row(21474836470L) :: Nil
)
    // Widening to DoubleType
checkAnswer(
sql("select num_num_2 + 1.3 from jsonTable where num_num_2 > 1.1"),
Row(21474836472.2) ::
Row(92233720368547758071.3) :: Nil
)
// Widening to Double
checkAnswer(
sql("select num_num_3 + 1.2 from jsonTable where num_num_3 > 1.1"),
Row(101.2) :: Row(21474836471.2) :: Nil
)
// Number and String conflict: resolve the type as number in this query.
checkAnswer(
sql("select num_str + 1.2 from jsonTable where num_str > 14d"),
Row(92233720368547758071.2)
)
// Number and String conflict: resolve the type as number in this query.
checkAnswer(
sql("select num_str + 1.2 from jsonTable where num_str >= 92233720368547758060"),
Row(new java.math.BigDecimal("92233720368547758071.2").doubleValue)
)
// String and Boolean conflict: resolve the type as string.
checkAnswer(
sql("select * from jsonTable where str_bool = 'str1'"),
Row("true", 11L, null, 1.1, "13.1", "str1")
)
}
ignore("Type conflict in primitive field values (Ignored)") {
val jsonDF = spark.read.json(primitiveFieldValueTypeConflict)
jsonDF.createOrReplaceTempView("jsonTable")
// Right now, the analyzer does not promote strings in a boolean expression.
// Number and Boolean conflict: resolve the type as boolean in this query.
checkAnswer(
sql("select num_bool from jsonTable where NOT num_bool"),
Row(false)
)
checkAnswer(
sql("select str_bool from jsonTable where NOT str_bool"),
Row(false)
)
// Right now, the analyzer does not know that num_bool should be treated as a boolean.
// Number and Boolean conflict: resolve the type as boolean in this query.
checkAnswer(
sql("select num_bool from jsonTable where num_bool"),
Row(true)
)
checkAnswer(
sql("select str_bool from jsonTable where str_bool"),
Row(false)
)
// The plan of the following DSL is
// Project [(CAST(num_str#65:4, DoubleType) + 1.2) AS num#78]
// Filter (CAST(CAST(num_str#65:4, DoubleType), DecimalType) > 92233720368547758060)
// ExistingRdd [num_bool#61,num_num_1#62L,num_num_2#63,num_num_3#64,num_str#65,str_bool#66]
// We should directly cast num_str to DecimalType and also need to do the right type promotion
// in the Project.
checkAnswer(
jsonDF.
where('num_str >= BigDecimal("92233720368547758060")).
select(('num_str + 1.2).as("num")),
Row(new java.math.BigDecimal("92233720368547758071.2").doubleValue())
)
// The following test will fail. The type of num_str is StringType.
// So, to evaluate num_str + 1.2, we first need to use Cast to convert the type.
// In our test data, one value of num_str is 13.1.
// The result of (CAST(num_str#65:4, DoubleType) + 1.2) for this value is 14.299999999999999,
// which is not 14.3.
// Number and String conflict: resolve the type as number in this query.
checkAnswer(
sql("select num_str + 1.2 from jsonTable where num_str > 13"),
Row(BigDecimal("14.3")) :: Row(BigDecimal("92233720368547758071.2")) :: Nil
)
}
test("Type conflict in complex field values") {
val jsonDF = spark.read.json(complexFieldValueTypeConflict)
val expectedSchema = StructType(
StructField("array", ArrayType(LongType, true), true) ::
StructField("num_struct", StringType, true) ::
StructField("str_array", StringType, true) ::
StructField("struct", StructType(
StructField("field", StringType, true) :: Nil), true) ::
StructField("struct_array", StringType, true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
Row(Seq(), "11", "[1,2,3]", Row(null), "[]") ::
Row(null, """{"field":false}""", null, null, "{}") ::
Row(Seq(4, 5, 6), null, "str", Row(null), "[7,8,9]") ::
Row(Seq(7), "{}", """["str1","str2",33]""", Row("str"), """{"field":true}""") :: Nil
)
}
test("Type conflict in array elements") {
val jsonDF = spark.read.json(arrayElementTypeConflict)
val expectedSchema = StructType(
StructField("array1", ArrayType(StringType, true), true) ::
StructField("array2", ArrayType(StructType(
StructField("field", LongType, true) :: Nil), true), true) ::
StructField("array3", ArrayType(StringType, true), true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
Row(Seq("1", "1.1", "true", null, "[]", "{}", "[2,3,4]",
"""{"field":"str"}"""), Seq(Row(214748364700L), Row(1)), null) ::
Row(null, null, Seq("""{"field":"str"}""", """{"field":1}""")) ::
Row(null, null, Seq("1", "2", "3")) :: Nil
)
// Treat an element as a number.
checkAnswer(
sql("select array1[0] + 1 from jsonTable where array1 is not null"),
Row(2)
)
}
test("Handling missing fields") {
val jsonDF = spark.read.json(missingFields)
val expectedSchema = StructType(
StructField("a", BooleanType, true) ::
StructField("b", LongType, true) ::
StructField("c", ArrayType(LongType, true), true) ::
StructField("d", StructType(
StructField("field", BooleanType, true) :: Nil), true) ::
StructField("e", StringType, true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
}
test("Loading a JSON dataset from a text file") {
val dir = Utils.createTempDir()
dir.delete()
val path = dir.getCanonicalPath
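    // Flatten each record onto a single line before writing: by default the JSON source expects
    // one JSON document per line (the multiLine tests further below cover the other mode).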
primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path)
val jsonDF = spark.read.json(path)
val expectedSchema = StructType(
StructField("bigInteger", DecimalType(20, 0), true) ::
StructField("boolean", BooleanType, true) ::
StructField("double", DoubleType, true) ::
StructField("integer", LongType, true) ::
StructField("long", LongType, true) ::
StructField("null", StringType, true) ::
StructField("string", StringType, true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
Row(new java.math.BigDecimal("92233720368547758070"),
true,
1.7976931348623157E308,
10,
21474836470L,
null,
"this is a simple string.")
)
}
test("Loading a JSON dataset primitivesAsString returns schema with primitive types as strings") {
val dir = Utils.createTempDir()
dir.delete()
val path = dir.getCanonicalPath
primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path)
val jsonDF = spark.read.option("primitivesAsString", "true").json(path)
val expectedSchema = StructType(
StructField("bigInteger", StringType, true) ::
StructField("boolean", StringType, true) ::
StructField("double", StringType, true) ::
StructField("integer", StringType, true) ::
StructField("long", StringType, true) ::
StructField("null", StringType, true) ::
StructField("string", StringType, true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
Row("92233720368547758070",
"true",
"1.7976931348623157E308",
"10",
"21474836470",
null,
"this is a simple string.")
)
}
test("Loading a JSON dataset primitivesAsString returns complex fields as strings") {
val jsonDF = spark.read.option("primitivesAsString", "true").json(complexFieldAndType1)
val expectedSchema = StructType(
StructField("arrayOfArray1", ArrayType(ArrayType(StringType, true), true), true) ::
StructField("arrayOfArray2", ArrayType(ArrayType(StringType, true), true), true) ::
StructField("arrayOfBigInteger", ArrayType(StringType, true), true) ::
StructField("arrayOfBoolean", ArrayType(StringType, true), true) ::
StructField("arrayOfDouble", ArrayType(StringType, true), true) ::
StructField("arrayOfInteger", ArrayType(StringType, true), true) ::
StructField("arrayOfLong", ArrayType(StringType, true), true) ::
StructField("arrayOfNull", ArrayType(StringType, true), true) ::
StructField("arrayOfString", ArrayType(StringType, true), true) ::
StructField("arrayOfStruct", ArrayType(
StructType(
StructField("field1", StringType, true) ::
StructField("field2", StringType, true) ::
StructField("field3", StringType, true) :: Nil), true), true) ::
StructField("struct", StructType(
StructField("field1", StringType, true) ::
StructField("field2", StringType, true) :: Nil), true) ::
StructField("structWithArrayFields", StructType(
StructField("field1", ArrayType(StringType, true), true) ::
StructField("field2", ArrayType(StringType, true), true) :: Nil), true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
// Access elements of a primitive array.
checkAnswer(
sql("select arrayOfString[0], arrayOfString[1], arrayOfString[2] from jsonTable"),
Row("str1", "str2", null)
)
// Access an array of null values.
checkAnswer(
sql("select arrayOfNull from jsonTable"),
Row(Seq(null, null, null, null))
)
// Access elements of a BigInteger array (we use DecimalType internally).
checkAnswer(
sql("select arrayOfBigInteger[0], arrayOfBigInteger[1], arrayOfBigInteger[2] from jsonTable"),
Row("922337203685477580700", "-922337203685477580800", null)
)
// Access elements of an array of arrays.
checkAnswer(
sql("select arrayOfArray1[0], arrayOfArray1[1] from jsonTable"),
Row(Seq("1", "2", "3"), Seq("str1", "str2"))
)
// Access elements of an array of arrays.
checkAnswer(
sql("select arrayOfArray2[0], arrayOfArray2[1] from jsonTable"),
Row(Seq("1", "2", "3"), Seq("1.1", "2.1", "3.1"))
)
    // Access elements of an array nested inside a field of type ArrayType(ArrayType).
checkAnswer(
sql("select arrayOfArray1[1][1], arrayOfArray2[1][1] from jsonTable"),
Row("str2", "2.1")
)
// Access elements of an array of structs.
checkAnswer(
sql("select arrayOfStruct[0], arrayOfStruct[1], arrayOfStruct[2], arrayOfStruct[3] " +
"from jsonTable"),
Row(
Row("true", "str1", null),
Row("false", null, null),
Row(null, null, null),
null)
)
// Access a struct and fields inside of it.
checkAnswer(
sql("select struct, struct.field1, struct.field2 from jsonTable"),
Row(
Row("true", "92233720368547758070"),
"true",
"92233720368547758070") :: Nil
)
// Access an array field of a struct.
checkAnswer(
sql("select structWithArrayFields.field1, structWithArrayFields.field2 from jsonTable"),
Row(Seq("4", "5", "6"), Seq("str1", "str2"))
)
// Access elements of an array field of a struct.
checkAnswer(
sql("select structWithArrayFields.field1[1], structWithArrayFields.field2[3] from jsonTable"),
Row("5", null)
)
}
test("Loading a JSON dataset prefersDecimal returns schema with float types as BigDecimal") {
val jsonDF = spark.read.option("prefersDecimal", "true").json(primitiveFieldAndType)
val expectedSchema = StructType(
StructField("bigInteger", DecimalType(20, 0), true) ::
StructField("boolean", BooleanType, true) ::
StructField("double", DecimalType(17, -292), true) ::
StructField("integer", LongType, true) ::
StructField("long", LongType, true) ::
StructField("null", StringType, true) ::
StructField("string", StringType, true) :: Nil)
assert(expectedSchema === jsonDF.schema)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select * from jsonTable"),
Row(BigDecimal("92233720368547758070"),
true,
BigDecimal("1.7976931348623157E308"),
10,
21474836470L,
null,
"this is a simple string.")
)
}
test("Find compatible types even if inferred DecimalType is not capable of other IntegralType") {
val mixedIntegerAndDoubleRecords = Seq(
"""{"a": 3, "b": 1.1}""",
s"""{"a": 3.1, "b": 0.${"0" * 38}1}""").toDS()
val jsonDF = spark.read
.option("prefersDecimal", "true")
.json(mixedIntegerAndDoubleRecords)
    // The values in the `a` field will be decimals as they fit in decimal. For the `b` field,
    // they will be doubles as `1.0E-39D` does not fit.
val expectedSchema = StructType(
StructField("a", DecimalType(21, 1), true) ::
StructField("b", DoubleType, true) :: Nil)
assert(expectedSchema === jsonDF.schema)
checkAnswer(
jsonDF,
Row(BigDecimal("3"), 1.1D) ::
Row(BigDecimal("3.1"), 1.0E-39D) :: Nil
)
}
test("Infer big integers correctly even when it does not fit in decimal") {
val jsonDF = spark.read
.json(bigIntegerRecords)
    // The value in the `a` field will be a double as it does not fit in decimal. For the `b`
    // field, it will be a decimal since `92233720368547758070` fits in DecimalType(20, 0).
val expectedSchema = StructType(
StructField("a", DoubleType, true) ::
StructField("b", DecimalType(20, 0), true) :: Nil)
assert(expectedSchema === jsonDF.schema)
checkAnswer(jsonDF, Row(1.0E38D, BigDecimal("92233720368547758070")))
}
test("Infer floating-point values correctly even when it does not fit in decimal") {
val jsonDF = spark.read
.option("prefersDecimal", "true")
.json(floatingValueRecords)
    // The value in the `a` field will be a double as it does not fit in decimal. For the `b`
    // field, it will be a decimal since `0.01` can be represented with a precision equal to
    // its scale, i.e. DecimalType(2, 2).
val expectedSchema = StructType(
StructField("a", DoubleType, true) ::
StructField("b", DecimalType(2, 2), true):: Nil)
assert(expectedSchema === jsonDF.schema)
checkAnswer(jsonDF, Row(1.0E-39D, BigDecimal("0.01")))
val mergedJsonDF = spark.read
.option("prefersDecimal", "true")
.json(floatingValueRecords.union(bigIntegerRecords))
val expectedMergedSchema = StructType(
StructField("a", DoubleType, true) ::
StructField("b", DecimalType(22, 2), true):: Nil)
assert(expectedMergedSchema === mergedJsonDF.schema)
checkAnswer(
mergedJsonDF,
Row(1.0E-39D, BigDecimal("0.01")) ::
Row(1.0E38D, BigDecimal("92233720368547758070")) :: Nil
)
}
test("Loading a JSON dataset from a text file with SQL") {
val dir = Utils.createTempDir()
dir.delete()
val path = dir.toURI.toString
primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path)
sql(
s"""
|CREATE TEMPORARY VIEW jsonTableSQL
|USING org.apache.spark.sql.json
|OPTIONS (
| path '$path'
|)
""".stripMargin)
checkAnswer(
sql("select * from jsonTableSQL"),
Row(new java.math.BigDecimal("92233720368547758070"),
true,
1.7976931348623157E308,
10,
21474836470L,
null,
"this is a simple string.")
)
}
test("Applying schemas") {
val dir = Utils.createTempDir()
dir.delete()
val path = dir.getCanonicalPath
primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path)
val schema = StructType(
StructField("bigInteger", DecimalType.SYSTEM_DEFAULT, true) ::
StructField("boolean", BooleanType, true) ::
StructField("double", DoubleType, true) ::
StructField("integer", IntegerType, true) ::
StructField("long", LongType, true) ::
StructField("null", StringType, true) ::
StructField("string", StringType, true) :: Nil)
val jsonDF1 = spark.read.schema(schema).json(path)
assert(schema === jsonDF1.schema)
jsonDF1.createOrReplaceTempView("jsonTable1")
checkAnswer(
sql("select * from jsonTable1"),
Row(new java.math.BigDecimal("92233720368547758070"),
true,
1.7976931348623157E308,
10,
21474836470L,
null,
"this is a simple string.")
)
val jsonDF2 = spark.read.schema(schema).json(primitiveFieldAndType)
assert(schema === jsonDF2.schema)
jsonDF2.createOrReplaceTempView("jsonTable2")
checkAnswer(
sql("select * from jsonTable2"),
Row(new java.math.BigDecimal("92233720368547758070"),
true,
1.7976931348623157E308,
10,
21474836470L,
null,
"this is a simple string.")
)
}
test("Applying schemas with MapType") {
val schemaWithSimpleMap = StructType(
StructField("map", MapType(StringType, IntegerType, true), false) :: Nil)
val jsonWithSimpleMap = spark.read.schema(schemaWithSimpleMap).json(mapType1)
jsonWithSimpleMap.createOrReplaceTempView("jsonWithSimpleMap")
checkAnswer(
sql("select `map` from jsonWithSimpleMap"),
Row(Map("a" -> 1)) ::
Row(Map("b" -> 2)) ::
Row(Map("c" -> 3)) ::
Row(Map("c" -> 1, "d" -> 4)) ::
Row(Map("e" -> null)) :: Nil
)
withSQLConf(SQLConf.SUPPORT_QUOTED_REGEX_COLUMN_NAME.key -> "false") {
checkAnswer(
sql("select `map`['c'] from jsonWithSimpleMap"),
Row(null) ::
Row(null) ::
Row(3) ::
Row(1) ::
Row(null) :: Nil
)
}
val innerStruct = StructType(
StructField("field1", ArrayType(IntegerType, true), true) ::
StructField("field2", IntegerType, true) :: Nil)
val schemaWithComplexMap = StructType(
StructField("map", MapType(StringType, innerStruct, true), false) :: Nil)
val jsonWithComplexMap = spark.read.schema(schemaWithComplexMap).json(mapType2)
jsonWithComplexMap.createOrReplaceTempView("jsonWithComplexMap")
checkAnswer(
sql("select `map` from jsonWithComplexMap"),
Row(Map("a" -> Row(Seq(1, 2, 3, null), null))) ::
Row(Map("b" -> Row(null, 2))) ::
Row(Map("c" -> Row(Seq(), 4))) ::
Row(Map("c" -> Row(null, 3), "d" -> Row(Seq(null), null))) ::
Row(Map("e" -> null)) ::
Row(Map("f" -> Row(null, null))) :: Nil
)
withSQLConf(SQLConf.SUPPORT_QUOTED_REGEX_COLUMN_NAME.key -> "false") {
checkAnswer(
sql("select `map`['a'].field1, `map`['c'].field2 from jsonWithComplexMap"),
Row(Seq(1, 2, 3, null), null) ::
Row(null, null) ::
Row(null, 4) ::
Row(null, 3) ::
Row(null, null) ::
Row(null, null) :: Nil
)
}
}
test("SPARK-2096 Correctly parse dot notations") {
val jsonDF = spark.read.json(complexFieldAndType2)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select arrayOfStruct[0].field1, arrayOfStruct[0].field2 from jsonTable"),
Row(true, "str1")
)
checkAnswer(
sql(
"""
|select complexArrayOfStruct[0].field1[1].inner2[0], complexArrayOfStruct[1].field2[0][1]
|from jsonTable
""".stripMargin),
Row("str2", 6)
)
}
test("SPARK-3390 Complex arrays") {
val jsonDF = spark.read.json(complexFieldAndType2)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql(
"""
|select arrayOfArray1[0][0][0], arrayOfArray1[1][0][1], arrayOfArray1[1][1][0]
|from jsonTable
""".stripMargin),
Row(5, 7, 8)
)
checkAnswer(
sql(
"""
|select arrayOfArray2[0][0][0].inner1, arrayOfArray2[1][0],
|arrayOfArray2[1][1][1].inner2[0], arrayOfArray2[2][0][0].inner3[0][0].inner4
|from jsonTable
""".stripMargin),
Row("str1", Nil, "str4", 2)
)
}
test("SPARK-3308 Read top level JSON arrays") {
val jsonDF = spark.read.json(jsonArray)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql(
"""
|select a, b, c
|from jsonTable
""".stripMargin),
Row("str_a_1", null, null) ::
Row("str_a_2", null, null) ::
Row(null, "str_b_3", null) ::
Row("str_a_4", "str_b_4", "str_c_4") :: Nil
)
}
test("Corrupt records: FAILFAST mode") {
// `FAILFAST` mode should throw an exception for corrupt records.
val exceptionOne = intercept[SparkException] {
spark.read
.option("mode", "FAILFAST")
.json(corruptRecords)
}.getMessage
assert(exceptionOne.contains(
"Malformed records are detected in schema inference. Parse Mode: FAILFAST."))
val exceptionTwo = intercept[SparkException] {
spark.read
.option("mode", "FAILFAST")
.schema("a string")
.json(corruptRecords)
.collect()
}.getMessage
assert(exceptionTwo.contains(
"Malformed records are detected in record parsing. Parse Mode: FAILFAST."))
}
test("Corrupt records: DROPMALFORMED mode") {
val schemaOne = StructType(
StructField("a", StringType, true) ::
StructField("b", StringType, true) ::
StructField("c", StringType, true) :: Nil)
val schemaTwo = StructType(
StructField("a", StringType, true) :: Nil)
// `DROPMALFORMED` mode should skip corrupt records
val jsonDFOne = spark.read
.option("mode", "DROPMALFORMED")
.json(corruptRecords)
checkAnswer(
jsonDFOne,
Row("str_a_4", "str_b_4", "str_c_4") :: Nil
)
assert(jsonDFOne.schema === schemaOne)
val jsonDFTwo = spark.read
.option("mode", "DROPMALFORMED")
.schema(schemaTwo)
.json(corruptRecords)
checkAnswer(
jsonDFTwo,
Row("str_a_4") :: Nil)
assert(jsonDFTwo.schema === schemaTwo)
}
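  // A summary of the parse modes exercised by the surrounding tests (drawn from the assertions,
  // not an exhaustive spec): PERMISSIVE, the default, keeps malformed records, nulling the data
  // columns and storing the raw text in the configured corrupt-record column; DROPMALFORMED
  // silently skips them; FAILFAST throws as soon as a malformed record is encountered.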
test("SPARK-19641: Additional corrupt records: DROPMALFORMED mode") {
val schema = new StructType().add("dummy", StringType)
// `DROPMALFORMED` mode should skip corrupt records
val jsonDF = spark.read
.option("mode", "DROPMALFORMED")
.json(additionalCorruptRecords)
checkAnswer(
jsonDF,
Row("test"))
assert(jsonDF.schema === schema)
}
test("Corrupt records: PERMISSIVE mode, without designated column for malformed records") {
val schema = StructType(
StructField("a", StringType, true) ::
StructField("b", StringType, true) ::
StructField("c", StringType, true) :: Nil)
val jsonDF = spark.read.schema(schema).json(corruptRecords)
checkAnswer(
jsonDF.select($"a", $"b", $"c"),
Seq(
// Corrupted records are replaced with null
Row(null, null, null),
Row(null, null, null),
Row(null, null, null),
Row("str_a_4", "str_b_4", "str_c_4"),
Row(null, null, null))
)
}
test("Corrupt records: PERMISSIVE mode, with designated column for malformed records") {
// Test if we can query corrupt records.
withSQLConf(SQLConf.COLUMN_NAME_OF_CORRUPT_RECORD.key -> "_unparsed") {
val jsonDF = spark.read.json(corruptRecords)
val schema = StructType(
StructField("_unparsed", StringType, true) ::
StructField("a", StringType, true) ::
StructField("b", StringType, true) ::
StructField("c", StringType, true) :: Nil)
assert(schema === jsonDF.schema)
      // In HiveContext, backticks should be used to access columns starting with an underscore.
checkAnswer(
jsonDF.select($"a", $"b", $"c", $"_unparsed"),
Row(null, null, null, "{") ::
Row(null, null, null, """{"a":1, b:2}""") ::
Row(null, null, null, """{"a":{, b:3}""") ::
Row("str_a_4", "str_b_4", "str_c_4", null) ::
Row(null, null, null, "]") :: Nil
)
checkAnswer(
jsonDF.filter($"_unparsed".isNull).select($"a", $"b", $"c"),
Row("str_a_4", "str_b_4", "str_c_4")
)
checkAnswer(
jsonDF.filter($"_unparsed".isNotNull).select($"_unparsed"),
Row("{") ::
Row("""{"a":1, b:2}""") ::
Row("""{"a":{, b:3}""") ::
Row("]") :: Nil
)
}
}
test("SPARK-13953 Rename the corrupt record field via option") {
val jsonDF = spark.read
.option("columnNameOfCorruptRecord", "_malformed")
.json(corruptRecords)
val schema = StructType(
StructField("_malformed", StringType, true) ::
StructField("a", StringType, true) ::
StructField("b", StringType, true) ::
StructField("c", StringType, true) :: Nil)
assert(schema === jsonDF.schema)
checkAnswer(
jsonDF.selectExpr("a", "b", "c", "_malformed"),
Row(null, null, null, "{") ::
Row(null, null, null, """{"a":1, b:2}""") ::
Row(null, null, null, """{"a":{, b:3}""") ::
Row("str_a_4", "str_b_4", "str_c_4", null) ::
Row(null, null, null, "]") :: Nil
)
}
test("SPARK-4068: nulls in arrays") {
val jsonDF = spark.read.json(nullsInArrays)
jsonDF.createOrReplaceTempView("jsonTable")
val schema = StructType(
StructField("field1",
ArrayType(ArrayType(ArrayType(ArrayType(StringType, true), true), true), true), true) ::
StructField("field2",
ArrayType(ArrayType(
StructType(StructField("Test", LongType, true) :: Nil), true), true), true) ::
StructField("field3",
ArrayType(ArrayType(
StructType(StructField("Test", StringType, true) :: Nil), true), true), true) ::
StructField("field4",
ArrayType(ArrayType(ArrayType(LongType, true), true), true), true) :: Nil)
assert(schema === jsonDF.schema)
checkAnswer(
sql(
"""
|SELECT field1, field2, field3, field4
|FROM jsonTable
""".stripMargin),
Row(Seq(Seq(null), Seq(Seq(Seq("Test")))), null, null, null) ::
Row(null, Seq(null, Seq(Row(1))), null, null) ::
Row(null, null, Seq(Seq(null), Seq(Row("2"))), null) ::
Row(null, null, null, Seq(Seq(null, Seq(1, 2, 3)))) :: Nil
)
}
test("SPARK-4228 DataFrame to JSON") {
val schema1 = StructType(
StructField("f1", IntegerType, false) ::
StructField("f2", StringType, false) ::
StructField("f3", BooleanType, false) ::
StructField("f4", ArrayType(StringType), nullable = true) ::
StructField("f5", IntegerType, true) :: Nil)
val rowRDD1 = unparsedStrings.map { r =>
val values = r.split(",").map(_.trim)
val v5 = try values(3).toInt catch {
case _: NumberFormatException => null
}
Row(values(0).toInt, values(1), values(2).toBoolean, r.split(",").toList, v5)
}
val df1 = spark.createDataFrame(rowRDD1, schema1)
df1.createOrReplaceTempView("applySchema1")
val df2 = df1.toDF
val result = df2.toJSON.collect()
// scalastyle:off
assert(result(0) === "{\\"f1\\":1,\\"f2\\":\\"A1\\",\\"f3\\":true,\\"f4\\":[\\"1\\",\\" A1\\",\\" true\\",\\" null\\"]}")
assert(result(3) === "{\\"f1\\":4,\\"f2\\":\\"D4\\",\\"f3\\":true,\\"f4\\":[\\"4\\",\\" D4\\",\\" true\\",\\" 2147483644\\"],\\"f5\\":2147483644}")
// scalastyle:on
val schema2 = StructType(
StructField("f1", StructType(
StructField("f11", IntegerType, false) ::
StructField("f12", BooleanType, false) :: Nil), false) ::
StructField("f2", MapType(StringType, IntegerType, true), false) :: Nil)
val rowRDD2 = unparsedStrings.map { r =>
val values = r.split(",").map(_.trim)
val v4 = try values(3).toInt catch {
case _: NumberFormatException => null
}
Row(Row(values(0).toInt, values(2).toBoolean), Map(values(1) -> v4))
}
val df3 = spark.createDataFrame(rowRDD2, schema2)
df3.createOrReplaceTempView("applySchema2")
val df4 = df3.toDF
val result2 = df4.toJSON.collect()
assert(result2(1) === "{\\"f1\\":{\\"f11\\":2,\\"f12\\":false},\\"f2\\":{\\"B2\\":null}}")
assert(result2(3) === "{\\"f1\\":{\\"f11\\":4,\\"f12\\":true},\\"f2\\":{\\"D4\\":2147483644}}")
val jsonDF = spark.read.json(primitiveFieldAndType)
val primTable = spark.read.json(jsonDF.toJSON)
primTable.createOrReplaceTempView("primitiveTable")
checkAnswer(
sql("select * from primitiveTable"),
Row(new java.math.BigDecimal("92233720368547758070"),
true,
1.7976931348623157E308,
10,
21474836470L,
"this is a simple string.")
)
val complexJsonDF = spark.read.json(complexFieldAndType1)
val compTable = spark.read.json(complexJsonDF.toJSON)
compTable.createOrReplaceTempView("complexTable")
// Access elements of a primitive array.
checkAnswer(
sql("select arrayOfString[0], arrayOfString[1], arrayOfString[2] from complexTable"),
Row("str1", "str2", null)
)
// Access an array of null values.
checkAnswer(
sql("select arrayOfNull from complexTable"),
Row(Seq(null, null, null, null))
)
// Access elements of a BigInteger array (we use DecimalType internally).
checkAnswer(
sql("select arrayOfBigInteger[0], arrayOfBigInteger[1], arrayOfBigInteger[2] " +
" from complexTable"),
Row(new java.math.BigDecimal("922337203685477580700"),
new java.math.BigDecimal("-922337203685477580800"), null)
)
// Access elements of an array of arrays.
checkAnswer(
sql("select arrayOfArray1[0], arrayOfArray1[1] from complexTable"),
Row(Seq("1", "2", "3"), Seq("str1", "str2"))
)
// Access elements of an array of arrays.
checkAnswer(
sql("select arrayOfArray2[0], arrayOfArray2[1] from complexTable"),
Row(Seq(1.0, 2.0, 3.0), Seq(1.1, 2.1, 3.1))
)
    // Access elements of an array nested inside a field of type ArrayType(ArrayType).
checkAnswer(
sql("select arrayOfArray1[1][1], arrayOfArray2[1][1] from complexTable"),
Row("str2", 2.1)
)
// Access a struct and fields inside of it.
checkAnswer(
sql("select struct, struct.field1, struct.field2 from complexTable"),
Row(
Row(true, new java.math.BigDecimal("92233720368547758070")),
true,
new java.math.BigDecimal("92233720368547758070")) :: Nil
)
// Access an array field of a struct.
checkAnswer(
sql("select structWithArrayFields.field1, structWithArrayFields.field2 from complexTable"),
Row(Seq(4, 5, 6), Seq("str1", "str2"))
)
// Access elements of an array field of a struct.
checkAnswer(
sql("select structWithArrayFields.field1[1], structWithArrayFields.field2[3] " +
"from complexTable"),
Row(5, null)
)
}
test("Dataset toJSON doesn't construct rdd") {
val containsRDD = spark.emptyDataFrame.toJSON.queryExecution.logical.find {
case ExternalRDD(_, _) => true
case _ => false
}
assert(containsRDD.isEmpty, "Expected logical plan of toJSON to not contain an RDD")
}
test("JSONRelation equality test") {
withTempPath(dir => {
val path = dir.getCanonicalFile.toURI.toString
sparkContext.parallelize(1 to 100)
.map(i => s"""{"a": 1, "b": "str$i"}""").saveAsTextFile(path)
val d1 = DataSource(
spark,
userSpecifiedSchema = None,
partitionColumns = Array.empty[String],
bucketSpec = None,
className = classOf[JsonFileFormat].getCanonicalName,
options = Map("path" -> path)).resolveRelation()
val d2 = DataSource(
spark,
userSpecifiedSchema = None,
partitionColumns = Array.empty[String],
bucketSpec = None,
className = classOf[JsonFileFormat].getCanonicalName,
options = Map("path" -> path)).resolveRelation()
assert(d1 === d2)
})
}
test("SPARK-6245 JsonInferSchema.infer on empty RDD") {
    // This is really just a test that inferring from an empty RDD doesn't throw an exception.
val emptySchema = JsonInferSchema.infer(
empty.rdd,
new JSONOptions(Map.empty[String, String], "GMT"),
CreateJacksonParser.string)
assert(StructType(Seq()) === emptySchema)
}
test("SPARK-7565 MapType in JsonRDD") {
withSQLConf(SQLConf.COLUMN_NAME_OF_CORRUPT_RECORD.key -> "_unparsed") {
withTempDir { dir =>
val schemaWithSimpleMap = StructType(
StructField("map", MapType(StringType, IntegerType, true), false) :: Nil)
val df = spark.read.schema(schemaWithSimpleMap).json(mapType1)
val path = dir.getAbsolutePath
df.write.mode("overwrite").parquet(path)
        // The ordering of MapType entries is not defined, so only the row count is checked here.
assert(spark.read.parquet(path).count() == 5)
val df2 = spark.read.json(corruptRecords)
df2.write.mode("overwrite").parquet(path)
checkAnswer(spark.read.parquet(path), df2.collect())
}
}
}
test("SPARK-8093 Erase empty structs") {
val emptySchema = JsonInferSchema.infer(
emptyRecords.rdd,
new JSONOptions(Map.empty[String, String], "GMT"),
CreateJacksonParser.string)
assert(StructType(Seq()) === emptySchema)
}
test("JSON with Partition") {
def makePartition(rdd: RDD[String], parent: File, partName: String, partValue: Any): File = {
val p = new File(parent, s"$partName=${partValue.toString}")
rdd.saveAsTextFile(p.getCanonicalPath)
p
}
withTempPath(root => {
val d1 = new File(root, "d1=1")
// root/dt=1/col1=abc
val p1_col1 = makePartition(
sparkContext.parallelize(2 to 5).map(i => s"""{"a": 1, "b": "str$i"}"""),
d1,
"col1",
"abc")
// root/dt=1/col1=abd
val p2 = makePartition(
sparkContext.parallelize(6 to 10).map(i => s"""{"a": 1, "b": "str$i"}"""),
d1,
"col1",
"abd")
spark.read.json(root.getAbsolutePath).createOrReplaceTempView("test_myjson_with_part")
checkAnswer(sql(
"SELECT count(a) FROM test_myjson_with_part where d1 = 1 and col1='abc'"), Row(4))
checkAnswer(sql(
"SELECT count(a) FROM test_myjson_with_part where d1 = 1 and col1='abd'"), Row(5))
checkAnswer(sql(
"SELECT count(a) FROM test_myjson_with_part where d1 = 1"), Row(9))
})
}
test("backward compatibility") {
    // This test makes sure our JSON support can read JSON data generated by previous versions
    // of Spark, both via the toJSON method and via the JSON data source.
// The data is generated by the following program.
// Here are a few notes:
// - Spark 1.5.0 cannot save timestamp data. So, we manually added timestamp field (col13)
// in the JSON object.
// - For Spark before 1.5.1, we do not generate UDTs. So, we manually added the UDT value to
// JSON objects generated by those Spark versions (col17).
// - If the type is NullType, we do not write data out.
// Create the schema.
val struct =
StructType(
StructField("f1", FloatType, true) ::
StructField("f2", ArrayType(BooleanType), true) :: Nil)
val dataTypes =
Seq(
StringType, BinaryType, NullType, BooleanType,
ByteType, ShortType, IntegerType, LongType,
FloatType, DoubleType, DecimalType(25, 5), DecimalType(6, 5),
DateType, TimestampType,
ArrayType(IntegerType), MapType(StringType, LongType), struct,
new UDT.MyDenseVectorUDT())
val fields = dataTypes.zipWithIndex.map { case (dataType, index) =>
StructField(s"col$index", dataType, nullable = true)
}
val schema = StructType(fields)
val constantValues =
Seq(
"a string in binary".getBytes(StandardCharsets.UTF_8),
null,
true,
1.toByte,
2.toShort,
3,
Long.MaxValue,
0.25.toFloat,
0.75,
new java.math.BigDecimal(s"1234.23456"),
new java.math.BigDecimal(s"1.23456"),
java.sql.Date.valueOf("2015-01-01"),
java.sql.Timestamp.valueOf("2015-01-01 23:50:59.123"),
Seq(2, 3, 4),
Map("a string" -> 2000L),
Row(4.75.toFloat, Seq(false, true)),
new UDT.MyDenseVector(Array(0.25, 2.25, 4.25)))
val data =
Row.fromSeq(Seq("Spark " + spark.sparkContext.version) ++ constantValues) :: Nil
// Data generated by previous versions.
// scalastyle:off
val existingJSONData =
"""{"col0":"Spark 1.2.2","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" ::
"""{"col0":"Spark 1.3.1","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" ::
"""{"col0":"Spark 1.3.1","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" ::
"""{"col0":"Spark 1.4.1","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" ::
"""{"col0":"Spark 1.4.1","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" ::
"""{"col0":"Spark 1.5.0","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"2015-01-01","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" ::
"""{"col0":"Spark 1.5.0","col1":"YSBzdHJpbmcgaW4gYmluYXJ5","col3":true,"col4":1,"col5":2,"col6":3,"col7":9223372036854775807,"col8":0.25,"col9":0.75,"col10":1234.23456,"col11":1.23456,"col12":"16436","col13":"2015-01-01 23:50:59.123","col14":[2,3,4],"col15":{"a string":2000},"col16":{"f1":4.75,"f2":[false,true]},"col17":[0.25,2.25,4.25]}""" :: Nil
// scalastyle:on
// Generate data for the current version.
val df = spark.createDataFrame(spark.sparkContext.parallelize(data, 1), schema)
withTempPath { path =>
df.write.format("json").mode("overwrite").save(path.getCanonicalPath)
      // df.toJSON converts internal rows to external rows first and then generates JSON
      // objects, whereas df.write.format("json") writes internal rows directly.
val allJSON =
existingJSONData ++
df.toJSON.collect() ++
sparkContext.textFile(path.getCanonicalPath).collect()
Utils.deleteRecursively(path)
sparkContext.parallelize(allJSON, 1).saveAsTextFile(path.getCanonicalPath)
// Read data back with the schema specified.
val col0Values =
Seq(
"Spark 1.2.2",
"Spark 1.3.1",
"Spark 1.3.1",
"Spark 1.4.1",
"Spark 1.4.1",
"Spark 1.5.0",
"Spark 1.5.0",
"Spark " + spark.sparkContext.version,
"Spark " + spark.sparkContext.version)
val expectedResult = col0Values.map { v =>
Row.fromSeq(Seq(v) ++ constantValues)
}
checkAnswer(
spark.read.format("json").schema(schema).load(path.getCanonicalPath),
expectedResult
)
}
}
test("SPARK-11544 test pathfilter") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val df = spark.range(2)
df.write.json(path + "/p=1")
df.write.json(path + "/p=2")
assert(spark.read.json(path).count() === 4)
val extraOptions = Map(
"mapred.input.pathFilter.class" -> classOf[TestFileFilter].getName,
"mapreduce.input.pathFilter.class" -> classOf[TestFileFilter].getName
)
assert(spark.read.options(extraOptions).json(path).count() === 2)
}
}
test("SPARK-12057 additional corrupt records do not throw exceptions") {
// Test if we can query corrupt records.
withSQLConf(SQLConf.COLUMN_NAME_OF_CORRUPT_RECORD.key -> "_unparsed") {
withTempView("jsonTable") {
val schema = StructType(
StructField("_unparsed", StringType, true) ::
StructField("dummy", StringType, true) :: Nil)
{
// We need to make sure we can infer the schema.
val jsonDF = spark.read.json(additionalCorruptRecords)
assert(jsonDF.schema === schema)
}
{
val jsonDF = spark.read.schema(schema).json(additionalCorruptRecords)
jsonDF.createOrReplaceTempView("jsonTable")
          // In HiveContext, use backticks to access columns starting with an underscore.
checkAnswer(
sql(
"""
|SELECT dummy, _unparsed
|FROM jsonTable
""".stripMargin),
Row("test", null) ::
Row(null, """[1,2,3]""") ::
Row(null, """":"test", "a":1}""") ::
Row(null, """42""") ::
Row(null, """ ","ian":"test"}""") :: Nil
)
}
}
}
}
test("Parse JSON rows having an array type and a struct type in the same field.") {
withTempDir { dir =>
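      // Note: the outer `dir` is shadowed here. A fresh temp path is created and then deleted
      // so that `write.text(path)` below can create the directory itself (the writer errors out
      // if the target already exists). The same pattern appears in a couple of tests below.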
val dir = Utils.createTempDir()
dir.delete()
val path = dir.getCanonicalPath
arrayAndStructRecords.map(record => record.replaceAll("\\n", " ")).write.text(path)
val schema =
StructType(
StructField("a", StructType(
StructField("b", StringType) :: Nil
)) :: Nil)
val jsonDF = spark.read.schema(schema).json(path)
assert(jsonDF.count() == 2)
}
}
test("SPARK-12872 Support to specify the option for compression codec") {
withTempDir { dir =>
val dir = Utils.createTempDir()
dir.delete()
val path = dir.getCanonicalPath
primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path)
val jsonDF = spark.read.json(path)
val jsonDir = new File(dir, "json").getCanonicalPath
jsonDF.coalesce(1).write
.format("json")
.option("compression", "gZiP")
.save(jsonDir)
val compressedFiles = new File(jsonDir).listFiles()
assert(compressedFiles.exists(_.getName.endsWith(".json.gz")))
val jsonCopy = spark.read
.format("json")
.load(jsonDir)
assert(jsonCopy.count == jsonDF.count)
val jsonCopySome = jsonCopy.selectExpr("string", "long", "boolean")
val jsonDFSome = jsonDF.selectExpr("string", "long", "boolean")
checkAnswer(jsonCopySome, jsonDFSome)
}
}
test("SPARK-13543 Write the output as uncompressed via option()") {
val extraOptions = Map[String, String](
"mapreduce.output.fileoutputformat.compress" -> "true",
"mapreduce.output.fileoutputformat.compress.type" -> CompressionType.BLOCK.toString,
"mapreduce.output.fileoutputformat.compress.codec" -> classOf[GzipCodec].getName,
"mapreduce.map.output.compress" -> "true",
"mapreduce.map.output.compress.codec" -> classOf[GzipCodec].getName
)
withTempDir { dir =>
val dir = Utils.createTempDir()
dir.delete()
val path = dir.getCanonicalPath
primitiveFieldAndType.map(record => record.replaceAll("\\n", " ")).write.text(path)
val jsonDF = spark.read.json(path)
val jsonDir = new File(dir, "json").getCanonicalPath
jsonDF.coalesce(1).write
.format("json")
.option("compression", "none")
.options(extraOptions)
.save(jsonDir)
val compressedFiles = new File(jsonDir).listFiles()
assert(compressedFiles.exists(!_.getName.endsWith(".json.gz")))
val jsonCopy = spark.read
.format("json")
.options(extraOptions)
.load(jsonDir)
assert(jsonCopy.count == jsonDF.count)
val jsonCopySome = jsonCopy.selectExpr("string", "long", "boolean")
val jsonDFSome = jsonDF.selectExpr("string", "long", "boolean")
checkAnswer(jsonCopySome, jsonDFSome)
}
}
test("Casting long as timestamp") {
withTempView("jsonTable") {
val schema = (new StructType).add("ts", TimestampType)
val jsonDF = spark.read.schema(schema).json(timestampAsLong)
jsonDF.createOrReplaceTempView("jsonTable")
checkAnswer(
sql("select ts from jsonTable"),
Row(java.sql.Timestamp.valueOf("2016-01-02 03:04:05"))
)
}
}
test("wide nested json table") {
val nested = (1 to 100).map { i =>
s"""
|"c$i": $i
""".stripMargin
}.mkString(", ")
val json = s"""
|{"a": [{$nested}], "b": [{$nested}]}
""".stripMargin
val df = spark.read.json(Seq(json).toDS())
assert(df.schema.size === 2)
df.collect()
}
test("Write dates correctly with dateFormat option") {
val customSchema = new StructType(Array(StructField("date", DateType, true)))
withTempDir { dir =>
// With dateFormat option.
val datesWithFormatPath = s"${dir.getCanonicalPath}/datesWithFormat.json"
val datesWithFormat = spark.read
.schema(customSchema)
.option("dateFormat", "dd/MM/yyyy HH:mm")
.json(datesRecords)
datesWithFormat.write
.format("json")
.option("dateFormat", "yyyy/MM/dd")
.save(datesWithFormatPath)
// This will load back the dates as string.
val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
val stringDatesWithFormat = spark.read
.schema(stringSchema)
.json(datesWithFormatPath)
val expectedStringDatesWithFormat = Seq(
Row("2015/08/26"),
Row("2014/10/27"),
Row("2016/01/28"))
checkAnswer(stringDatesWithFormat, expectedStringDatesWithFormat)
}
}
test("Write timestamps correctly with timestampFormat option") {
val customSchema = new StructType(Array(StructField("date", TimestampType, true)))
withTempDir { dir =>
// With dateFormat option.
val timestampsWithFormatPath = s"${dir.getCanonicalPath}/timestampsWithFormat.json"
val timestampsWithFormat = spark.read
.schema(customSchema)
.option("timestampFormat", "dd/MM/yyyy HH:mm")
.json(datesRecords)
timestampsWithFormat.write
.format("json")
.option("timestampFormat", "yyyy/MM/dd HH:mm")
.save(timestampsWithFormatPath)
// This will load back the timestamps as string.
val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
val stringTimestampsWithFormat = spark.read
.schema(stringSchema)
.json(timestampsWithFormatPath)
val expectedStringDatesWithFormat = Seq(
Row("2015/08/26 18:00"),
Row("2014/10/27 18:30"),
Row("2016/01/28 20:00"))
checkAnswer(stringTimestampsWithFormat, expectedStringDatesWithFormat)
}
}
test("Write timestamps correctly with timestampFormat option and timeZone option") {
val customSchema = new StructType(Array(StructField("date", TimestampType, true)))
withTempDir { dir =>
// With dateFormat option and timeZone option.
val timestampsWithFormatPath = s"${dir.getCanonicalPath}/timestampsWithFormat.json"
val timestampsWithFormat = spark.read
.schema(customSchema)
.option("timestampFormat", "dd/MM/yyyy HH:mm")
.json(datesRecords)
timestampsWithFormat.write
.format("json")
.option("timestampFormat", "yyyy/MM/dd HH:mm")
.option(DateTimeUtils.TIMEZONE_OPTION, "GMT")
.save(timestampsWithFormatPath)
// This will load back the timestamps as string.
val stringSchema = StructType(StructField("date", StringType, true) :: Nil)
val stringTimestampsWithFormat = spark.read
.schema(stringSchema)
.json(timestampsWithFormatPath)
val expectedStringDatesWithFormat = Seq(
Row("2015/08/27 01:00"),
Row("2014/10/28 01:30"),
Row("2016/01/29 04:00"))
checkAnswer(stringTimestampsWithFormat, expectedStringDatesWithFormat)
val readBack = spark.read
.schema(customSchema)
.option("timestampFormat", "yyyy/MM/dd HH:mm")
.option(DateTimeUtils.TIMEZONE_OPTION, "GMT")
.json(timestampsWithFormatPath)
checkAnswer(readBack, timestampsWithFormat)
}
}
test("SPARK-18433: Improve DataSource option keys to be more case-insensitive") {
val records = Seq("""{"a": 3, "b": 1.1}""", """{"a": 3.1, "b": 0.000001}""").toDS()
val schema = StructType(
StructField("a", DecimalType(21, 1), true) ::
StructField("b", DecimalType(7, 6), true) :: Nil)
val df1 = spark.read.option("prefersDecimal", "true").json(records)
assert(df1.schema == schema)
val df2 = spark.read.option("PREfersdecimaL", "true").json(records)
assert(df2.schema == schema)
}
test("SPARK-18352: Parse normal multi-line JSON files (compressed)") {
withTempPath { dir =>
val path = dir.getCanonicalPath
primitiveFieldAndType
.toDF("value")
.write
.option("compression", "GzIp")
.text(path)
assert(new File(path).listFiles().exists(_.getName.endsWith(".gz")))
val jsonDF = spark.read.option("multiLine", true).json(path)
val jsonDir = new File(dir, "json").getCanonicalPath
jsonDF.coalesce(1).write
.option("compression", "gZiP")
.json(jsonDir)
assert(new File(jsonDir).listFiles().exists(_.getName.endsWith(".json.gz")))
val originalData = spark.read.json(primitiveFieldAndType)
checkAnswer(jsonDF, originalData)
checkAnswer(spark.read.schema(originalData.schema).json(jsonDir), originalData)
}
}
test("SPARK-18352: Parse normal multi-line JSON files (uncompressed)") {
withTempPath { dir =>
val path = dir.getCanonicalPath
primitiveFieldAndType
.toDF("value")
.write
.text(path)
val jsonDF = spark.read.option("multiLine", true).json(path)
val jsonDir = new File(dir, "json").getCanonicalPath
jsonDF.coalesce(1).write.json(jsonDir)
val compressedFiles = new File(jsonDir).listFiles()
assert(compressedFiles.exists(_.getName.endsWith(".json")))
val originalData = spark.read.json(primitiveFieldAndType)
checkAnswer(jsonDF, originalData)
checkAnswer(spark.read.schema(originalData.schema).json(jsonDir), originalData)
}
}
test("SPARK-18352: Expect one JSON document per file") {
    // The JSON parser terminates as soon as it sees a matching END_OBJECT or END_ARRAY token.
    // This might not be the optimal behavior, but this test verifies that only the first value
    // is parsed and the rest are discarded.
    // Alternatively, the parser could continue parsing the following objects, which may further
    // reduce allocations by skipping the line reader entirely.
withTempPath { dir =>
val path = dir.getCanonicalPath
spark
.createDataFrame(Seq(Tuple1("{}{invalid}")))
.coalesce(1)
.write
.text(path)
val jsonDF = spark.read.option("multiLine", true).json(path)
// no corrupt record column should be created
assert(jsonDF.schema === StructType(Seq()))
// only the first object should be read
assert(jsonDF.count() === 1)
}
}
test("SPARK-18352: Handle multi-line corrupt documents (PERMISSIVE)") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val corruptRecordCount = additionalCorruptRecords.count().toInt
assert(corruptRecordCount === 5)
additionalCorruptRecords
.toDF("value")
// this is the minimum partition count that avoids hash collisions
.repartition(corruptRecordCount * 4, F.hash($"value"))
.write
.text(path)
val jsonDF = spark.read.option("multiLine", true).option("mode", "PERMISSIVE").json(path)
assert(jsonDF.count() === corruptRecordCount)
assert(jsonDF.schema === new StructType()
.add("_corrupt_record", StringType)
.add("dummy", StringType))
val counts = jsonDF
.join(
additionalCorruptRecords.toDF("value"),
F.regexp_replace($"_corrupt_record", "(^\\\\s+|\\\\s+$)", "") === F.trim($"value"),
"outer")
.agg(
F.count($"dummy").as("valid"),
F.count($"_corrupt_record").as("corrupt"),
F.count("*").as("count"))
checkAnswer(counts, Row(1, 4, 6))
}
}
test("SPARK-19641: Handle multi-line corrupt documents (DROPMALFORMED)") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val corruptRecordCount = additionalCorruptRecords.count().toInt
assert(corruptRecordCount === 5)
additionalCorruptRecords
.toDF("value")
// this is the minimum partition count that avoids hash collisions
.repartition(corruptRecordCount * 4, F.hash($"value"))
.write
.text(path)
val jsonDF = spark.read.option("multiLine", true).option("mode", "DROPMALFORMED").json(path)
checkAnswer(jsonDF, Seq(Row("test")))
}
}
test("SPARK-18352: Handle multi-line corrupt documents (FAILFAST)") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val corruptRecordCount = additionalCorruptRecords.count().toInt
assert(corruptRecordCount === 5)
additionalCorruptRecords
.toDF("value")
// this is the minimum partition count that avoids hash collisions
.repartition(corruptRecordCount * 4, F.hash($"value"))
.write
.text(path)
val schema = new StructType().add("dummy", StringType)
// `FAILFAST` mode should throw an exception for corrupt records.
val exceptionOne = intercept[SparkException] {
spark.read
.option("multiLine", true)
.option("mode", "FAILFAST")
.json(path)
}
assert(exceptionOne.getMessage.contains("Malformed records are detected in schema " +
"inference. Parse Mode: FAILFAST."))
val exceptionTwo = intercept[SparkException] {
spark.read
.option("multiLine", true)
.option("mode", "FAILFAST")
.schema(schema)
.json(path)
.collect()
}
assert(exceptionTwo.getMessage.contains("Malformed records are detected in record " +
"parsing. Parse Mode: FAILFAST."))
}
}
test("Throw an exception if a `columnNameOfCorruptRecord` field violates requirements") {
val columnNameOfCorruptRecord = "_unparsed"
val schema = StructType(
StructField(columnNameOfCorruptRecord, IntegerType, true) ::
StructField("a", StringType, true) ::
StructField("b", StringType, true) ::
StructField("c", StringType, true) :: Nil)
val errMsg = intercept[AnalysisException] {
spark.read
.option("mode", "Permissive")
.option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
.schema(schema)
.json(corruptRecords)
}.getMessage
assert(errMsg.startsWith("The field for corrupt records must be string type and nullable"))
// We use `PERMISSIVE` mode by default if invalid string is given.
withTempPath { dir =>
val path = dir.getCanonicalPath
corruptRecords.toDF("value").write.text(path)
val errMsg = intercept[AnalysisException] {
spark.read
.option("mode", "permm")
.option("columnNameOfCorruptRecord", columnNameOfCorruptRecord)
.schema(schema)
.json(path)
.collect
}.getMessage
assert(errMsg.startsWith("The field for corrupt records must be string type and nullable"))
}
}
test("SPARK-18772: Parse special floats correctly") {
val jsons = Seq(
"""{"a": "NaN"}""",
"""{"a": "Infinity"}""",
"""{"a": "-Infinity"}""")
// positive cases
val checks: Seq[Double => Boolean] = Seq(
_.isNaN,
_.isPosInfinity,
_.isNegInfinity)
Seq(FloatType, DoubleType).foreach { dt =>
jsons.zip(checks).foreach { case (json, check) =>
val ds = spark.read
.schema(StructType(Seq(StructField("a", dt))))
.json(Seq(json).toDS())
.select($"a".cast(DoubleType)).as[Double]
assert(check(ds.first()))
}
}
// negative cases
Seq(FloatType, DoubleType).foreach { dt =>
val lowerCasedJsons = jsons.map(_.toLowerCase(Locale.ROOT))
// The special floats are case-sensitive so these cases below throw exceptions.
lowerCasedJsons.foreach { lowerCasedJson =>
val e = intercept[SparkException] {
spark.read
.option("mode", "FAILFAST")
.schema(StructType(Seq(StructField("a", dt))))
.json(Seq(lowerCasedJson).toDS())
.collect()
}
assert(e.getMessage.contains("Cannot parse"))
}
}
}
test("SPARK-21610: Corrupt records are not handled properly when creating a dataframe " +
"from a file") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val data =
"""{"field": 1}
|{"field": 2}
|{"field": "3"}""".stripMargin
Seq(data).toDF().repartition(1).write.text(path)
val schema = new StructType().add("field", ByteType).add("_corrupt_record", StringType)
// negative cases
val msg = intercept[AnalysisException] {
spark.read.schema(schema).json(path).select("_corrupt_record").collect()
}.getMessage
assert(msg.contains("only include the internal corrupt record column"))
intercept[catalyst.errors.TreeNodeException[_]] {
spark.read.schema(schema).json(path).filter($"_corrupt_record".isNotNull).count()
}
// workaround
val df = spark.read.schema(schema).json(path).cache()
assert(df.filter($"_corrupt_record".isNotNull).count() == 1)
assert(df.filter($"_corrupt_record".isNull).count() == 2)
checkAnswer(
df.select("_corrupt_record"),
Row(null) :: Row(null) :: Row("{\\"field\\": \\"3\\"}") :: Nil
)
}
}
def testLineSeparator(lineSep: String): Unit = {
test(s"SPARK-21289: Support line separator - lineSep: '$lineSep'") {
// Read
val data =
s"""
| {"f":
|"a", "f0": 1}$lineSep{"f":
|
|"c", "f0": 2}$lineSep{"f": "d", "f0": 3}
""".stripMargin
val dataWithTrailingLineSep = s"$data$lineSep"
Seq(data, dataWithTrailingLineSep).foreach { lines =>
withTempPath { path =>
Files.write(path.toPath, lines.getBytes(StandardCharsets.UTF_8))
val df = spark.read.option("lineSep", lineSep).json(path.getAbsolutePath)
val expectedSchema =
StructType(StructField("f", StringType) :: StructField("f0", LongType) :: Nil)
checkAnswer(df, Seq(("a", 1), ("c", 2), ("d", 3)).toDF())
assert(df.schema === expectedSchema)
}
}
// Write
withTempPath { path =>
Seq("a", "b", "c").toDF("value").coalesce(1)
.write.option("lineSep", lineSep).json(path.getAbsolutePath)
val partFile = TestUtils.recursiveList(path).filter(f => f.getName.startsWith("part-")).head
val readBack = new String(Files.readAllBytes(partFile.toPath), StandardCharsets.UTF_8)
assert(
readBack === s"""{"value":"a"}$lineSep{"value":"b"}$lineSep{"value":"c"}$lineSep""")
}
// Roundtrip
withTempPath { path =>
val df = Seq("a", "b", "c").toDF()
df.write.option("lineSep", lineSep).json(path.getAbsolutePath)
val readBack = spark.read.option("lineSep", lineSep).json(path.getAbsolutePath)
checkAnswer(df, readBack)
}
}
}
// scalastyle:off nonascii
Seq("|", "^", "::", "!!!@3", 0x1E.toChar.toString, "아").foreach { lineSep =>
testLineSeparator(lineSep)
}
// scalastyle:on nonascii
test("""SPARK-21289: Support line separator - default value \\r, \\r\\n and \\n""") {
val data =
"{\\"f\\": \\"a\\", \\"f0\\": 1}\\r{\\"f\\": \\"c\\", \\"f0\\": 2}\\r\\n{\\"f\\": \\"d\\", \\"f0\\": 3}\\n"
withTempPath { path =>
Files.write(path.toPath, data.getBytes(StandardCharsets.UTF_8))
val df = spark.read.json(path.getAbsolutePath)
val expectedSchema =
StructType(StructField("f", StringType) :: StructField("f0", LongType) :: Nil)
checkAnswer(df, Seq(("a", 1), ("c", 2), ("d", 3)).toDF())
assert(df.schema === expectedSchema)
}
}
test("SPARK-23849: schema inferring touches less data if samplingRatio < 1.0") {
// Set default values for the DataSource parameters to make sure
    // that the whole test file is mapped to only one partition. This will guarantee
// reliable sampling of the input file.
withSQLConf(
"spark.sql.files.maxPartitionBytes" -> (128 * 1024 * 1024).toString,
"spark.sql.files.openCostInBytes" -> (4 * 1024 * 1024).toString
)(withTempPath { path =>
val ds = sampledTestData.coalesce(1)
ds.write.text(path.getAbsolutePath)
val readback = spark.read.option("samplingRatio", 0.1).json(path.getCanonicalPath)
assert(readback.schema == new StructType().add("f1", LongType))
})
}
test("SPARK-23849: usage of samplingRatio while parsing a dataset of strings") {
val ds = sampledTestData.coalesce(1)
val readback = spark.read.option("samplingRatio", 0.1).json(ds)
assert(readback.schema == new StructType().add("f1", LongType))
}
test("SPARK-23849: samplingRatio is out of the range (0, 1.0]") {
val ds = spark.range(0, 100, 1, 1).map(_.toString)
val errorMsg0 = intercept[IllegalArgumentException] {
spark.read.option("samplingRatio", -1).json(ds)
}.getMessage
assert(errorMsg0.contains("samplingRatio (-1.0) should be greater than 0"))
val errorMsg1 = intercept[IllegalArgumentException] {
spark.read.option("samplingRatio", 0).json(ds)
}.getMessage
assert(errorMsg1.contains("samplingRatio (0.0) should be greater than 0"))
val sampled = spark.read.option("samplingRatio", 1.0).json(ds)
assert(sampled.count() == ds.count())
}
test("SPARK-23723: json in UTF-16 with BOM") {
val fileName = "test-data/utf16WithBOM.json"
val schema = new StructType().add("firstName", StringType).add("lastName", StringType)
val jsonDF = spark.read.schema(schema)
.option("multiline", "true")
.option("encoding", "UTF-16")
.json(testFile(fileName))
checkAnswer(jsonDF, Seq(Row("Chris", "Baird"), Row("Doug", "Rood")))
}
test("SPARK-23723: multi-line json in UTF-32BE with BOM") {
val fileName = "test-data/utf32BEWithBOM.json"
val schema = new StructType().add("firstName", StringType).add("lastName", StringType)
val jsonDF = spark.read.schema(schema)
.option("multiline", "true")
.json(testFile(fileName))
checkAnswer(jsonDF, Seq(Row("Chris", "Baird")))
}
test("SPARK-23723: Use user's encoding in reading of multi-line json in UTF-16LE") {
val fileName = "test-data/utf16LE.json"
val schema = new StructType().add("firstName", StringType).add("lastName", StringType)
val jsonDF = spark.read.schema(schema)
.option("multiline", "true")
.options(Map("encoding" -> "UTF-16LE"))
.json(testFile(fileName))
checkAnswer(jsonDF, Seq(Row("Chris", "Baird")))
}
test("SPARK-23723: Unsupported encoding name") {
val invalidCharset = "UTF-128"
val exception = intercept[UnsupportedCharsetException] {
spark.read
.options(Map("encoding" -> invalidCharset, "lineSep" -> "\\n"))
.json(testFile("test-data/utf16LE.json"))
.count()
}
assert(exception.getMessage.contains(invalidCharset))
}
test("SPARK-23723: checking that the encoding option is case agnostic") {
val fileName = "test-data/utf16LE.json"
val schema = new StructType().add("firstName", StringType).add("lastName", StringType)
val jsonDF = spark.read.schema(schema)
.option("multiline", "true")
.options(Map("encoding" -> "uTf-16lE"))
.json(testFile(fileName))
checkAnswer(jsonDF, Seq(Row("Chris", "Baird")))
}
test("SPARK-23723: specified encoding is not matched to actual encoding") {
val fileName = "test-data/utf16LE.json"
val schema = new StructType().add("firstName", StringType).add("lastName", StringType)
val exception = intercept[SparkException] {
spark.read.schema(schema)
.option("mode", "FAILFAST")
.option("multiline", "true")
.options(Map("encoding" -> "UTF-16BE"))
.json(testFile(fileName))
.count()
}
val errMsg = exception.getMessage
assert(errMsg.contains("Malformed records are detected in record parsing"))
}
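  // Reads every JSON part file under `pathToJsonFiles` with `expectedEncoding` and checks
  // that the concatenated, trimmed content equals `expectedContent`.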
def checkEncoding(expectedEncoding: String, pathToJsonFiles: String,
expectedContent: String): Unit = {
val jsonFiles = new File(pathToJsonFiles)
.listFiles()
.filter(_.isFile)
.filter(_.getName.endsWith("json"))
val actualContent = jsonFiles.map { file =>
new String(Files.readAllBytes(file.toPath), expectedEncoding)
}.mkString.trim
assert(actualContent == expectedContent)
}
test("SPARK-23723: save json in UTF-32BE") {
val encoding = "UTF-32BE"
withTempPath { path =>
val df = spark.createDataset(Seq(("Dog", 42)))
df.write
.options(Map("encoding" -> encoding))
.json(path.getCanonicalPath)
checkEncoding(
expectedEncoding = encoding,
pathToJsonFiles = path.getCanonicalPath,
expectedContent = """{"_1":"Dog","_2":42}""")
}
}
test("SPARK-23723: save json in default encoding - UTF-8") {
withTempPath { path =>
val df = spark.createDataset(Seq(("Dog", 42)))
df.write.json(path.getCanonicalPath)
checkEncoding(
expectedEncoding = "UTF-8",
pathToJsonFiles = path.getCanonicalPath,
expectedContent = """{"_1":"Dog","_2":42}""")
}
}
test("SPARK-23723: wrong output encoding") {
val encoding = "UTF-128"
val exception = intercept[SparkException] {
withTempPath { path =>
val df = spark.createDataset(Seq((0)))
df.write
.options(Map("encoding" -> encoding))
.json(path.getCanonicalPath)
}
}
val baos = new ByteArrayOutputStream()
val ps = new PrintStream(baos, true, "UTF-8")
exception.printStackTrace(ps)
ps.flush()
assert(baos.toString.contains(
"java.nio.charset.UnsupportedCharsetException: UTF-128"))
}
test("SPARK-23723: read back json in UTF-16LE") {
val options = Map("encoding" -> "UTF-16LE", "lineSep" -> "\\n")
withTempPath { path =>
val ds = spark.createDataset(Seq(("a", 1), ("b", 2), ("c", 3))).repartition(2)
ds.write.options(options).json(path.getCanonicalPath)
val readBack = spark
.read
.options(options)
.json(path.getCanonicalPath)
checkAnswer(readBack.toDF(), ds.toDF())
}
}
test("SPARK-23723: write json in UTF-16/32 with multiline off") {
Seq("UTF-16", "UTF-32").foreach { encoding =>
withTempPath { path =>
val ds = spark.createDataset(Seq(("a", 1))).repartition(1)
ds.write
.option("encoding", encoding)
.option("multiline", false)
.json(path.getCanonicalPath)
val jsonFiles = path.listFiles().filter(_.getName.endsWith("json"))
jsonFiles.foreach { jsonFile =>
val readback = Files.readAllBytes(jsonFile.toPath)
val expected = ("""{"_1":"a","_2":1}""" + "\\n").getBytes(Charset.forName(encoding))
assert(readback === expected)
}
}
}
}
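  // Generates a test that writes two records separated by `lineSep` in the given `encoding`,
  // then reads them back, either inferring the schema or supplying it explicitly.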
def checkReadJson(lineSep: String, encoding: String, inferSchema: Boolean, id: Int): Unit = {
test(s"SPARK-23724: checks reading json in ${encoding} #${id}") {
val schema = new StructType().add("f1", StringType).add("f2", IntegerType)
withTempPath { path =>
val records = List(("a", 1), ("b", 2))
val data = records
.map(rec => s"""{"f1":"${rec._1}", "f2":${rec._2}}""".getBytes(encoding))
.reduce((a1, a2) => a1 ++ lineSep.getBytes(encoding) ++ a2)
val os = new FileOutputStream(path)
os.write(data)
os.close()
val reader = if (inferSchema) {
spark.read
} else {
spark.read.schema(schema)
}
val readBack = reader
.option("encoding", encoding)
.option("lineSep", lineSep)
.json(path.getCanonicalPath)
checkAnswer(readBack, records.map(rec => Row(rec._1, rec._2)))
}
}
}
// scalastyle:off nonascii
List(
(0, "|", "UTF-8", false),
(1, "^", "UTF-16BE", true),
(2, "::", "ISO-8859-1", true),
(3, "!!!@3", "UTF-32LE", false),
(4, 0x1E.toChar.toString, "UTF-8", true),
(5, "아", "UTF-32BE", false),
(6, "куку", "CP1251", true),
(7, "sep", "utf-8", false),
(8, "\\r\\n", "UTF-16LE", false),
(9, "\\r\\n", "utf-16be", true),
(10, "\\u000d\\u000a", "UTF-32BE", false),
(11, "\\u000a\\u000d", "UTF-8", true),
(12, "===", "US-ASCII", false),
(13, "$^+", "utf-32le", true)
).foreach {
case (testNum, sep, encoding, inferSchema) => checkReadJson(sep, encoding, inferSchema, testNum)
}
// scalastyle:on nonascii
test("SPARK-23724: lineSep should be set if encoding if different from UTF-8") {
val encoding = "UTF-16LE"
val exception = intercept[IllegalArgumentException] {
spark.read
.options(Map("encoding" -> encoding))
.json(testFile("test-data/utf16LE.json"))
.count()
}
assert(exception.getMessage.contains(
s"""The lineSep option must be specified for the $encoding encoding"""))
}
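  // A record prefixed with null and control characters; the SPARK-23094 tests below verify
  // that such leading nulls are tolerated in PERMISSIVE mode.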
private val badJson = "\\u0000\\u0000\\u0000A\\u0001AAA"
test("SPARK-23094: permissively read JSON file with leading nulls when multiLine is enabled") {
withTempPath { tempDir =>
val path = tempDir.getAbsolutePath
Seq(badJson + """{"a":1}""").toDS().write.text(path)
val expected = s"""${badJson}{"a":1}\\n"""
val schema = new StructType().add("a", IntegerType).add("_corrupt_record", StringType)
val df = spark.read.format("json")
.option("mode", "PERMISSIVE")
.option("multiLine", true)
.option("encoding", "UTF-8")
.schema(schema).load(path)
checkAnswer(df, Row(null, expected))
}
}
test("SPARK-23094: permissively read JSON file with leading nulls when multiLine is disabled") {
withTempPath { tempDir =>
val path = tempDir.getAbsolutePath
Seq(badJson, """{"a":1}""").toDS().write.text(path)
val schema = new StructType().add("a", IntegerType).add("_corrupt_record", StringType)
val df = spark.read.format("json")
.option("mode", "PERMISSIVE")
.option("multiLine", false)
.option("encoding", "UTF-8")
.schema(schema).load(path)
checkAnswer(df, Seq(Row(1, null), Row(null, badJson)))
}
}
test("SPARK-23094: permissively parse a dataset contains JSON with leading nulls") {
checkAnswer(
spark.read.option("mode", "PERMISSIVE").option("encoding", "UTF-8").json(Seq(badJson).toDS()),
Row(badJson))
}
test("SPARK-23772 ignore column of all null values or empty array during schema inference") {
withTempPath { tempDir =>
val path = tempDir.getAbsolutePath
// primitive types
Seq(
"""{"a":null, "b":1, "c":3.0}""",
"""{"a":null, "b":null, "c":"string"}""",
"""{"a":null, "b":null, "c":null}""")
.toDS().write.text(path)
var df = spark.read.format("json")
.option("dropFieldIfAllNull", true)
.load(path)
var expectedSchema = new StructType()
.add("b", LongType).add("c", StringType)
assert(df.schema === expectedSchema)
checkAnswer(df, Row(1, "3.0") :: Row(null, "string") :: Row(null, null) :: Nil)
// arrays
Seq(
"""{"a":[2, 1], "b":[null, null], "c":null, "d":[[], [null]], "e":[[], null, [[]]]}""",
"""{"a":[null], "b":[null], "c":[], "d":[null, []], "e":null}""",
"""{"a":null, "b":null, "c":[], "d":null, "e":[null, [], null]}""")
.toDS().write.mode("overwrite").text(path)
df = spark.read.format("json")
.option("dropFieldIfAllNull", true)
.load(path)
expectedSchema = new StructType()
.add("a", ArrayType(LongType))
assert(df.schema === expectedSchema)
checkAnswer(df, Row(Array(2, 1)) :: Row(Array(null)) :: Row(null) :: Nil)
// structs
Seq(
"""{"a":{"a1": 1, "a2":"string"}, "b":{}}""",
"""{"a":{"a1": 2, "a2":null}, "b":{"b1":[null]}}""",
"""{"a":null, "b":null}""")
.toDS().write.mode("overwrite").text(path)
df = spark.read.format("json")
.option("dropFieldIfAllNull", true)
.load(path)
expectedSchema = new StructType()
.add("a", StructType(StructField("a1", LongType) :: StructField("a2", StringType)
:: Nil))
assert(df.schema === expectedSchema)
checkAnswer(df, Row(Row(1, "string")) :: Row(Row(2, null)) :: Row(null) :: Nil)
}
}
test("SPARK-24190: restrictions for JSONOptions in read") {
for (encoding <- Set("UTF-16", "UTF-32")) {
val exception = intercept[IllegalArgumentException] {
spark.read
.option("encoding", encoding)
.option("multiLine", false)
.json(testFile("test-data/utf16LE.json"))
.count()
}
assert(exception.getMessage.contains("encoding must not be included in the blacklist"))
}
}
}
| bravo-zhang/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/json/JsonSuite.scala | Scala | apache-2.0 | 92,707 |
package org.jetbrains.plugins.scala
package lang.refactoring.introduceField
import com.intellij.internal.statistic.UsageTrigger
import com.intellij.openapi.actionSystem.DataContext
import com.intellij.openapi.editor.markup.RangeHighlighter
import com.intellij.openapi.editor.{Document, Editor}
import com.intellij.openapi.project.Project
import com.intellij.openapi.util.TextRange
import com.intellij.psi.{PsiDocumentManager, PsiElement, PsiFile}
import org.jetbrains.plugins.scala.extensions.childOf
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScEarlyDefinitions
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.{ScExtendsBlock, ScTemplateParents}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScMember, ScTemplateDefinition}
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
import org.jetbrains.plugins.scala.lang.psi.types.ScType
import org.jetbrains.plugins.scala.lang.refactoring.introduceField.ScalaIntroduceFieldHandlerBase._
import org.jetbrains.plugins.scala.lang.refactoring.util.ScalaRefactoringUtil
import org.jetbrains.plugins.scala.lang.refactoring.util.ScalaRefactoringUtil._
import org.jetbrains.plugins.scala.util.ScalaUtils
/**
* Nikolay.Tropin
* 6/27/13
*/
class ScalaIntroduceFieldFromExpressionHandler extends ScalaIntroduceFieldHandlerBase {
private var occurrenceHighlighters = Seq.empty[RangeHighlighter]
def invoke(project: Project, editor: Editor, file: PsiFile, startOffset: Int, endOffset: Int) {
try {
UsageTrigger.trigger(ScalaBundle.message("introduce.field.id"))
PsiDocumentManager.getInstance(project).commitAllDocuments()
ScalaRefactoringUtil.checkFile(file, project, editor, REFACTORING_NAME)
val (expr: ScExpression, types: Array[ScType]) = getExpression(project, editor, file, startOffset, endOffset).
getOrElse(showErrorMessage(ScalaBundle.message("cannot.refactor.not.expression"), project, editor, REFACTORING_NAME))
afterClassChoosing[ScExpression](expr, types, project, editor, file, "Choose class for Introduce Field") {
convertExpressionToField
}
}
catch {
case _: IntroduceException => return
}
}
override def invoke(project: Project, editor: Editor, file: PsiFile, dataContext: DataContext) {
val canBeIntroduced: (ScExpression) => Boolean = ScalaRefactoringUtil.checkCanBeIntroduced(_)
ScalaRefactoringUtil.afterExpressionChoosing(project, editor, file, dataContext, REFACTORING_NAME, canBeIntroduced) {
ScalaRefactoringUtil.trimSpacesAndComments(editor, file)
invoke(project, editor, file, editor.getSelectionModel.getSelectionStart, editor.getSelectionModel.getSelectionEnd)
}
}
override def invoke(project: Project, elements: Array[PsiElement], dataContext: DataContext) {
//nothing
}
def convertExpressionToField(ifc: IntroduceFieldContext[ScExpression]) {
ScalaRefactoringUtil.checkCanBeIntroduced(ifc.element, showErrorMessage(_, ifc.project, ifc.editor, REFACTORING_NAME))
def runWithDialog() {
val settings = new IntroduceFieldSettings(ifc)
if (!settings.canBeInitInDeclaration && !settings.canBeInitLocally) {
ScalaRefactoringUtil.showErrorMessage("Cannot create field from this expression", ifc.project, ifc.editor,
ScalaBundle.message("introduce.field.title"))
} else {
val dialog = getDialog(ifc, settings)
if (dialog.isOK) {
runRefactoring(ifc, settings)
}
}
}
runWithDialog()
}
private def runRefactoringInside(ifc: IntroduceFieldContext[ScExpression], settings: IntroduceFieldSettings[ScExpression]) {
val expression = ScalaRefactoringUtil.expressionToIntroduce(ifc.element)
val mainOcc = ifc.occurrences.filter(_.getStartOffset == ifc.editor.getSelectionModel.getSelectionStart)
val occurrencesToReplace = if (settings.replaceAll) ifc.occurrences else mainOcc
val aClass = ifc.aClass
val checkAnchor: PsiElement = anchorForNewDeclaration(expression, occurrencesToReplace, aClass)
if (checkAnchor == null)
ScalaRefactoringUtil.showErrorMessage("Cannot find place for the new field", ifc.project, ifc.editor, ScalaBundle.message("introduce.field.title"))
val manager = aClass.getManager
val name = settings.name
val typeName = Option(settings.scType).map(_.canonicalText).getOrElse("")
val replacedOccurences = ScalaRefactoringUtil.replaceOccurences(occurrencesToReplace, name, ifc.file)
val anchor = anchorForNewDeclaration(expression, replacedOccurences, aClass)
val initInDecl = settings.initInDeclaration
var createdDeclaration: PsiElement = null
if (initInDecl) {
createdDeclaration = ScalaPsiElementFactory
.createDeclaration(name, typeName, settings.defineVar, expression, manager)
} else {
val underscore = ScalaPsiElementFactory.createExpressionFromText("_", manager)
createdDeclaration = ScalaPsiElementFactory
.createDeclaration(name, typeName, settings.defineVar, underscore, manager)
anchorForInitializer(replacedOccurences, ifc.file) match {
case Some(anchorForInit) =>
val parent = anchorForInit.getParent
val assignStmt = ScalaPsiElementFactory.createExpressionFromText(s"$name = ${expression.getText}", manager)
parent.addBefore(assignStmt, anchorForInit)
parent.addBefore(ScalaPsiElementFactory.createNewLineNode(manager, "\\n").getPsi, anchorForInit)
case None => throw new IntroduceException
}
}
import org.jetbrains.plugins.scala.settings.ScalaApplicationSettings.VisibilityLevel
settings.visibilityLevel match {
case VisibilityLevel.DEFAULT =>
case VisibilityLevel.PRIVATE => createdDeclaration.asInstanceOf[ScMember].setModifierProperty("private", value = true)
case VisibilityLevel.PROTECTED => createdDeclaration.asInstanceOf[ScMember].setModifierProperty("protected", value = true)
}
lazy val document: Document = ifc.editor.getDocument
anchor match {
case (tp: ScTemplateParents) childOf (extBl: ScExtendsBlock) =>
val earlyDef = extBl.addEarlyDefinitions()
createdDeclaration = earlyDef.addAfter(createdDeclaration, earlyDef.getFirstChild)
case _ childOf (ed: ScEarlyDefinitions) if onOneLine(document, ed.getTextRange) =>
def isBlockStmtOrMember(elem: PsiElement) = elem != null && (elem.isInstanceOf[ScBlockStatement] || elem.isInstanceOf[ScMember])
var declaration = createdDeclaration.getText
if (isBlockStmtOrMember(anchor)) declaration += "; "
if (isBlockStmtOrMember(anchor.getPrevSibling)) declaration = "; " + declaration
document.insertString(anchor.getTextRange.getStartOffset, declaration)
PsiDocumentManager.getInstance(ifc.project).commitDocument(document)
case _ childOf parent =>
createdDeclaration = parent.addBefore(createdDeclaration, anchor)
parent.addBefore(ScalaPsiElementFactory.createNewLineNode(manager, "\\n").getPsi, anchor)
}
ScalaPsiUtil.adjustTypes(createdDeclaration)
}
def runRefactoring(ifc: IntroduceFieldContext[ScExpression], settings: IntroduceFieldSettings[ScExpression]) {
val runnable = new Runnable {
def run() = runRefactoringInside(ifc, settings)
}
ScalaUtils.runWriteAction(runnable, ifc.project, REFACTORING_NAME)
ifc.editor.getSelectionModel.removeSelection()
}
protected def getDialog(ifc: IntroduceFieldContext[ScExpression], settings: IntroduceFieldSettings[ScExpression]): ScalaIntroduceFieldDialog = {
val occCount = ifc.occurrences.length
// Add occurrences highlighting
if (occCount > 1)
occurrenceHighlighters = ScalaRefactoringUtil.highlightOccurrences(ifc.project, ifc.occurrences, ifc.editor)
val dialog = new ScalaIntroduceFieldDialog(ifc, settings)
dialog.show()
if (!dialog.isOK) {
if (occCount > 1) {
occurrenceHighlighters.foreach(_.dispose())
occurrenceHighlighters = Seq.empty
}
}
dialog
}
protected override def isSuitableClass(elem: PsiElement, clazz: ScTemplateDefinition): Boolean = true
private def onOneLine(document: Document, range: TextRange): Boolean = {
document.getLineNumber(range.getStartOffset) == document.getLineNumber(range.getEndOffset)
}
}
| triggerNZ/intellij-scala | src/org/jetbrains/plugins/scala/lang/refactoring/introduceField/ScalaIntroduceFieldFromExpressionHandler.scala | Scala | apache-2.0 | 8,483 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import java.util.concurrent.atomic.AtomicReference
/**
* Trait that can be mixed into suites that need code executed before and after running each test.
*
* <table><tr><td class="usage">
* <strong>Recommended Usage</strong>:
* Use trait <code>BeforeAndAfter</code> when you need to perform the same side-effects before and/or after tests, rather than at the beginning
 * or end of tests. <em>Note: For more insight into where <code>BeforeAndAfter</code> fits into the big picture, see the
* <a href="FlatSpec.html#sharedFixtures">Shared fixtures</a> section in the documentation for your chosen style trait.</em>
* </td></tr></table>
*
* <p>
* A test <em>fixture</em> is composed of the objects and other artifacts (files, sockets, database
* connections, <em>etc.</em>) tests use to do their work.
* When multiple tests need to work with the same fixtures, it is important to try and avoid
* duplicating the fixture code across those tests. The more code duplication you have in your
* tests, the greater drag the tests will have on refactoring the actual production code.
* Trait <code>BeforeAndAfter</code> offers one way to eliminate such code duplication:
* a <code>before</code> clause that will register code to be run before each test,
* and an <code>after</code> clause that will register code to be run after.
* </p>
*
* <p>
* Here's an example:
* </p>
*
* <pre class="stHighlight">
* package org.scalatest.examples.flatspec.beforeandafter
*
* import org.scalatest._
* import collection.mutable.ListBuffer
*
* class ExampleSpec extends FlatSpec with BeforeAndAfter {
*
* val builder = new StringBuilder
* val buffer = new ListBuffer[String]
*
* before {
* builder.append("ScalaTest is ")
* }
*
* after {
* builder.clear()
* buffer.clear()
* }
*
* "Testing" should "be easy" in {
* builder.append("easy!")
* assert(builder.toString === "ScalaTest is easy!")
* assert(buffer.isEmpty)
* buffer += "sweet"
* }
*
* it should "be fun" in {
* builder.append("fun!")
* assert(builder.toString === "ScalaTest is fun!")
* assert(buffer.isEmpty)
* }
* }
* </pre>
*
* <p>
* The <code>before</code> and <code>after</code> methods can each only be called once per <code>Suite</code>,
* and cannot be invoked after <code>run</code> has been invoked. If either of the registered before or after functions
* complete abruptly with an exception, it will be reported as an aborted suite and no more tests will be attempted in that suite.
* </p>
*
* <p>
* Note that the only way <code>before</code> and <code>after</code> code can communicate with test code is via some side-effecting mechanism, commonly by
* reassigning instance <code>var</code>s or by changing the state of mutable objects held from instance <code>val</code>s (as in this example). If using
* instance <code>var</code>s or mutable objects held from instance <code>val</code>s you wouldn't be able to run tests in parallel in the same instance
* of the test class unless you synchronized access to the shared, mutable state. This is why ScalaTest's <a href="ParallelTestExecution.html"><code>ParallelTestExecution</code></a> trait extends
* <a href="OneInstancePerTest.html"><code>OneInstancePerTest</code></a>. By running each test in its own instance of the class, each test has its own copy of the instance variables, so you
* don't need to synchronize. Were you to mix <code>ParallelTestExecution</code> into the <code>ExampleSuite</code> above, the tests would run in parallel just fine
* without any synchronization needed on the mutable <code>StringBuilder</code> and <code>ListBuffer[String]</code> objects.
* </p>
*
* <p>
* Although <code>BeforeAndAfter</code> provides a minimal-boilerplate way to execute code before and after tests, it isn't designed to enable stackable
* traits, because the order of execution would be non-obvious. If you want to factor out before and after code that is common to multiple test suites, you
* should use trait <a href="BeforeAndAfterEach.html"><code>BeforeAndAfterEach</code></a> instead.
* </p>
*
* <p>
* The advantage this trait has over <code>BeforeAndAfterEach</code> is that its syntax is more concise.
* The main disadvantage is that it is not stackable, whereas <code>BeforeAndAfterEach</code> is. <em>I.e.</em>,
* you can write several traits that extend <code>BeforeAndAfterEach</code> and provide <code>beforeEach</code> methods
* that include a call to <code>super.beforeEach</code>, and mix them together in various combinations. By contrast,
* only one call to the <code>before</code> registration function is allowed in a suite or spec that mixes
* in <code>BeforeAndAfter</code>. In addition, <code>BeforeAndAfterEach</code> allows you to access
* the config map and test name via the <a href="TestData.html"><code>TestData</code></a> passed to its <code>beforeEach</code> and
* <code>afterEach</code> methods, whereas <code>BeforeAndAfter</code>
* gives you no access to the config map.
* </p>
*
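 * <p>
 * For illustration only, a stackable fixture trait built on <code>BeforeAndAfterEach</code> might
 * look like the following sketch (the <code>Builder</code> trait here is hypothetical, not part of ScalaTest):
 * </p>
 *
 * <pre class="stHighlight">
 * trait Builder extends BeforeAndAfterEach { this: Suite =>
 *   val builder = new StringBuilder
 *   override def beforeEach() {
 *     builder.append("ScalaTest is ")
 *     super.beforeEach() // to remain stackable, invoke super
 *   }
 *   override def afterEach() {
 *     try super.afterEach() finally builder.clear()
 *   }
 * }
 * </pre>
 *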
* @author Bill Venners
*/
trait BeforeAndAfter extends SuiteMixin { this: Suite =>
private val beforeFunctionAtomic = new AtomicReference[Option[() => Any]](None)
private val afterFunctionAtomic = new AtomicReference[Option[() => Any]](None)
@volatile private var runHasBeenInvoked = false
/**
* Registers code to be executed before each of this suite's tests.
*
* <p>
* This trait's implementation
* of <code>runTest</code> executes the code passed to this method before running
* each test. Thus the code passed to this method can be used to set up a test fixture
* needed by each test.
* </p>
*
* @throws NotAllowedException if invoked more than once on the same <code>Suite</code> or if
* invoked after <code>run</code> has been invoked on the <code>Suite</code>
*/
protected def before(fun: => Any) {
if (runHasBeenInvoked)
throw new NotAllowedException("You cannot call before after run has been invoked (such as, from within a test). It is probably best to move it to the top level of the Suite class so it is executed during object construction.", 0)
val success = beforeFunctionAtomic.compareAndSet(None, Some(() => fun))
if (!success)
throw new NotAllowedException("You are only allowed to call before once in each Suite that mixes in BeforeAndAfter.", 0)
}
/**
* Registers code to be executed after each of this suite's tests.
*
* <p>
* This trait's implementation of <code>runTest</code> executes the code passed to this method after running
* each test. Thus the code passed to this method can be used to tear down a test fixture
* needed by each test.
* </p>
*
* @throws NotAllowedException if invoked more than once on the same <code>Suite</code> or if
* invoked after <code>run</code> has been invoked on the <code>Suite</code>
*/
protected def after(fun: => Any) {
if (runHasBeenInvoked)
throw new NotAllowedException("You cannot call after after run has been invoked (such as, from within a test. It is probably best to move it to the top level of the Suite class so it is executed during object construction.", 0)
val success = afterFunctionAtomic.compareAndSet(None, Some(() => fun))
if (!success)
throw new NotAllowedException("You are only allowed to call after once in each Suite that mixes in BeforeAndAfter.", 0)
}
/**
* Run a test surrounded by calls to the code passed to <code>before</code> and <code>after</code>, if any.
*
* <p>
* This trait's implementation of this method ("this method") invokes
* the function registered with <code>before</code>, if any,
* before running each test and the function registered with <code>after</code>, if any,
* after running each test. It runs each test by invoking <code>super.runTest</code>, passing along
* the five parameters passed to it.
* </p>
*
* <p>
* If any invocation of the function registered with <code>before</code> completes abruptly with an exception, this
* method will complete abruptly with the same exception. If any call to
* <code>super.runTest</code> completes abruptly with an exception, this method
* will complete abruptly with the same exception, however, before doing so, it will
* invoke the function registered with <code>after</code>, if any. If the function registered with <code>after</code>
* <em>also</em> completes abruptly with an exception, this
* method will nevertheless complete abruptly with the exception previously thrown by <code>super.runTest</code>.
* If <code>super.runTest</code> returns normally, but the function registered with <code>after</code> completes abruptly with an
* exception, this method will complete abruptly with the exception thrown by the function registered with <code>after</code>.
* </p>
*
* @param testName the name of one test to run.
* @param args the <code>Args</code> for this run
   * @return a <code>Status</code> object that indicates when the test started by this method has completed, and whether or not it failed.
*/
abstract protected override def runTest(testName: String, args: Args): Status = {
var thrownException: Option[Throwable] = None
beforeFunctionAtomic.get match {
case Some(fun) => fun()
case None =>
}
try {
super.runTest(testName, args)
}
catch {
case e: Exception => thrownException = Some(e)
FailedStatus
}
finally {
try {
// Make sure that afterEach is called even if runTest completes abruptly.
afterFunctionAtomic.get match {
case Some(fun) => fun()
case None =>
}
thrownException match {
case Some(e) => throw e
case None =>
}
}
catch {
case laterException: Exception =>
thrownException match { // If both run and afterAll throw an exception, report the test exception
case Some(earlierException) => throw earlierException
case None => throw laterException
}
}
}
}
/**
* This trait's implementation of run sets a flag indicating run has been invoked, after which
* any invocation to <code>before</code> or <code>after</code> will complete abruptly
* with a <code>NotAllowedException</code>.
*
* @param testName an optional name of one test to run. If <code>None</code>, all relevant tests should be run.
* I.e., <code>None</code> acts like a wildcard that means run all relevant tests in this <code>Suite</code>.
* @param args the <code>Args</code> for this run
* @return a <code>Status</code> object that indicates when all tests and nested suites started by this method have completed, and whether or not a failure occurred.
*/
abstract override def run(testName: Option[String], args: Args): Status = {
runHasBeenInvoked = true
super.run(testName, args)
}
}
| travisbrown/scalatest | src/main/scala/org/scalatest/BeforeAndAfter.scala | Scala | apache-2.0 | 11,710 |
/*
---------------------------------------------------------------------------
This software is released under a BSD license, adapted from
http://opensource.org/licenses/bsd-license.php
Copyright (c) 2010-2018, Brian M. Clapper
All rights reserved.
See the accompanying license file for details.
---------------------------------------------------------------------------
*/
package org.clapper.markwrap
class HTMLTextSpec extends BaseSpec {
"MarkWrap.HTML" should "properly render HTML" in {
doType(MarkupType.HTML)
}
"MarkWrap.HTML" should "properly render XHTML" in {
doType(MarkupType.XHTML)
}
private def doType(markupType: MarkupType): Unit = {
val data = List(
("<h1>Test</h2>", "<h1>Test</h2>"),
("<p>_Test_</p>", "<p>_Test_</p>")
)
val parser = MarkWrap.converterFor(markupType)
for ((input, expected) <- data) {
parser.parseToHTML(input) shouldBe expected
}
}
}
| bmc/markwrap | src/test/scala/org/clapper/markwrap/HTMLTextSpec.scala | Scala | bsd-3-clause | 963 |
package sds.classfile.bytecode
import sds.classfile.ClassfileStream
import sds.classfile.ClassfileInformation
import sds.classfile.bytecode.{MnemonicTable => Table}
import sds.classfile.constant_pool.ConstantInfo
class OpcodeInfo(__type: String, _pc: Int) extends ClassfileInformation {
def _type: String = __type
def pc: Int = _pc
override def toString(): String = s"$pc - ${_type}"
}
object OpcodeInfo {
def apply(pc: Int, data: ClassfileStream, pool: Array[ConstantInfo]): OpcodeInfo = {
val opcode: Int = data.byte & 0xff
val opType: String = Table.OPCODES(opcode)
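        // Opcodes in the ranges below take no operands, so no additional bytes are consumed.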
if((0x00 to 0x0f).contains(opcode) || (0x1a to 0x35).contains(opcode) ||
(0x3b to 0x83).contains(opcode) || (0x85 to 0x98).contains(opcode) ||
(0xac to 0xb1).contains(opcode) || opcode == 0xbe || opcode == 0xbf ||
opcode == 0xc2 || opcode == 0xc3 || opcode == 0xca ) {
return new OpcodeInfo(opType, pc)
}
opcode match {
case 0x10 => new PushOpcode(data.byte, opType, pc) /** bipush **/
case 0x11 => new PushOpcode(data.short, opType, pc) /** sipush **/
case 0x12 => new HasReferenceOpcode(data.unsignedByte, pool, opType, pc) /** ldc **/
case 0x13 /** ldc_w **/
| 0x14 /** ldc2_w **/
| 0xb2 /** getstatic **/
| 0xb3 /** putstatic **/
| 0xb4 /** getfield **/
| 0xb5 /** putfield **/
| 0xb6 /** invokevirtual **/
| 0xb7 /** invokespecial **/
| 0xb8 /** invokestatic **/
| 0xbb /** new **/
| 0xc0 /** checkcast **/
| 0xbd /** anewarray **/
| 0xc1 /** instanceof **/
=> new HasReferenceOpcode(data.short, pool, opType, pc)
case 0x15 /** iload **/
| 0x16 /** lload **/
| 0x17 /** fload **/
| 0x18 /** dload **/
| 0x19 /** aload **/
| 0x36 /** istore **/
| 0x37 /** lstore **/
| 0x38 /** fstore **/
| 0x39 /** dstore **/
| 0x3a /** astore **/
| 0xa9 /** ret **/
=> new IndexOpcode(data.unsignedByte, opType, pc)
case 0x84 => new Iinc(data.unsignedByte, data.byte, pc)
case 0x99 /** ifeq **/
| 0x9a /** ifne **/
| 0x9b /** iflt **/
| 0x9c /** ifge **/
| 0x9d /** ifgt **/
| 0x9e /** ifle **/
| 0x9f /** if_icmpeq **/
| 0xa0 /** if_icmpne **/
| 0xa1 /** if_icmplt **/
| 0xa2 /** if_icmpge **/
| 0xa3 /** if_icmpgt **/
| 0xa4 /** if_icmple **/
| 0xa5 /** if_acmpeq **/
| 0xa6 /** if_acmpne **/
| 0xa7 /** goto **/
| 0xa8 /** jsr **/
| 0xc6 /** ifnull **/
| 0xc7 /** ifnonnull **/
=> new BranchOpcode(data.short, opType, pc)
case 0xaa => new TableSwitch(data, pc)
case 0xab => new LookupSwitch(data, pc)
case 0xb9 => new InvokeInterface(data, pool, pc)
case 0xba => new InvokeDynamic(data, pool, pc)
case 0xbc => new NewArray(data.unsignedByte, pc)
case 0xc4 => new Wide(data, pool, pc)
case 0xc5 => new MultiANewArray(data.short, data.byte, pool, pc)
case 0xc8 /** goto_w **/
| 0xc9 /** jsr_w **/
=> new BranchOpcode(data.int, opType, pc)
case 0xfe => new OpcodeInfo(Table.OPCODES(0xcb), pc)
case 0xff => new OpcodeInfo(Table.OPCODES(0xcc), pc)
case _ => throw new IllegalArgumentException("undefined opcode(" + opcode + ")")
}
}
} | g1144146/sds_for_scala | src/main/scala/sds/classfile/bytecode/OpcodeInfo.scala | Scala | apache-2.0 | 3,969 |
package lila.history
import scala.concurrent.duration._
import scala.math.round
import org.joda.time.DateTime
import play.api.libs.json._
import lila.rating.{ Glicko, PerfType }
import lila.user.{ User, Perfs }
final class RatingChartApi(
historyApi: HistoryApi,
mongoCache: lila.memo.MongoCache.Builder,
cacheTtl: FiniteDuration) {
def apply(user: User): Fu[Option[String]] = cache(user) map { chart =>
chart.nonEmpty option chart
}
def singlePerf(user: User, perfType: PerfType): Fu[JsArray] =
historyApi.ratingsMap(user, perfType) map {
ratingsMapToJson(user, _)
} map JsArray.apply
private val cache = mongoCache[User, String](
prefix = "history:rating",
f = (user: User) => build(user) map (~_),
maxCapacity = 64,
timeToLive = cacheTtl,
keyToString = _.id)
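  // Emits [year, month, day, rating] points; the month is written zero-based
  // (getMonthOfYear - 1) to match JavaScript Date conventions on the client.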
private def ratingsMapToJson(user: User, ratingsMap: RatingsMap) = ratingsMap.map {
case (days, rating) =>
val date = user.createdAt plusDays days
Json.arr(date.getYear, date.getMonthOfYear - 1, date.getDayOfMonth, rating)
}
private def build(user: User): Fu[Option[String]] =
historyApi get user.id map2 { (history: History) =>
Json stringify {
Json.toJson {
import lila.rating.PerfType._
List(Bullet, Blitz, Classical, Correspondence, Chess960, KingOfTheHill, ThreeCheck, Antichess, Atomic, Horde, RacingKings, Crazyhouse, Puzzle) map { pt =>
Json.obj(
"name" -> pt.name,
"points" -> ratingsMapToJson(user, history(pt))
)
}
}
}
}
}
| clarkerubber/lila | modules/history/src/main/RatingChartApi.scala | Scala | agpl-3.0 | 1,603 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.util.Properties
import kafka.message.Message
import kafka.consumer.ConsumerConfig
import kafka.utils.{VerifiableProperties, ZKConfig, Utils}
/**
* Configuration settings for the kafka server
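 *
 * For example, a minimal configuration might be built as follows (an illustrative sketch;
 * `zookeeper.connect` is required by the inherited ZKConfig):
 * {{{
 * val props = new java.util.Properties()
 * props.put("broker.id", "0") // required, no default
 * props.put("zookeeper.connect", "localhost:2181")
 * val config = new KafkaConfig(props)
 * }}}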
*/
class KafkaConfig private (val props: VerifiableProperties) extends ZKConfig(props) {
def this(originalProps: Properties) {
this(new VerifiableProperties(originalProps))
props.verify()
}
/*********** General Configuration ***********/
/* the broker id for this server */
val brokerId: Int = props.getIntInRange("broker.id", (0, Int.MaxValue))
/* the maximum size of message that the server can receive */
val messageMaxBytes = props.getIntInRange("message.max.bytes", 1000000, (0, Int.MaxValue))
/* the number of network threads that the server uses for handling network requests */
val numNetworkThreads = props.getIntInRange("num.network.threads", 3, (1, Int.MaxValue))
/* the number of io threads that the server uses for carrying out network requests */
val numIoThreads = props.getIntInRange("num.io.threads", 8, (1, Int.MaxValue))
/* the number of threads to use for various background processing tasks */
val backgroundThreads = props.getIntInRange("background.threads", 4, (1, Int.MaxValue))
/* the number of queued requests allowed before blocking the network threads */
val queuedMaxRequests = props.getIntInRange("queued.max.requests", 500, (1, Int.MaxValue))
/*********** Socket Server Configuration ***********/
/* the port to listen and accept connections on */
val port: Int = props.getInt("port", 6667)
/* hostname of broker. If this is set, it will only bind to this address. If this is not set,
* it will bind to all interfaces, and publish one to ZK */
val hostName: String = props.getString("host.name", null)
  /* the SO_SNDBUF buffer of the socket server sockets */
val socketSendBufferBytes: Int = props.getInt("socket.send.buffer.bytes", 100*1024)
  /* the SO_RCVBUF buffer of the socket server sockets */
val socketReceiveBufferBytes: Int = props.getInt("socket.receive.buffer.bytes", 100*1024)
/* the maximum number of bytes in a socket request */
val socketRequestMaxBytes: Int = props.getIntInRange("socket.request.max.bytes", 100*1024*1024, (1, Int.MaxValue))
/*********** Log Configuration ***********/
/* the default number of log partitions per topic */
val numPartitions = props.getIntInRange("num.partitions", 1, (1, Int.MaxValue))
/* the directories in which the log data is kept */
val logDirs = Utils.parseCsvList(props.getString("log.dirs", props.getString("log.dir", "/tmp/kafka-logs")))
require(logDirs.size > 0)
/* the maximum size of a single log file */
val logSegmentBytes = props.getIntInRange("log.segment.bytes", 1*1024*1024*1024, (Message.MinHeaderSize, Int.MaxValue))
/* the maximum size of a single log file for some specific topic */
val logSegmentBytesPerTopicMap = props.getMap("log.segment.bytes.per.topic", _.toInt > 0).mapValues(_.toInt)
/* the maximum time before a new log segment is rolled out */
val logRollHours = props.getIntInRange("log.roll.hours", 24*7, (1, Int.MaxValue))
/* the number of hours before rolling out a new log segment for some specific topic */
val logRollHoursPerTopicMap = props.getMap("log.roll.hours.per.topic", _.toInt > 0).mapValues(_.toInt)
/* the number of hours to keep a log file before deleting it */
val logRetentionHours = props.getIntInRange("log.retention.hours", 24*7, (1, Int.MaxValue))
/* the number of hours to keep a log file before deleting it for some specific topic*/
val logRetentionHoursPerTopicMap = props.getMap("log.retention.hours.per.topic", _.toInt > 0).mapValues(_.toInt)
/* the maximum size of the log before deleting it */
val logRetentionBytes = props.getLong("log.retention.bytes", -1)
/* the maximum size of the log for some specific topic before deleting it */
val logRetentionBytesPerTopicMap = props.getMap("log.retention.bytes.per.topic", _.toLong > 0).mapValues(_.toLong)
  /* the frequency in milliseconds that the log cleaner checks whether any log is eligible for deletion */
val logCleanupIntervalMs = props.getLongInRange("log.retention.check.interval.ms", 5*60*1000, (1, Long.MaxValue))
/* the default cleanup policy for segments beyond the retention window, must be either "delete" or "dedupe" */
val logCleanupPolicy = props.getString("log.cleanup.policy", "delete")
/* the number of background threads to use for log cleaning */
val logCleanerThreads = props.getIntInRange("log.cleaner.threads", 1, (0, Int.MaxValue))
/* the log cleaner will be throttled so that the sum of its read and write i/o will be less than this value on average */
val logCleanerIoMaxBytesPerSecond = props.getDouble("log.cleaner.io.max.bytes.per.second", Double.MaxValue)
/* the total memory used for log deduplication across all cleaner threads */
val logCleanerDedupeBufferSize = props.getLongInRange("log.cleaner.dedupe.buffer.size", 500*1024*1024L, (0, Long.MaxValue))
require(logCleanerDedupeBufferSize / logCleanerThreads > 1024*1024, "log.cleaner.dedupe.buffer.size must be at least 1MB per cleaner thread.")
/* the total memory used for log cleaner I/O buffers across all cleaner threads */
val logCleanerIoBufferSize = props.getIntInRange("log.cleaner.io.buffer.size", 512*1024, (0, Int.MaxValue))
/* log cleaner dedupe buffer load factor. The percentage full the dedupe buffer can become. A higher value
* will allow more log to be cleaned at once but will lead to more hash collisions */
val logCleanerDedupeBufferLoadFactor = props.getDouble("log.cleaner.io.buffer.load.factor", 0.9d)
/* the amount of time to sleep when there are no logs to clean */
val logCleanerBackoffMs = props.getLongInRange("log.cleaner.backoff.ms", 30*1000, (0L, Long.MaxValue))
/* the minimum ratio of dirty log to total log for a log to eligible for cleaning */
val logCleanerMinCleanRatio = props.getDouble("log.cleaner.min.cleanable.ratio", 0.5)
/* should we enable log cleaning? */
val logCleanerEnable = props.getBoolean("log.cleaner.enable", false)
/* how long are delete records retained? */
val logCleanerDeleteRetentionMs = props.getLong("log.cleaner.delete.retention.ms", 24 * 60 * 60 * 1000L)
/* the maximum size in bytes of the offset index */
val logIndexSizeMaxBytes = props.getIntInRange("log.index.size.max.bytes", 10*1024*1024, (4, Int.MaxValue))
/* the interval with which we add an entry to the offset index */
val logIndexIntervalBytes = props.getIntInRange("log.index.interval.bytes", 4096, (0, Int.MaxValue))
/* the number of messages accumulated on a log partition before messages are flushed to disk */
val logFlushIntervalMessages = props.getIntInRange("log.flush.interval.messages", 10000, (1, Int.MaxValue))
/* the amount of time to wait before deleting a file from the filesystem */
val logDeleteDelayMs = props.getLongInRange("log.segment.delete.delay.ms", 60000, (0, Long.MaxValue))
/* the maximum time in ms that a message in selected topics is kept in memory before flushed to disk, e.g., topic1:3000,topic2: 6000 */
val logFlushIntervalMsPerTopicMap = props.getMap("log.flush.interval.ms.per.topic", _.toInt > 0).mapValues(_.toInt)
/* the frequency in ms that the log flusher checks whether any log needs to be flushed to disk */
val logFlushSchedulerIntervalMs = props.getInt("log.flush.scheduler.interval.ms", 3000)
/* the maximum time in ms that a message in any topic is kept in memory before flushed to disk */
val logFlushIntervalMs = props.getInt("log.flush.interval.ms", logFlushSchedulerIntervalMs)
/* enable auto creation of topic on the server */
val autoCreateTopicsEnable = props.getBoolean("auto.create.topics.enable", true)
/*********** Replication configuration ***********/
/* the socket timeout for controller-to-broker channels */
val controllerSocketTimeoutMs = props.getInt("controller.socket.timeout.ms", 30000)
/* the buffer size for controller-to-broker-channels */
  val controllerMessageQueueSize = props.getInt("controller.message.queue.size", 10)
/* default replication factors for automatically created topics */
val defaultReplicationFactor = props.getInt("default.replication.factor", 1)
/* If a follower hasn't sent any fetch requests during this time, the leader will remove the follower from isr */
val replicaLagTimeMaxMs = props.getLong("replica.lag.time.max.ms", 10000)
/* If the lag in messages between a leader and a follower exceeds this number, the leader will remove the follower from isr */
val replicaLagMaxMessages = props.getLong("replica.lag.max.messages", 4000)
/* the socket timeout for network requests */
val replicaSocketTimeoutMs = props.getInt("replica.socket.timeout.ms", ConsumerConfig.SocketTimeout)
/* the socket receive buffer for network requests */
val replicaSocketReceiveBufferBytes = props.getInt("replica.socket.receive.buffer.bytes", ConsumerConfig.SocketBufferSize)
  /* the number of bytes of messages to attempt to fetch */
val replicaFetchMaxBytes = props.getInt("replica.fetch.max.bytes", ConsumerConfig.FetchSize)
/* max wait time for each fetcher request issued by follower replicas*/
val replicaFetchWaitMaxMs = props.getInt("replica.fetch.wait.max.ms", 500)
/* minimum bytes expected for each fetch response. If not enough bytes, wait up to replicaMaxWaitTimeMs */
val replicaFetchMinBytes = props.getInt("replica.fetch.min.bytes", 1)
/* number of fetcher threads used to replicate messages from a source broker.
* Increasing this value can increase the degree of I/O parallelism in the follower broker. */
val numReplicaFetchers = props.getInt("num.replica.fetchers", 1)
/* the frequency with which the high watermark is saved out to disk */
val replicaHighWatermarkCheckpointIntervalMs = props.getLong("replica.high.watermark.checkpoint.interval.ms", 5000L)
/* the purge interval (in number of requests) of the fetch request purgatory */
val fetchPurgatoryPurgeIntervalRequests = props.getInt("fetch.purgatory.purge.interval.requests", 10000)
/* the purge interval (in number of requests) of the producer request purgatory */
val producerPurgatoryPurgeIntervalRequests = props.getInt("producer.purgatory.purge.interval.requests", 10000)
/*********** Misc configuration ***********/
/* the maximum size for a metadata entry associated with an offset commit */
val offsetMetadataMaxSize = props.getInt("offset.metadata.max.bytes", 1024)
}
| akosiaris/kafka | core/src/main/scala/kafka/server/KafkaConfig.scala | Scala | apache-2.0 | 11,432 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.serializer
import scala.concurrent._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.benchmark.{Benchmark, BenchmarkBase}
import org.apache.spark.internal.config._
import org.apache.spark.internal.config.Kryo._
import org.apache.spark.serializer.KryoTest._
import org.apache.spark.util.ThreadUtils
/**
* Benchmark for KryoPool vs old "pool of 1".
* To run this benchmark:
* {{{
* 1. without sbt:
* bin/spark-submit --class <this class> --jars <spark core test jar>
* 2. build/sbt "core/test:runMain <this class>"
* 3. generate result:
* SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "core/test:runMain <this class>"
* Results will be written to "benchmarks/KryoSerializerBenchmark-results.txt".
* }}}
*/
object KryoSerializerBenchmark extends BenchmarkBase {
var sc: SparkContext = null
val N = 500
override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
val name = "Benchmark KryoPool vs old\"pool of 1\" implementation"
runBenchmark(name) {
val benchmark = new Benchmark(name, N, 10, output = output)
Seq(true, false).foreach(usePool => run(usePool, benchmark))
benchmark.run()
}
}
private def run(usePool: Boolean, benchmark: Benchmark): Unit = {
lazy val sc = createSparkContext(usePool)
benchmark.addCase(s"KryoPool:$usePool") { _ =>
val futures = for (_ <- 0 until N) yield {
Future {
sc.parallelize(0 until 10).map(i => i + 1).count()
}
}
val future = Future.sequence(futures)
ThreadUtils.awaitResult(future, 10.minutes)
}
}
def createSparkContext(usePool: Boolean): SparkContext = {
val conf = new SparkConf()
conf.set(SERIALIZER, "org.apache.spark.serializer.KryoSerializer")
conf.set(KRYO_USER_REGISTRATORS, classOf[MyRegistrator].getName)
conf.set(KRYO_USE_POOL, usePool)
if (sc != null) {
sc.stop()
}
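    // local-cluster[4,1,1024]: 4 workers, each with 1 core and 1024 MB of memory.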
sc = new SparkContext("local-cluster[4,1,1024]", "test", conf)
sc
}
override def afterAll(): Unit = {
if (sc != null) {
sc.stop()
}
}
}
| aosagie/spark | core/src/test/scala/org/apache/spark/serializer/KryoSerializerBenchmark.scala | Scala | apache-2.0 | 3,011 |
package org.jetbrains.plugins.scala.lang.psi.impl.expr
import com.intellij.lang.ASTNode
import com.intellij.psi._
import org.jetbrains.plugins.scala.extensions.PsiElementExt
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiElementImpl
import org.jetbrains.plugins.scala.lang.psi.api.base.ScConstructor
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory.{createComma, createNewLineNode}
import org.jetbrains.plugins.scala.lang.psi.types.nonvalue.Parameter
/**
* @author Alexander Podkhalyuzin
* Date: 07.03.2008
*/
class ScArgumentExprListImpl(node: ASTNode) extends ScalaPsiElementImpl(node) with ScArgumentExprList {
override def toString: String = "ArgumentList"
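  // Position of this argument clause within a curried call, counted from 1:
  // for `f(a)(b)` the clause `(b)` has invocationCount 2.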
def invocationCount: Int = {
callExpression match {
case call: ScMethodCall => call.args.invocationCount + 1
case _ => 1
}
}
def callReference: Option[ScReferenceExpression] = {
getContext match {
case call: ScMethodCall =>
call.deepestInvokedExpr match {
case ref: ScReferenceExpression => Some(ref)
case gen: ScGenericCall =>
gen.referencedExpr match {
case ref: ScReferenceExpression => Some(ref)
case _ => None
}
case _ => None
}
case _ => None
}
}
def callGeneric: Option[ScGenericCall] = {
getContext match {
case call: ScMethodCall =>
call.deepestInvokedExpr match {
case gen: ScGenericCall => Some(gen)
case _ => None
}
case _ => None
}
}
def callExpression: ScExpression = {
getContext match {
case call: ScMethodCall =>
call.getEffectiveInvokedExpr
case _ => null
}
}
def matchedParameters: Seq[(ScExpression, Parameter)] = {
getContext match {
case call: ScMethodCall => call.matchedParameters
case constr: ScConstructor =>
constr.matchedParameters.filter {
case (e, _) => this.isAncestorOf(e)
}
case _ => Seq.empty
}
}
override def addBefore(element: PsiElement, anchor: PsiElement): PsiElement = {
if (anchor == null) {
if (exprs.isEmpty) {
val par: PsiElement = findChildByType[PsiElement](ScalaTokenTypes.tLPARENTHESIS)
if (par == null) return super.addBefore(element, anchor)
super.addAfter(element, par)
} else {
val par: PsiElement = findChildByType[PsiElement](ScalaTokenTypes.tLPARENTHESIS)
if (par == null) return super.addBefore(element, anchor)
super.addAfter(par, createComma)
super.addAfter(par, element)
}
} else {
super.addBefore(element, anchor)
}
}
def addExpr(expr: ScExpression): ScArgumentExprList = {
val par = findChildByType[PsiElement](ScalaTokenTypes.tLPARENTHESIS)
val nextNode = par.getNode.getTreeNext
val node = getNode
val needCommaAndSpace = exprs.nonEmpty
node.addChild(expr.getNode, nextNode)
if (needCommaAndSpace) {
node.addChild(comma, nextNode)
node.addChild(space, nextNode)
}
this
}
def addExprAfter(expr: ScExpression, anchor: PsiElement): ScArgumentExprList = {
val nextNode = anchor.getNode.getTreeNext
val node = getNode
if (nextNode != null) {
node.addChild(comma, nextNode)
node.addChild(space, nextNode)
node.addChild(expr.getNode, nextNode)
} else {
node.addChild(comma)
node.addChild(space)
node.addChild(expr.getNode)
}
this
}
private def comma = createComma.getNode
private def space = createNewLineNode(" ")
}
| gtache/intellij-lsp | intellij-lsp-dotty/src/org/jetbrains/plugins/scala/lang/psi/impl/expr/ScArgumentExprListImpl.scala | Scala | apache-2.0 | 3,701 |
package code.comet
import net.liftweb.actor._
import net.liftweb.http._
import js._
import JsCmds._
import JE._
import scala.xml.NodeSeq
object Presentation extends LiftActor with ListenerManager {
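  // Shared actor that holds the id of the current slide; ListenerManager pushes
  // createUpdate to every registered comet whenever updateListeners() is called.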
object Ask
object Init
val initialStep = "title"
var currentStep:String = initialStep
def createUpdate = currentStep
override def lowPriority = {
case Ask => reply(currentStep)
case Init => this ! initialStep
case id:String => currentStep = id; updateListeners()
}
}
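// Illustrative flow sketch (the step id is hypothetical): sending `Presentation ! "step-2"`
// stores the id in currentStep and broadcasts it to every registered PresentationComet,
// which then pushes Call("window.Presentation.goto", "step-2") to the browser (see below).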
class PresentationComet extends CometActor with CometListener {
override def render = NodeSeq.Empty
override def registerWith = Presentation
override def lowPriority = {
case id:String => partialUpdate(
Call("window.Presentation.goto", id)
)
}
} | joescii/type-prog-impress | src/main/scala/code/comet/Presentation.scala | Scala | apache-2.0 | 767 |
package scala.meta.internal.semanticdb.scalac
import scala.reflect.internal.util.Position
import scala.tools.nsc.reporters.Reporter
import scala.tools.nsc.reporters.StoreReporter
class SemanticdbReporter(underlying: Reporter) extends StoreReporter {
override protected def info0(
pos: Position,
msg: String,
severity: Severity,
force: Boolean): Unit = {
super.info0(pos, msg, severity, force)
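    // Severity ids follow scalac's Reporter constants: 0 = INFO, 1 = WARNING, 2 = ERROR.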
severity.id match {
case 0 => underlying.info(pos, msg, force)
case 1 => underlying.warning(pos, msg)
case 2 => underlying.error(pos, msg)
case _ =>
}
}
}
| MasseGuillaume/scalameta | semanticdb/scalac/library/src/main/scala/scala/meta/internal/semanticdb/scalac/SemanticdbReporter.scala | Scala | bsd-3-clause | 614 |
package com.wuyuntao.aeneas.tests.views
import java.util.UUID
import com.wuyuntao.aeneas.View
case class UserByName(val username: String, val id: UUID) extends View | wuyuntao/Aeneas | aeneas-core/src/test/scala/com/wuyuntao/aeneas/tests/views/UserByName.scala | Scala | apache-2.0 | 173 |
package net.node3.scalabot
import org.parboiled2._
object PEGParser {
  val special = CharPredicate('-', '^', '_', '[', ']', '\\', '`')
val nick = CharPredicate('{', '}') ++ special
val host = CharPredicate('.') ++ special
val CRLF = CharPredicate('\r', '\n')
val white = CharPredicate('\t', '\f') ++ CRLF
def apply(input: ParserInput) = new PEGParser(input).InputLine.run()
}
class PEGParser(val input: ParserInput) extends Parser {
def InputLine = rule{
Message ~ EOI
}
def Prefix: Rule1[Tokens.Prefix] = rule{
capture(ServerName | Nick) ~ optional('!' ~ capture(User)) ~
optional('@' ~ capture(ServerName)) ~> (Tokens.Prefix(_, _, _))
}
def Command: Rule1[Tokens.Command] = rule {
capture(oneOrMore(CharPredicate.Alpha) | 3.times(CharPredicate.Digit)) ~>
Tokens.Command
}
def Message: Rule1[Tokens.Message] = rule{
optional(':' ~ Prefix ~ Space) ~ Command ~ Params ~> (
Tokens.Message(_, _, _))
}
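  // Illustrative example (IRC-style input assumed; the Tokens classes are defined elsewhere
  // in this project):
  //   PEGParser(":nick!user@example.com PRIVMSG #chan :hello")
  // would yield roughly
  //   Tokens.Message(Some(Tokens.Prefix("nick", Some("user"), Some("example.com"))),
  //                  Tokens.Command("PRIVMSG"), List("#chan", "hello"))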
def Params: Rule1[List[String]] = rule{
optional(Space ~ oneOrMore(!':' ~ capture(oneOrMore(
CharPredicate.Visible -- PEGParser.white))).separatedBy(' ')) ~ End ~> (
(mid: Option[Seq[String]], end: Option[String]) => (mid, end) match{
case (Some(m), Some(e)) => (m :+ e).toList
case (Some(m), None) => m.toList
case (None, Some(e)) => List(e)
case _ => List()
})
}
def End: Rule1[Option[String]] = rule{
optional(Space ~ ':' ~ capture(oneOrMore(ANY)))
}
def Space = rule{ oneOrMore(' ') }
def ServerName = rule{ oneOrMore(CharPredicate.AlphaNum ++ PEGParser.host) }
def Nick = rule{ oneOrMore(CharPredicate.AlphaNum ++ PEGParser.nick) }
def User = rule{ optional('~') ~ Nick }
}
| nadams/scalabot | src/main/scala/Parser.scala | Scala | mit | 1,735 |
/* Copyright 2009-2016 EPFL, Lausanne */
import leon.annotation._
import leon.lang._
object Mean {
def meanOverflow(x: Int, y: Int): Int = {
require(x <= y && x >= 0 && y >= 0)
(x + y)/2
} ensuring(m => m >= x && m <= y)
}
| regb/leon | src/test/resources/regression/verification/purescala/invalid/Mean.scala | Scala | gpl-3.0 | 239 |
import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, ClosedShape}
import akka.stream.scaladsl.{
Broadcast,
Flow,
GraphDSL,
Merge,
MergeSorted,
RunnableGraph,
Sink,
Source
}
import akka.testkit.TestKit
import org.scalatest.{AsyncWordSpecLike, MustMatchers}
import scala.concurrent.Future
/**
* Created by erik on 9/6/16.
*/
class DedupeMergeSpec
extends TestKit(ActorSystem("test"))
with AsyncWordSpecLike
with MustMatchers {
implicit val mat = ActorMaterializer()
val seqA = Seq(0, 1, 2, 5, 9, 10)
lazy val sourceA = Source.fromIterator(() => seqA.iterator)
val seqB = Seq(2, 4, 7)
lazy val sourceB = Source.fromIterator(() => seqB.iterator)
val seqC = Seq(2, 6, 9)
lazy val sourceC = Source.fromIterator(() => seqC.iterator)
val seqD = Seq(2, 6, 9, 11, 12)
lazy val sourceD = Source.fromIterator(() => seqD.iterator)
val out = Sink.seq[Int]
val g = RunnableGraph.fromGraph(GraphDSL.create(out) {
implicit builder => sink =>
import GraphDSL.Implicits._
val merge = builder.add(Merge[Int](2))
sourceA ~> merge
sourceB ~> merge ~> sink
ClosedShape
})
def dedupeSortedFlow[T]: Flow[T, T, NotUsed] =
Flow[T]
.scan((Option.empty[T], Option.empty[T])) {
//
case ((None, None), input) ⇒
// first record
(Option(input), None)
case ((Some(prev), None), input) ⇒
if (prev == input) {
// first duplicate
(None, Option(input))
} else {
// normal records
(Option(input), None)
}
case ((None, Some(prev)), input) ⇒
if (prev == input) {
// consequent duplicates
(None, Option(input))
} else {
// new record
(Option(input), None)
}
}
.collect { case (Some(item), _) ⇒ item }
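  // Illustrative trace (assumed input): Source(List(0, 1, 1, 2, 2, 4)).via(dedupeSortedFlow)
  // emits 0, 1, 2, 4. The scan keeps the previous element in its state and only re-emits when
  // the incoming element differs, so only consecutive duplicates are dropped, which is
  // sufficient here because the merged input is sorted.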
"merge and dedupe" should {
"work via the flow dsl" in {
sourceA
.mergeSorted(sourceB)
.via(dedupeSortedFlow)
.runWith(Sink.seq)
.map { results =>
results mustEqual Seq(0, 1, 2, 4, 5, 7, 9, 10)
}
}
"fold-merge from a list of sources via the flow dsl" in {
val sources = Seq(sourceA, sourceB, sourceC, sourceD)
sources.tail.foldLeft(sources.head)((acc, item) => acc.mergeSorted(item))
.via(dedupeSortedFlow)
.runWith(Sink.seq)
.map { results =>
results mustEqual Seq(0, 1, 2, 4, 5, 6, 7, 9, 10, 11, 12)
}
}
"merge via a graph stage" ignore {
val resultFut: Future[Seq[Int]] = g.run()
resultFut.map { results =>
results mustEqual Seq(0, 1, 2, 4, 5, 7, 9, 10)
}
}
}
}
| easel/akka-streams-intro | src/test/scala/DedupeMergeSpec.scala | Scala | mit | 2,765 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.fixture
private[scalatest] class NoArgTestWrapper[T](val test: () => Any) extends (T => Any) {
def apply(fixture: T): Any = {
test()
}
}
| travisbrown/scalatest | src/main/scala/org/scalatest/fixture/NoArgTestWrapper.scala | Scala | apache-2.0 | 770 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import scala.collection.immutable.TreeSet
import scala.collection.mutable
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.CatalystTypeConverters.convertToScala
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
import org.apache.spark.sql.catalyst.expressions.BindReferences.bindReference
import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression
import org.apache.spark.sql.catalyst.expressions.codegen._
import org.apache.spark.sql.catalyst.expressions.codegen.Block._
import org.apache.spark.sql.catalyst.plans.logical.{Aggregate, LeafNode, LogicalPlan, Project}
import org.apache.spark.sql.catalyst.util.TypeUtils
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
/**
* A base class for generated/interpreted predicate
*/
abstract class BasePredicate {
def eval(r: InternalRow): Boolean
/**
* Initializes internal states given the current partition index.
* This is used by nondeterministic expressions to set initial states.
* The default implementation does nothing.
*/
def initialize(partitionIndex: Int): Unit = {}
}
case class InterpretedPredicate(expression: Expression) extends BasePredicate {
override def eval(r: InternalRow): Boolean = expression.eval(r).asInstanceOf[Boolean]
override def initialize(partitionIndex: Int): Unit = {
super.initialize(partitionIndex)
expression.foreach {
case n: Nondeterministic => n.initialize(partitionIndex)
case _ =>
}
}
}
/**
* An [[Expression]] that returns a boolean value.
*/
trait Predicate extends Expression {
override def dataType: DataType = BooleanType
}
/**
* The factory object for `BasePredicate`.
*/
object Predicate extends CodeGeneratorWithInterpretedFallback[Expression, BasePredicate] {
override protected def createCodeGeneratedObject(in: Expression): BasePredicate = {
GeneratePredicate.generate(in)
}
override protected def createInterpretedObject(in: Expression): BasePredicate = {
InterpretedPredicate(in)
}
def createInterpreted(e: Expression): InterpretedPredicate = InterpretedPredicate(e)
/**
* Returns a BasePredicate for an Expression, which will be bound to `inputSchema`.
*/
def create(e: Expression, inputSchema: Seq[Attribute]): BasePredicate = {
createObject(bindReference(e, inputSchema))
}
/**
* Returns a BasePredicate for a given bound Expression.
*/
def create(e: Expression): BasePredicate = {
createObject(e)
}
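  // Illustrative usage sketch (the attribute, schema and row names below are hypothetical):
  //   val pred = Predicate.create(GreaterThan(ageAttr, Literal(18)), schemaAttributes)
  //   pred.initialize(partitionIndex)
  //   val matches = pred.eval(internalRow)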
}
trait PredicateHelper extends Logging {
protected def splitConjunctivePredicates(condition: Expression): Seq[Expression] = {
condition match {
case And(cond1, cond2) =>
splitConjunctivePredicates(cond1) ++ splitConjunctivePredicates(cond2)
case other => other :: Nil
}
}
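  // For example (illustrative): splitConjunctivePredicates(And(And(a, b), c)) == Seq(a, b, c).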
/**
* Find the origin of where the input references of expression exp were scanned in the tree of
* plan, and if they originate from a single leaf node.
   * Returns an optional tuple with the Expression, undoing any projections and aliasing that have
   * been done along the way from plan to origin, and the origin LeafNode plan from which all the
   * references of exp originate.
   */
def findExpressionAndTrackLineageDown(
exp: Expression,
plan: LogicalPlan): Option[(Expression, LogicalPlan)] = {
plan match {
case Project(projectList, child) =>
val aliases = AttributeMap(projectList.collect {
case a @ Alias(child, _) => (a.toAttribute, child)
})
findExpressionAndTrackLineageDown(replaceAlias(exp, aliases), child)
// we can unwrap only if there are row projections, and no aggregation operation
case Aggregate(_, aggregateExpressions, child) =>
val aliasMap = AttributeMap(aggregateExpressions.collect {
case a: Alias if a.child.find(_.isInstanceOf[AggregateExpression]).isEmpty =>
(a.toAttribute, a.child)
})
findExpressionAndTrackLineageDown(replaceAlias(exp, aliasMap), child)
case l: LeafNode if exp.references.subsetOf(l.outputSet) =>
Some((exp, l))
case other =>
other.children.flatMap {
child => if (exp.references.subsetOf(child.outputSet)) {
findExpressionAndTrackLineageDown(exp, child)
} else {
None
}
}.headOption
}
}
protected def splitDisjunctivePredicates(condition: Expression): Seq[Expression] = {
condition match {
case Or(cond1, cond2) =>
splitDisjunctivePredicates(cond1) ++ splitDisjunctivePredicates(cond2)
case other => other :: Nil
}
}
// Substitute any known alias from a map.
protected def replaceAlias(
condition: Expression,
aliases: AttributeMap[Expression]): Expression = {
// Use transformUp to prevent infinite recursion when the replacement expression
    // redefines the same ExprId.
condition.transformUp {
case a: Attribute =>
aliases.getOrElse(a, a)
}
}
/**
* Returns true if `expr` can be evaluated using only the output of `plan`. This method
* can be used to determine when it is acceptable to move expression evaluation within a query
* plan.
*
* For example consider a join between two relations R(a, b) and S(c, d).
*
* - `canEvaluate(EqualTo(a,b), R)` returns `true`
* - `canEvaluate(EqualTo(a,c), R)` returns `false`
* - `canEvaluate(Literal(1), R)` returns `true` as literals CAN be evaluated on any plan
*/
protected def canEvaluate(expr: Expression, plan: LogicalPlan): Boolean =
expr.references.subsetOf(plan.outputSet)
/**
* Returns true iff `expr` could be evaluated as a condition within join.
*/
protected def canEvaluateWithinJoin(expr: Expression): Boolean = expr match {
// Non-deterministic expressions are not allowed as join conditions.
case e if !e.deterministic => false
case _: ListQuery | _: Exists =>
// A ListQuery defines the query which we want to search in an IN subquery expression.
// Currently the only way to evaluate an IN subquery is to convert it to a
// LeftSemi/LeftAnti/ExistenceJoin by `RewritePredicateSubquery` rule.
// It cannot be evaluated as part of a Join operator.
// An Exists shouldn't be push into a Join operator too.
false
case e: SubqueryExpression =>
// non-correlated subquery will be replaced as literal
e.children.isEmpty
case a: AttributeReference => true
// PythonUDF will be executed by dedicated physical operator later.
// For PythonUDFs that can't be evaluated in join condition, `ExtractPythonUDFFromJoinCondition`
// will pull them out later.
case _: PythonUDF => true
case e: Unevaluable => false
case e => e.children.forall(canEvaluateWithinJoin)
}
/**
* Convert an expression into conjunctive normal form.
* Definition and algorithm: https://en.wikipedia.org/wiki/Conjunctive_normal_form
* CNF can explode exponentially in the size of the input expression when converting [[Or]]
* clauses. Use a configuration [[SQLConf.MAX_CNF_NODE_COUNT]] to prevent such cases.
*
* @param condition to be converted into CNF.
* @return the CNF result as sequence of disjunctive expressions. If the number of expressions
* exceeds threshold on converting `Or`, `Seq.empty` is returned.
*/
protected def conjunctiveNormalForm(condition: Expression): Seq[Expression] = {
val postOrderNodes = postOrderTraversal(condition)
val resultStack = new mutable.Stack[Seq[Expression]]
val maxCnfNodeCount = SQLConf.get.maxCnfNodeCount
// Bottom up approach to get CNF of sub-expressions
while (postOrderNodes.nonEmpty) {
val cnf = postOrderNodes.pop() match {
case _: And =>
val right = resultStack.pop()
val left = resultStack.pop()
left ++ right
case _: Or =>
// For each side, there is no need to expand predicates of the same references.
// So here we can aggregate predicates of the same qualifier as one single predicate,
// for reducing the size of pushed down predicates and corresponding codegen.
val right = groupExpressionsByQualifier(resultStack.pop())
val left = groupExpressionsByQualifier(resultStack.pop())
// Stop the loop whenever the result exceeds the `maxCnfNodeCount`
if (left.size * right.size > maxCnfNodeCount) {
logInfo(s"As the result size exceeds the threshold $maxCnfNodeCount. " +
"The CNF conversion is skipped and returning Seq.empty now. To avoid this, you can " +
s"raise the limit ${SQLConf.MAX_CNF_NODE_COUNT.key}.")
return Seq.empty
} else {
for { x <- left; y <- right } yield Or(x, y)
}
case other => other :: Nil
}
resultStack.push(cnf)
}
if (resultStack.length != 1) {
logWarning("The length of CNF conversion result stack is supposed to be 1. There might " +
"be something wrong with CNF conversion.")
return Seq.empty
}
resultStack.top
}
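  // Illustrative example: for predicates a, b over one relation and c over another,
  // conjunctiveNormalForm((a AND b) OR c) yields Seq(a OR c, b OR c) when a and b reference
  // different qualifiers; predicates over the same qualifier are first grouped by
  // groupExpressionsByQualifier, so the same input over a single relation stays as one clause.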
private def groupExpressionsByQualifier(expressions: Seq[Expression]): Seq[Expression] = {
expressions.groupBy(_.references.map(_.qualifier)).map(_._2.reduceLeft(And)).toSeq
}
/**
* Iterative post order traversal over a binary tree built by And/Or clauses with two stacks.
* For example, a condition `(a And b) Or c`, the postorder traversal is
* (`a`,`b`, `And`, `c`, `Or`).
* Following is the complete algorithm. After step 2, we get the postorder traversal in
* the second stack.
* 1. Push root to first stack.
* 2. Loop while first stack is not empty
* 2.1 Pop a node from first stack and push it to second stack
* 2.2 Push the children of the popped node to first stack
*
* @param condition to be traversed as binary tree
* @return sub-expressions in post order traversal as a stack.
* The first element of result stack is the leftmost node.
*/
private def postOrderTraversal(condition: Expression): mutable.Stack[Expression] = {
val stack = new mutable.Stack[Expression]
val result = new mutable.Stack[Expression]
stack.push(condition)
while (stack.nonEmpty) {
val node = stack.pop()
node match {
case Not(a And b) => stack.push(Or(Not(a), Not(b)))
case Not(a Or b) => stack.push(And(Not(a), Not(b)))
case Not(Not(a)) => stack.push(a)
case a And b =>
result.push(node)
stack.push(a)
stack.push(b)
case a Or b =>
result.push(node)
stack.push(a)
stack.push(b)
case _ =>
result.push(node)
}
}
result
}
}
@ExpressionDescription(
usage = "_FUNC_ expr - Logical not.")
case class Not(child: Expression)
extends UnaryExpression with Predicate with ImplicitCastInputTypes with NullIntolerant {
override def toString: String = s"NOT $child"
override def inputTypes: Seq[DataType] = Seq(BooleanType)
// +---------+-----------+
// | CHILD | NOT CHILD |
// +---------+-----------+
// | TRUE | FALSE |
// | FALSE | TRUE |
// | UNKNOWN | UNKNOWN |
// +---------+-----------+
protected override def nullSafeEval(input: Any): Any = !input.asInstanceOf[Boolean]
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
defineCodeGen(ctx, ev, c => s"!($c)")
}
override def sql: String = s"(NOT ${child.sql})"
}
/**
* Evaluates to `true` if `values` are returned in `query`'s result set.
*/
case class InSubquery(values: Seq[Expression], query: ListQuery)
extends Predicate with Unevaluable {
@transient private lazy val value: Expression = if (values.length > 1) {
CreateNamedStruct(values.zipWithIndex.flatMap {
case (v: NamedExpression, _) => Seq(Literal(v.name), v)
case (v, idx) => Seq(Literal(s"_$idx"), v)
})
} else {
values.head
}
override def checkInputDataTypes(): TypeCheckResult = {
if (values.length != query.childOutputs.length) {
TypeCheckResult.TypeCheckFailure(
s"""
|The number of columns in the left hand side of an IN subquery does not match the
|number of columns in the output of subquery.
|#columns in left hand side: ${values.length}.
|#columns in right hand side: ${query.childOutputs.length}.
|Left side columns:
|[${values.map(_.sql).mkString(", ")}].
|Right side columns:
|[${query.childOutputs.map(_.sql).mkString(", ")}].""".stripMargin)
} else if (!DataType.equalsStructurally(
query.dataType, value.dataType, ignoreNullability = true)) {
val mismatchedColumns = values.zip(query.childOutputs).flatMap {
case (l, r) if l.dataType != r.dataType =>
Seq(s"(${l.sql}:${l.dataType.catalogString}, ${r.sql}:${r.dataType.catalogString})")
case _ => None
}
TypeCheckResult.TypeCheckFailure(
s"""
|The data type of one or more elements in the left hand side of an IN subquery
|is not compatible with the data type of the output of the subquery
|Mismatched columns:
|[${mismatchedColumns.mkString(", ")}]
|Left side:
|[${values.map(_.dataType.catalogString).mkString(", ")}].
|Right side:
|[${query.childOutputs.map(_.dataType.catalogString).mkString(", ")}].""".stripMargin)
} else {
TypeUtils.checkForOrderingExpr(value.dataType, s"function $prettyName")
}
}
override def children: Seq[Expression] = values :+ query
override def nullable: Boolean = children.exists(_.nullable)
override def foldable: Boolean = children.forall(_.foldable)
override def toString: String = s"$value IN ($query)"
override def sql: String = s"(${value.sql} IN (${query.sql}))"
}
/**
* Evaluates to `true` if `list` contains `value`.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
  usage = "expr1 _FUNC_(expr2, expr3, ...) - Returns true if `expr1` equals any valN.",
arguments = """
Arguments:
      * expr1, expr2, expr3, ... - the arguments must be the same type.
""",
examples = """
Examples:
> SELECT 1 _FUNC_(1, 2, 3);
true
> SELECT 1 _FUNC_(2, 3, 4);
false
> SELECT named_struct('a', 1, 'b', 2) _FUNC_(named_struct('a', 1, 'b', 1), named_struct('a', 1, 'b', 3));
false
> SELECT named_struct('a', 1, 'b', 2) _FUNC_(named_struct('a', 1, 'b', 2), named_struct('a', 1, 'b', 3));
true
""")
// scalastyle:on line.size.limit
case class In(value: Expression, list: Seq[Expression]) extends Predicate {
require(list != null, "list should not be null")
override def checkInputDataTypes(): TypeCheckResult = {
val mismatchOpt = list.find(l => !DataType.equalsStructurally(l.dataType, value.dataType,
ignoreNullability = true))
if (mismatchOpt.isDefined) {
TypeCheckResult.TypeCheckFailure(s"Arguments must be same type but were: " +
s"${value.dataType.catalogString} != ${mismatchOpt.get.dataType.catalogString}")
} else {
TypeUtils.checkForOrderingExpr(value.dataType, s"function $prettyName")
}
}
override def children: Seq[Expression] = value +: list
lazy val inSetConvertible = list.forall(_.isInstanceOf[Literal])
private lazy val ordering = TypeUtils.getInterpretedOrdering(value.dataType)
override def nullable: Boolean = children.exists(_.nullable)
override def foldable: Boolean = children.forall(_.foldable)
override def toString: String = s"$value IN ${list.mkString("(", ",", ")")}"
override def eval(input: InternalRow): Any = {
val evaluatedValue = value.eval(input)
if (evaluatedValue == null) {
null
} else {
var hasNull = false
list.foreach { e =>
val v = e.eval(input)
if (v == null) {
hasNull = true
} else if (ordering.equiv(v, evaluatedValue)) {
return true
}
}
if (hasNull) {
null
} else {
false
}
}
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val javaDataType = CodeGenerator.javaType(value.dataType)
val valueGen = value.genCode(ctx)
val listGen = list.map(_.genCode(ctx))
// inTmpResult has 3 possible values:
// -1 means no matches found and there is at least one value in the list evaluated to null
val HAS_NULL = -1
// 0 means no matches found and all values in the list are not null
val NOT_MATCHED = 0
// 1 means one value in the list is matched
val MATCHED = 1
val tmpResult = ctx.freshName("inTmpResult")
val valueArg = ctx.freshName("valueArg")
// All the blocks are meant to be inside a do { ... } while (false); loop.
// The evaluation of variables can be stopped when we find a matching value.
val listCode = listGen.map(x =>
s"""
|${x.code}
|if (${x.isNull}) {
| $tmpResult = $HAS_NULL; // ${ev.isNull} = true;
|} else if (${ctx.genEqual(value.dataType, valueArg, x.value)}) {
| $tmpResult = $MATCHED; // ${ev.isNull} = false; ${ev.value} = true;
| continue;
|}
""".stripMargin)
val codes = ctx.splitExpressionsWithCurrentInputs(
expressions = listCode,
funcName = "valueIn",
extraArguments = (javaDataType, valueArg) :: (CodeGenerator.JAVA_BYTE, tmpResult) :: Nil,
returnType = CodeGenerator.JAVA_BYTE,
makeSplitFunction = body =>
s"""
|do {
| $body
|} while (false);
|return $tmpResult;
""".stripMargin,
foldFunctions = _.map { funcCall =>
s"""
|$tmpResult = $funcCall;
|if ($tmpResult == $MATCHED) {
| continue;
|}
""".stripMargin
}.mkString("\\n"))
ev.copy(code =
code"""
|${valueGen.code}
|byte $tmpResult = $HAS_NULL;
|if (!${valueGen.isNull}) {
| $tmpResult = $NOT_MATCHED;
| $javaDataType $valueArg = ${valueGen.value};
| do {
| $codes
| } while (false);
|}
|final boolean ${ev.isNull} = ($tmpResult == $HAS_NULL);
|final boolean ${ev.value} = ($tmpResult == $MATCHED);
""".stripMargin)
}
override def sql: String = {
val valueSQL = value.sql
val listSQL = list.map(_.sql).mkString(", ")
s"($valueSQL IN ($listSQL))"
}
}
/**
* Optimized version of In clause, when all filter values of In clause are
* static.
*/
case class InSet(child: Expression, hset: Set[Any]) extends UnaryExpression with Predicate {
  require(hset != null, "hset must not be null")
override def toString: String = s"$child INSET ${hset.mkString("(", ",", ")")}"
@transient private[this] lazy val hasNull: Boolean = hset.contains(null)
override def nullable: Boolean = child.nullable || hasNull
protected override def nullSafeEval(value: Any): Any = {
if (set.contains(value)) {
true
} else if (hasNull) {
null
} else {
false
}
}
@transient lazy val set: Set[Any] = child.dataType match {
case t: AtomicType if !t.isInstanceOf[BinaryType] => hset
case _: NullType => hset
case _ =>
// for structs use interpreted ordering to be able to compare UnsafeRows with non-UnsafeRows
TreeSet.empty(TypeUtils.getInterpretedOrdering(child.dataType)) ++ (hset - null)
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
if (canBeComputedUsingSwitch && hset.size <= SQLConf.get.optimizerInSetSwitchThreshold) {
genCodeWithSwitch(ctx, ev)
} else {
genCodeWithSet(ctx, ev)
}
}
private def canBeComputedUsingSwitch: Boolean = child.dataType match {
case ByteType | ShortType | IntegerType | DateType => true
case _ => false
}
private def genCodeWithSet(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, c => {
val setTerm = ctx.addReferenceObj("set", set)
val setIsNull = if (hasNull) {
s"${ev.isNull} = !${ev.value};"
} else {
""
}
s"""
|${ev.value} = $setTerm.contains($c);
|$setIsNull
""".stripMargin
})
}
// spark.sql.optimizer.inSetSwitchThreshold has an appropriate upper limit,
// so the code size should not exceed 64KB
private def genCodeWithSwitch(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val caseValuesGen = hset.filter(_ != null).map(Literal(_).genCode(ctx))
val valueGen = child.genCode(ctx)
val caseBranches = caseValuesGen.map(literal =>
code"""
case ${literal.value}:
${ev.value} = true;
break;
""")
val switchCode = if (caseBranches.size > 0) {
code"""
switch (${valueGen.value}) {
${caseBranches.mkString("\\n")}
default:
${ev.isNull} = $hasNull;
}
"""
} else {
s"${ev.isNull} = $hasNull;"
}
ev.copy(code =
code"""
${valueGen.code}
${CodeGenerator.JAVA_BOOLEAN} ${ev.isNull} = ${valueGen.isNull};
${CodeGenerator.JAVA_BOOLEAN} ${ev.value} = false;
if (!${valueGen.isNull}) {
$switchCode
}
""")
}
override def sql: String = {
val valueSQL = child.sql
val listSQL = hset.toSeq
.map(elem => Literal(elem, child.dataType).sql)
.mkString(", ")
s"($valueSQL IN ($listSQL))"
}
}
@ExpressionDescription(
usage = "expr1 _FUNC_ expr2 - Logical AND.")
case class And(left: Expression, right: Expression) extends BinaryOperator with Predicate {
override def inputType: AbstractDataType = BooleanType
override def symbol: String = "&&"
override def sqlOperator: String = "AND"
// +---------+---------+---------+---------+
// | AND | TRUE | FALSE | UNKNOWN |
// +---------+---------+---------+---------+
// | TRUE | TRUE | FALSE | UNKNOWN |
// | FALSE | FALSE | FALSE | FALSE |
// | UNKNOWN | UNKNOWN | FALSE | UNKNOWN |
// +---------+---------+---------+---------+
override def eval(input: InternalRow): Any = {
val input1 = left.eval(input)
if (input1 == false) {
false
} else {
val input2 = right.eval(input)
if (input2 == false) {
false
} else {
if (input1 != null && input2 != null) {
true
} else {
null
}
}
}
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val eval1 = left.genCode(ctx)
val eval2 = right.genCode(ctx)
    // The result should be `false` if either input is `false`, regardless of whether the other is null.
if (!left.nullable && !right.nullable) {
ev.copy(code = code"""
${eval1.code}
boolean ${ev.value} = false;
if (${eval1.value}) {
${eval2.code}
${ev.value} = ${eval2.value};
}""", isNull = FalseLiteral)
} else {
ev.copy(code = code"""
${eval1.code}
boolean ${ev.isNull} = false;
boolean ${ev.value} = false;
if (!${eval1.isNull} && !${eval1.value}) {
} else {
${eval2.code}
if (!${eval2.isNull} && !${eval2.value}) {
} else if (!${eval1.isNull} && !${eval2.isNull}) {
${ev.value} = true;
} else {
${ev.isNull} = true;
}
}
""")
}
}
}
@ExpressionDescription(
usage = "expr1 _FUNC_ expr2 - Logical OR.")
case class Or(left: Expression, right: Expression) extends BinaryOperator with Predicate {
override def inputType: AbstractDataType = BooleanType
override def symbol: String = "||"
override def sqlOperator: String = "OR"
// +---------+---------+---------+---------+
// | OR | TRUE | FALSE | UNKNOWN |
// +---------+---------+---------+---------+
// | TRUE | TRUE | TRUE | TRUE |
// | FALSE | TRUE | FALSE | UNKNOWN |
// | UNKNOWN | TRUE | UNKNOWN | UNKNOWN |
// +---------+---------+---------+---------+
override def eval(input: InternalRow): Any = {
val input1 = left.eval(input)
if (input1 == true) {
true
} else {
val input2 = right.eval(input)
if (input2 == true) {
true
} else {
if (input1 != null && input2 != null) {
false
} else {
null
}
}
}
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val eval1 = left.genCode(ctx)
val eval2 = right.genCode(ctx)
    // The result should be `true` if either input is `true`, regardless of whether the other is null.
if (!left.nullable && !right.nullable) {
ev.isNull = FalseLiteral
ev.copy(code = code"""
${eval1.code}
boolean ${ev.value} = true;
if (!${eval1.value}) {
${eval2.code}
${ev.value} = ${eval2.value};
}""", isNull = FalseLiteral)
} else {
ev.copy(code = code"""
${eval1.code}
boolean ${ev.isNull} = false;
boolean ${ev.value} = true;
if (!${eval1.isNull} && ${eval1.value}) {
} else {
${eval2.code}
if (!${eval2.isNull} && ${eval2.value}) {
} else if (!${eval1.isNull} && !${eval2.isNull}) {
${ev.value} = false;
} else {
${ev.isNull} = true;
}
}
""")
}
}
}
abstract class BinaryComparison extends BinaryOperator with Predicate {
// Note that we need to give a superset of allowable input types since orderable types are not
// finitely enumerable. The allowable types are checked below by checkInputDataTypes.
override def inputType: AbstractDataType = AnyDataType
override def checkInputDataTypes(): TypeCheckResult = super.checkInputDataTypes() match {
case TypeCheckResult.TypeCheckSuccess =>
TypeUtils.checkForOrderingExpr(left.dataType, this.getClass.getSimpleName)
case failure => failure
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
if (CodeGenerator.isPrimitiveType(left.dataType)
&& left.dataType != BooleanType // java boolean doesn't support > or < operator
&& left.dataType != FloatType
&& left.dataType != DoubleType) {
// faster version
defineCodeGen(ctx, ev, (c1, c2) => s"$c1 $symbol $c2")
} else {
defineCodeGen(ctx, ev, (c1, c2) => s"${ctx.genComp(left.dataType, c1, c2)} $symbol 0")
}
}
protected lazy val ordering: Ordering[Any] = TypeUtils.getInterpretedOrdering(left.dataType)
}
object BinaryComparison {
def unapply(e: BinaryComparison): Option[(Expression, Expression)] = Some((e.left, e.right))
}
/** An extractor that matches both standard 3VL equality and null-safe equality. */
object Equality {
def unapply(e: BinaryComparison): Option[(Expression, Expression)] = e match {
case EqualTo(l, r) => Some((l, r))
case EqualNullSafe(l, r) => Some((l, r))
case _ => None
}
}
// TODO: although map type is not orderable, technically map type should be able to be used
// in equality comparison
@ExpressionDescription(
usage = "expr1 _FUNC_ expr2 - Returns true if `expr1` equals `expr2`, or false otherwise.",
arguments = """
Arguments:
      * expr1, expr2 - the two expressions must be the same type or can be cast to a common type,
          and must be a type that can be used in equality comparison. Map type is not supported.
          For complex types such as array/struct, the data types of fields must be orderable.
""",
examples = """
Examples:
> SELECT 2 _FUNC_ 2;
true
> SELECT 1 _FUNC_ '1';
true
> SELECT true _FUNC_ NULL;
NULL
> SELECT NULL _FUNC_ NULL;
NULL
""")
case class EqualTo(left: Expression, right: Expression)
extends BinaryComparison with NullIntolerant {
override def symbol: String = "="
// +---------+---------+---------+---------+
// | = | TRUE | FALSE | UNKNOWN |
// +---------+---------+---------+---------+
// | TRUE | TRUE | FALSE | UNKNOWN |
// | FALSE | FALSE | TRUE | UNKNOWN |
// | UNKNOWN | UNKNOWN | UNKNOWN | UNKNOWN |
// +---------+---------+---------+---------+
protected override def nullSafeEval(left: Any, right: Any): Any = ordering.equiv(left, right)
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
defineCodeGen(ctx, ev, (c1, c2) => ctx.genEqual(left.dataType, c1, c2))
}
}
// TODO: although map type is not orderable, technically map type should be able to be used
// in equality comparison
@ExpressionDescription(
usage = """
    expr1 _FUNC_ expr2 - Returns the same result as the EQUAL(=) operator for non-null operands,
    but returns true if both are null, false if one of them is null.
""",
arguments = """
Arguments:
      * expr1, expr2 - the two expressions must be the same type or can be cast to a common type,
          and must be a type that can be used in equality comparison. Map type is not supported.
          For complex types such as array/struct, the data types of fields must be orderable.
""",
examples = """
Examples:
> SELECT 2 _FUNC_ 2;
true
> SELECT 1 _FUNC_ '1';
true
> SELECT true _FUNC_ NULL;
false
> SELECT NULL _FUNC_ NULL;
true
""")
case class EqualNullSafe(left: Expression, right: Expression) extends BinaryComparison {
override def symbol: String = "<=>"
override def nullable: Boolean = false
// +---------+---------+---------+---------+
// | <=> | TRUE | FALSE | UNKNOWN |
// +---------+---------+---------+---------+
// | TRUE | TRUE | FALSE | FALSE |
// | FALSE | FALSE | TRUE | FALSE |
// | UNKNOWN | FALSE | FALSE | TRUE |
// +---------+---------+---------+---------+
override def eval(input: InternalRow): Any = {
val input1 = left.eval(input)
val input2 = right.eval(input)
if (input1 == null && input2 == null) {
true
} else if (input1 == null || input2 == null) {
false
} else {
ordering.equiv(input1, input2)
}
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val eval1 = left.genCode(ctx)
val eval2 = right.genCode(ctx)
val equalCode = ctx.genEqual(left.dataType, eval1.value, eval2.value)
ev.copy(code = eval1.code + eval2.code + code"""
boolean ${ev.value} = (${eval1.isNull} && ${eval2.isNull}) ||
(!${eval1.isNull} && !${eval2.isNull} && $equalCode);""", isNull = FalseLiteral)
}
}
@ExpressionDescription(
usage = "expr1 _FUNC_ expr2 - Returns true if `expr1` is less than `expr2`.",
arguments = """
Arguments:
      * expr1, expr2 - the two expressions must be the same type or can be cast to a common type,
          and must be a type that can be ordered. For example, map type is not orderable, so it
          is not supported. For complex types such as array/struct, the data types of fields must
          be orderable.
""",
examples = """
Examples:
> SELECT 1 _FUNC_ 2;
true
> SELECT 1.1 _FUNC_ '1';
false
> SELECT to_date('2009-07-30 04:17:52') _FUNC_ to_date('2009-07-30 04:17:52');
false
> SELECT to_date('2009-07-30 04:17:52') _FUNC_ to_date('2009-08-01 04:17:52');
true
> SELECT 1 _FUNC_ NULL;
NULL
""")
case class LessThan(left: Expression, right: Expression)
extends BinaryComparison with NullIntolerant {
override def symbol: String = "<"
protected override def nullSafeEval(input1: Any, input2: Any): Any = ordering.lt(input1, input2)
}
@ExpressionDescription(
usage = "expr1 _FUNC_ expr2 - Returns true if `expr1` is less than or equal to `expr2`.",
arguments = """
Arguments:
      * expr1, expr2 - the two expressions must be the same type or can be cast to a common type,
          and must be a type that can be ordered. For example, map type is not orderable, so it
          is not supported. For complex types such as array/struct, the data types of fields must
          be orderable.
""",
examples = """
Examples:
> SELECT 2 _FUNC_ 2;
true
> SELECT 1.0 _FUNC_ '1';
true
> SELECT to_date('2009-07-30 04:17:52') _FUNC_ to_date('2009-07-30 04:17:52');
true
> SELECT to_date('2009-07-30 04:17:52') _FUNC_ to_date('2009-08-01 04:17:52');
true
> SELECT 1 _FUNC_ NULL;
NULL
""")
case class LessThanOrEqual(left: Expression, right: Expression)
extends BinaryComparison with NullIntolerant {
override def symbol: String = "<="
protected override def nullSafeEval(input1: Any, input2: Any): Any = ordering.lteq(input1, input2)
}
@ExpressionDescription(
usage = "expr1 _FUNC_ expr2 - Returns true if `expr1` is greater than `expr2`.",
arguments = """
Arguments:
      * expr1, expr2 - the two expressions must be the same type or can be cast to a common type,
          and must be a type that can be ordered. For example, map type is not orderable, so it
          is not supported. For complex types such as array/struct, the data types of fields must
          be orderable.
""",
examples = """
Examples:
> SELECT 2 _FUNC_ 1;
true
> SELECT 2 _FUNC_ '1.1';
true
> SELECT to_date('2009-07-30 04:17:52') _FUNC_ to_date('2009-07-30 04:17:52');
false
> SELECT to_date('2009-07-30 04:17:52') _FUNC_ to_date('2009-08-01 04:17:52');
false
> SELECT 1 _FUNC_ NULL;
NULL
""")
case class GreaterThan(left: Expression, right: Expression)
extends BinaryComparison with NullIntolerant {
override def symbol: String = ">"
protected override def nullSafeEval(input1: Any, input2: Any): Any = ordering.gt(input1, input2)
}
@ExpressionDescription(
usage = "expr1 _FUNC_ expr2 - Returns true if `expr1` is greater than or equal to `expr2`.",
arguments = """
Arguments:
      * expr1, expr2 - the two expressions must be the same type or can be cast to a common type,
          and must be a type that can be ordered. For example, map type is not orderable, so it
          is not supported. For complex types such as array/struct, the data types of fields must
          be orderable.
""",
examples = """
Examples:
> SELECT 2 _FUNC_ 1;
true
> SELECT 2.0 _FUNC_ '2.1';
false
> SELECT to_date('2009-07-30 04:17:52') _FUNC_ to_date('2009-07-30 04:17:52');
true
> SELECT to_date('2009-07-30 04:17:52') _FUNC_ to_date('2009-08-01 04:17:52');
false
> SELECT 1 _FUNC_ NULL;
NULL
""")
case class GreaterThanOrEqual(left: Expression, right: Expression)
extends BinaryComparison with NullIntolerant {
override def symbol: String = ">="
protected override def nullSafeEval(input1: Any, input2: Any): Any = ordering.gteq(input1, input2)
}
/**
* IS UNKNOWN and IS NOT UNKNOWN are the same as IS NULL and IS NOT NULL, respectively,
* except that the input expression must be of a boolean type.
*/
object IsUnknown {
def apply(child: Expression): Predicate = {
new IsNull(child) with ExpectsInputTypes {
override def inputTypes: Seq[DataType] = Seq(BooleanType)
override def sql: String = s"(${child.sql} IS UNKNOWN)"
}
}
}
object IsNotUnknown {
def apply(child: Expression): Predicate = {
new IsNotNull(child) with ExpectsInputTypes {
override def inputTypes: Seq[DataType] = Seq(BooleanType)
override def sql: String = s"(${child.sql} IS NOT UNKNOWN)"
}
}
}
| ConeyLiu/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/predicates.scala | Scala | apache-2.0 | 36,607 |
package io.cumulus.controllers
import io.cumulus.models.user.User
import io.cumulus.models.user.session.{AuthenticationToken, UserSession}
import io.cumulus.services.SessionService
import io.cumulus.validation.AppError
import play.api.i18n.{I18nSupport, Lang, Messages}
import play.api.libs.json.Format
import play.api.mvc.{ActionFilter, Request, RequestHeader, Result}
import scala.concurrent.{ExecutionContext, Future}
/**
* Authentication for Cumulus controllers, using the `SessionService` to load users.
*/
trait UserAuthenticationSupport extends AuthenticationSupport[AuthenticationToken, UserSession] with I18nSupport { self =>
def sessionService: SessionService
/** Format of the session. */
implicit val format: Format[AuthenticationToken] =
AuthenticationToken.format
/** Retrieve authentication from the sessions */
def retrieveAuthentication(
request: Request[_],
token: AuthenticationToken
): Future[Either[AppError, UserSession]] =
sessionService.findValidSession(request.remoteAddress, token)
/** Generate a session from the authentication. */
def generateSession(session: UserSession): AuthenticationToken =
AuthenticationToken.create(session)
  /** Override i18n to resolve the preferred language from the user's settings rather than the request locale. */
override implicit def request2Messages(implicit request: RequestHeader): Messages =
request match {
case authenticatedRequest: AuthenticatedRequest[_] =>
messagesApi.preferred(Seq(Lang(authenticatedRequest.authenticatedSession.lang)))
case otherRequest =>
// Fallback if not an authenticated request
super.request2Messages(otherRequest)
}
/** Filter for admin-only actions. */
val WithAdmin: ActionFilter[AuthenticatedRequest] =
new ActionFilter[AuthenticatedRequest] {
override protected def filter[A](request: AuthenticatedRequest[A]): Future[Option[Result]] = Future.successful {
if (!request.authenticatedSession.user.isAdmin)
Some(AppError.unauthorized.toResult(request))
else
None
}
override protected def executionContext: ExecutionContext =
ec
}
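  // Illustrative composition sketch: assuming AuthenticationSupport exposes an authenticated
  // ActionBuilder (called `AuthenticatedAction` here purely for illustration), an admin-only
  // endpoint could be built as:
  //   AuthenticatedAction.andThen(WithAdmin).async { request => ... }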
/**
* Implicit converter from an authenticated request to user.
*/
implicit def authenticatedRequestToUser(implicit request: AuthenticatedRequest[_]): User =
request.authenticatedSession.user
}
| Cumulus-Cloud/cumulus | server/cumulus-core/src/main/scala/io/cumulus/controllers/UserAuthenticationSupport.scala | Scala | mit | 2,412 |
/*
* Copyright (c) 2016. Fengguo (Hugo) Wei and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Detailed contributors are listed in the CONTRIBUTOR.md
*/
package org.argus.cit.intellij.jawa.lang.psi.stubs.impl
import com.intellij.psi.PsiElement
import com.intellij.psi.stubs.{IStubElementType, StubElement}
import com.intellij.util.io.StringRef
import org.argus.cit.intellij.jawa.lang.psi.JawaExtendsAndImplementsClause
import org.argus.cit.intellij.jawa.lang.psi.stubs.JawaExtendsAndImplementsClauseStub
import org.argus.jawa.core.JawaType
/**
* @author <a href="mailto:[email protected]">Fengguo Wei</a>
*/
class JawaExtendsAndImplementsClausesStubImpl[ParentPsi <: PsiElement](parent: StubElement[ParentPsi],
elemType: IStubElementType[_ <: StubElement[_ <: PsiElement], _ <: PsiElement])
extends StubBaseWrapper[JawaExtendsAndImplementsClause](parent, elemType) with JawaExtendsAndImplementsClauseStub {
private var extType: StringRef = _
private var impTypes: Array[StringRef] = _
def this(parent: StubElement[ParentPsi],
elemType: IStubElementType[_ <: StubElement[_ <: PsiElement], _ <: PsiElement],
extType: String,
impTypes: Array[String]) = {
this(parent, elemType.asInstanceOf[IStubElementType[StubElement[PsiElement], PsiElement]])
this.extType = StringRef.fromString(extType)
this.impTypes = impTypes map StringRef.fromString
}
def this(parent: StubElement[ParentPsi],
elemType: IStubElementType[_ <: StubElement[_ <: PsiElement], _ <: PsiElement],
extType: StringRef,
impTypes: Array[StringRef]) = {
this(parent, elemType.asInstanceOf[IStubElementType[StubElement[PsiElement], PsiElement]])
this.extType = extType
this.impTypes = impTypes
}
override def getExtendType: JawaType = {
if(this.extType != null) new JawaType(StringRef.toString(this.extType))
else null
}
override def getInterfaceTypes: Array[JawaType] = {
this.impTypes.map(t => new JawaType(StringRef.toString(t)))
}
}
| arguslab/argus-cit-intellij | src/main/scala/org/argus/cit/intellij/jawa/lang/psi/stubs/impl/JawaExtendsAndImplementsClausesStubImpl.scala | Scala | epl-1.0 | 2,320 |
package net.lshift.diffa.participants.web
import org.springframework.stereotype.Controller
import org.springframework.web.bind.annotation.{RequestBody, RequestMethod, RequestMapping}
/**
* Simple controller that receives and dumps reports.
*/
@Controller
@RequestMapping(Array("/reports"))
class ReportsListenerController {
@RequestMapping(value=Array("display"), method=Array(RequestMethod.POST))
def display(@RequestBody body:String) = {
println("Got report: ")
println(body)
"ok"
}
} | aprescott/diffa | participants-web/src/main/scala/net/lshift/diffa/participants/web/ReportsListenerController.scala | Scala | apache-2.0 | 509 |
/*
* Copyright (C) 2016 Department for Business, Energy and Industrial Strategy
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package rifs.business.controllers
import javax.inject.Inject
import play.api.Logger
import play.api.libs.json.Json
import play.api.mvc.{Action, Controller}
import rifs.business.actions.OpportunityAction
import rifs.business.data.OpportunityOps
import rifs.business.models.OpportunityId
import rifs.business.notifications.NotificationService
import rifs.business.restmodels.OpportunitySummary
import scala.concurrent.{ExecutionContext, Future}
class OpportunityController @Inject()(opportunities: OpportunityOps,
OpportunityAction: OpportunityAction,
notifications: NotificationService)
(implicit val ec: ExecutionContext)
extends Controller with ControllerUtils {
def byId(id: OpportunityId) = OpportunityAction(id)(request => Ok(Json.toJson(request.opportunity)))
def getSummaries = Action.async(opportunities.summaries.map(os => Ok(Json.toJson(os))))
def getOpenSummaries = Action.async(opportunities.openSummaries.map(os => Ok(Json.toJson(os))))
def getOpen = Action.async(opportunities.findOpen.map(os => Ok(Json.toJson(os))))
def updateSummary(id: OpportunityId) = Action.async(parse.json[OpportunitySummary]) { implicit request =>
val summary = request.body
if (summary.id != id) Future.successful(BadRequest(s"id provided on url was ${id.id}, but does not match id of body: ${summary.id.id}"))
else opportunities.updateSummary(request.body).map(_ => NoContent)
}
def publish(id: OpportunityId) = OpportunityAction(id).async { implicit request =>
import rifs.business.Config.config.rifs.{email => emailConfig}
request.opportunity.publishedAt match {
case None => opportunities.publish(id).flatMap {
case Some(d) =>
val mgrMail = emailConfig.dummymanager
notifications.notifyManager(id, emailConfig.replyto, mgrMail).
map { em =>
if (em.isEmpty) Logger.warn("Failed to find the published opportunity")
}.recover {
case t =>
Logger.error(s"Failed to send email to $mgrMail on an opportunity publishing", t)
None
}.map { _ => Ok(Json.toJson(d)) }
case None => Future.successful(NotFound)
}
case Some(_) => Future.successful(BadRequest(s"Opportunity with id ${id.id} has already been published"))
}
}
def duplicate(id: OpportunityId) = Action.async(opportunities.duplicate(id).map(jsonResult(_)))
def saveDescription(id: OpportunityId, sectionNum: Int) = Action.async(parse.json[String]) { implicit request =>
val description = request.body.trim match {
case "" => None
case s => Some(s)
}
opportunities.saveSectionDescription(id, sectionNum, description).map {
case 0 => NotFound
case _ => NoContent
}
}
}
| UKGovernmentBEIS/rifs-business | src/main/scala/rifs/business/controllers/OpportunityController.scala | Scala | gpl-3.0 | 3,603 |
package collins.solr
import java.util.Date
import collins.models.Asset
import collins.models.AssetMeta.ValueType.Boolean
import collins.models.AssetMeta.ValueType.Double
import collins.models.AssetMeta.ValueType.Integer
import collins.models.AssetMeta.ValueType.String
import collins.models.AssetMetaValue
import collins.models.IpAddresses
import collins.models.IpmiInfo
import collins.models.MetaWrapper
import collins.models.Truthy
import collins.solr.UpperCaseString.UppercaseString2String
import collins.solr.UpperCaseString.string2UpperCaseString
import collins.util.views.Formatter
import collins.solr.Solr.AssetSolrDocument
import collins.solr.SolrKeyFlag._
/**
* asset meta values are all converted into strings with the meta name as the
 * Solr key, using group_id to group values into multi-valued keys.
*/
object AssetSerializer extends SolrSerializer[Asset](AssetDocType) {
val generatedFields = SolrKey("NUM_DISKS", Integer, Dynamic, SingleValued, Sortable) :: SolrKey("KEYS", String, Dynamic, MultiValued, NotSortable) :: Nil
val res = AssetDocType.keyResolver
def getFields(asset: Asset, indexTime: Date) = postProcess {
val opt = Map[SolrKey, Option[SolrValue]](
res("UPDATED").get -> asset.updated.map{t => SolrStringValue(Formatter.solrDateFormat(t), StrictUnquoted)},
res("DELETED").get -> asset.deleted.map{t => SolrStringValue(Formatter.solrDateFormat(t), StrictUnquoted)},
res("STATE").get -> asset.getState.map{s => SolrStringValue(s.name, StrictUnquoted)},
res("IP_ADDRESS").get -> {
val a = IpAddresses.findAllByAsset(asset, false)
if (a.size > 0) {
val addresses = SolrMultiValue(MultiSet.fromSeq(a.map{a => SolrStringValue(a.dottedAddress, StrictUnquoted)}))
Some(addresses)
} else {
None
}
}
).collect{case(k, Some(v)) => (k,v)}
val ipmi: AssetSolrDocument = IpmiInfo.findByAsset(asset).map{ipmi => Map(
res(IpmiInfo.Enum.IpmiAddress.toString).get -> SolrStringValue(ipmi.dottedAddress, StrictUnquoted)
)}.getOrElse(Map())
opt ++ ipmi ++ Map[SolrKey, SolrValue](
res("ID").get -> SolrIntValue(asset.id.toInt),
res("TAG").get -> SolrStringValue(asset.tag, StrictUnquoted),
res("STATUS").get -> SolrStringValue(asset.getStatus.name, StrictUnquoted),
res("TYPE").get -> SolrStringValue(asset.getType.name, StrictUnquoted),
res("CREATED").get -> SolrStringValue(Formatter.solrDateFormat(asset.created), StrictUnquoted)
) ++ serializeMetaValues(AssetMetaValue.findByAsset(asset, false))
}
def getUUID(asset: Asset) = asset.id
//FIXME: The parsing logic here is duplicated in AssetMeta.validateValue
def serializeMetaValues(values: Seq[MetaWrapper]): AssetSolrDocument = {
def process(build: AssetSolrDocument, remain: Seq[MetaWrapper]): AssetSolrDocument = remain match {
case head :: tail => {
val newval = head.getValueType() match {
case Boolean => SolrBooleanValue((new Truthy(head.getValue())).isTruthy)
case Integer => SolrIntValue(java.lang.Integer.parseInt(head.getValue()))
case Double => SolrDoubleValue(java.lang.Double.parseDouble(head.getValue()))
case _ => SolrStringValue(head.getValue(), StrictUnquoted)
}
val solrKey = res(head.getName()).get
val mergedval = build.get(solrKey) match {
case Some(exist) => exist match {
case s: SolrSingleValue => SolrMultiValue(MultiSet(s, newval), newval.valueType)
case m: SolrMultiValue => m + newval
}
case None => newval
}
process(build + (solrKey -> mergedval), tail)
}
case _ => build
}
process(Map(), values)
}
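  // Illustrative example (the meta name and values are hypothetical): two MetaWrapper rows for
  // the same Integer-typed meta, e.g. NIC_COUNT=2 and NIC_COUNT=4, collapse into a single entry
  // SolrKey("NIC_COUNT", ...) -> SolrMultiValue(MultiSet(SolrIntValue(2), SolrIntValue(4))).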
def postProcess(doc: AssetSolrDocument): AssetSolrDocument = {
val disks:Option[Tuple2[SolrKey, SolrValue]] = doc.find{case (k,v) => k.name == "DISK_SIZE_BYTES"}.map{case (k,v) => (res("NUM_DISKS").get -> SolrIntValue(v match {
case s:SolrSingleValue => 1
case SolrMultiValue(vals, _) => vals.size.toInt
}))}
val newFields = List(disks).flatten.toMap
val almostDone = doc ++ newFields
val keyList = SolrMultiValue(MultiSet.fromSeq(almostDone.map{case (k,v) => SolrStringValue(k.name, StrictUnquoted)}.toSeq), String)
//val sortKeys = almostDone.map{case(k,v) => k.sortify(v)}.flatten
almostDone + (res("KEYS").get -> keyList)
}
}
| funzoneq/collins | app/collins/solr/asset/AssetSerializer.scala | Scala | apache-2.0 | 4,433 |
/* sbt -- Simple Build Tool
* Copyright 2010, 2011 Mark Harrah
*/
package object sbt extends sbt.std.TaskExtra with sbt.Types with sbt.ProcessExtra with sbt.impl.DependencyBuilders
with sbt.PathExtra with sbt.ProjectExtra with sbt.DependencyFilterExtra with sbt.BuildExtra
{
@deprecated("Use SettingKey, which is a drop-in replacement.", "0.11.1")
type ScopedSetting[T] = SettingKey[T]
@deprecated("Use TaskKey, which is a drop-in replacement.", "0.11.1")
type ScopedTask[T] = TaskKey[T]
@deprecated("Use InputKey, which is a drop-in replacement.", "0.11.1")
type ScopedInput[T] = InputKey[T]
type Setting[T] = Project.Setting[T]
type ScopedKey[T] = Project.ScopedKey[T]
type SettingsDefinition = Project.SettingsDefinition
type File = java.io.File
type URI = java.net.URI
type URL = java.net.URL
implicit def maybeToOption[S](m: xsbti.Maybe[S]): Option[S] =
if(m.isDefined) Some(m.get) else None
def uri(s: String): URI = new URI(s)
def file(s: String): File = new File(s)
def url(s: String): URL = new URL(s)
final val ThisScope = Scope.ThisScope
final val GlobalScope = Scope.GlobalScope
import sbt.{Configurations => C}
final val Compile = C.Compile
final val Test = C.Test
final val Runtime = C.Runtime
final val IntegrationTest = C.IntegrationTest
final val Default = C.Default
final val Docs = C.Docs
final val Sources = C.Sources
final val Provided = C.Provided
// java.lang.System is more important, so don't alias this one
// final val System = C.System
final val Optional = C.Optional
def config(s: String): Configuration = Configurations.config(s)
}
| kuochaoyi/xsbt | sbt/package.scala | Scala | bsd-3-clause | 1,602 |
package controllers.backend
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.language.higherKinds
import slick.lifted.RunnableCompiled
import com.overviewdocs.database.Database
import models.pagination.{Page,PageInfo,PageRequest}
trait DbBackend {
protected val database: Database
import database.api._
/** Returns a Page[T] based on an item query, uncompiled.
*
* This is the only way to handle `WHERE ... IN (...)` queries. It takes
* more CPU than `RunnableCompiled` queries.
*/
def page[E, U, C[_]](itemsQ: Query[E, U, C], countQ: Rep[Int], pageRequest: PageRequest): Future[Page[U]] = {
// Sequential, so Postgres can benefit from a hot cache on the second query
val action = for {
items <- itemsQ.to[Vector].result
count <- countQ.result
} yield Page(items, PageInfo(pageRequest, count))
database.run(action)
}
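  // Illustrative usage sketch (the `documents` table, its columns, and the PageRequest
  // offset/limit accessors are assumptions, not part of this file):
  //   val itemsQ = documents.filter(_.setId === setId).drop(pr.offset).take(pr.limit)
  //   val countQ = documents.filter(_.setId === setId).length
  //   page(itemsQ, countQ, pr)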
/** Returns a Page[T] based on item and count queries.
*
* The itemsQ is compiled, so it must already include
   * `.drop(offset).take(limit)`. That makes `pageRequest` seem redundant,
* but let's not lose sleep over it.
*/
def page[T](itemsQ: RunnableCompiled[_, Seq[T]], countQ: RunnableCompiled[_, Int], pageRequest: PageRequest): Future[Page[T]] = {
// Sequential, so Postgres can benefit from a hot cache on the second query
val action = for {
items <- itemsQ.result
count <- countQ.result
} yield Page(items.toVector, PageInfo(pageRequest, count))
database.run(action)
}
def emptyPage[T](pageRequest: PageRequest) = Future.successful(Page(Vector[T](), PageInfo(pageRequest, 0)))
}
| overview/overview-server | web/app/controllers/backend/DbBackend.scala | Scala | agpl-3.0 | 1,676 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.cluster.sdv.generated
import java.util.TimeZone
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.BeforeAndAfterAll
import org.scalatest.Matchers._
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.metadata.schema.datamap.DataMapClassProvider.TIMESERIES
import org.apache.carbondata.core.util.CarbonProperties
class TimeSeriesPreAggregateTestCase extends QueryTest with BeforeAndAfterAll {
val timeSeries = TIMESERIES.toString
val timeZonePre = TimeZone.getDefault
val csvPath = s"$resourcesPath/Data/timeseriestest.csv"
override def beforeAll: Unit = {
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT)
TimeZone.setDefault(TimeZone.getTimeZone(System.getProperty("user.timezone")))
sql("drop table if exists mainTable")
sql(
"CREATE TABLE mainTable(mytime timestamp, name string, age int) STORED BY 'org.apache" +
".carbondata.format'")
sql(
s"""
| CREATE DATAMAP agg0_second ON TABLE mainTable
| USING '$timeSeries'
| DMPROPERTIES (
| 'EVENT_TIME'='mytime',
| 'SECOND_GRANULARITY'='1')
| AS SELECT mytime, SUM(age) FROM mainTable
| GROUP BY mytime
""".stripMargin)
sql(
s"""
| CREATE DATAMAP agg0_minute ON TABLE mainTable
| USING '$timeSeries'
| DMPROPERTIES (
| 'EVENT_TIME'='mytime',
| 'minute_granularity'='1')
| AS SELECT mytime, SUM(age) FROM mainTable
| GROUP BY mytime
""".stripMargin)
sql(
s"""
| CREATE DATAMAP agg0_hour ON TABLE mainTable
| USING '$timeSeries'
| DMPROPERTIES (
| 'EVENT_TIME'='mytime',
| 'HOUR_GRANULARITY'='1')
| AS SELECT mytime, SUM(age) FROM mainTable
| GROUP BY mytime
""".stripMargin)
sql(
s"""
| CREATE DATAMAP agg0_day ON TABLE mainTable
| USING '$timeSeries'
| DMPROPERTIES (
| 'EVENT_TIME'='mytime',
| 'DAY_GRANULARITY'='1')
| AS SELECT mytime, SUM(age) FROM mainTable
| GROUP BY mytime
""".stripMargin)
sql(
s"""
| CREATE DATAMAP agg0_month ON TABLE mainTable
| USING '$timeSeries'
| DMPROPERTIES (
| 'EVENT_TIME'='mytime',
| 'MONTH_GRANULARITY'='1')
| AS SELECT mytime, SUM(age) FROM mainTable
| GROUP BY mytime
""".stripMargin)
sql(
s"""
| CREATE DATAMAP agg0_year ON TABLE mainTable
| USING '$timeSeries'
| DMPROPERTIES (
| 'EVENT_TIME'='mytime',
| 'year_granularity'='1')
| AS SELECT mytime, SUM(age) FROM mainTable
| GROUP BY mytime
""".stripMargin)
sql(s"LOAD DATA LOCAL INPATH '$csvPath' into table mainTable")
}
test("TimeSeriesPreAggregateTestCase_001") {
val expected = sql("select cast(date_format(mytime, 'YYYY') as timestamp) as mytime,sum(age) " +
"from mainTable group by date_format(mytime , 'YYYY')")
val actual = sql("select * from maintable_agg0_year")
checkAnswer(actual, expected)
}
test("TimeSeriesPreAggregateTestCase_002") {
val expected = sql(
"select cast(date_format(mytime, 'YYYY-MM') as timestamp) as mytime,sum(age) " +
"from mainTable group by date_format(mytime , 'YYYY-MM')")
val actual = sql("select * from maintable_agg0_month")
checkAnswer(actual, expected)
}
test("TimeSeriesPreAggregateTestCase_003") {
val expected = sql(
"select cast(date_format(mytime, 'YYYY-MM-dd') as timestamp) as mytime,sum(age) " +
"from mainTable group by date_format(mytime , 'YYYY-MM-dd')")
val actual = sql("select * from maintable_agg0_day")
checkAnswer(actual, expected)
}
test("TimeSeriesPreAggregateTestCase_004") {
val expected = sql(
"select cast(date_format(mytime, 'YYYY-MM-dd HH') as timestamp) as mytime,sum(age) " +
"from mainTable group by date_format(mytime , 'YYYY-MM-dd HH')")
val actual = sql("select * from maintable_agg0_hour")
checkAnswer(actual, expected)
}
test("TimeSeriesPreAggregateTestCase_005") {
val expected = sql(
"select cast(date_format(mytime, 'YYYY-MM-dd HH:mm') as timestamp) as mytime,sum(age) " +
"from mainTable group by date_format(mytime , 'YYYY-MM-dd HH:mm')")
val actual = sql("select * from maintable_agg0_minute")
checkAnswer(actual, expected)
}
test("TimeSeriesPreAggregateTestCase_006") {
val expected = sql(
"select cast(date_format(mytime, 'YYYY-MM-dd HH:mm:ss') as timestamp) as mytime,sum(age) " +
"from mainTable group by date_format(mytime , 'YYYY-MM-dd HH:mm:ss')")
val actual = sql("select * from maintable_agg0_second")
checkAnswer(actual, expected)
}
  // Test case for compaction: minor compaction of the main table must also compact the pre-aggregate child tables.
test("TimeSeriesPreAggregateTestCase_007") {
sql(s"LOAD DATA LOCAL INPATH '$csvPath' into table mainTable")
sql(s"LOAD DATA LOCAL INPATH '$csvPath' into table mainTable")
sql(s"LOAD DATA LOCAL INPATH '$csvPath' into table mainTable")
sql("alter table maintable compact 'minor'")
val segmentNamesSecond = sql("show segments for table maintable_agg0_second").collect()
.map(_.get(0).toString)
segmentNamesSecond should equal(Array("3", "2", "1", "0.1", "0"))
val segmentNamesMinute = sql("show segments for table maintable_agg0_minute").collect()
.map(_.get(0).toString)
segmentNamesMinute should equal(Array("3", "2", "1", "0.1", "0"))
val segmentNamesHour = sql("show segments for table maintable_agg0_hour").collect()
.map(_.get(0).toString)
segmentNamesHour should equal(Array("3", "2", "1", "0.1", "0"))
val segmentNamesday = sql("show segments for table maintable_agg0_day").collect()
.map(_.get(0).toString)
segmentNamesday should equal(Array("3", "2", "1", "0.1", "0"))
val segmentNamesmonth = sql("show segments for table maintable_agg0_month").collect()
.map(_.get(0).toString)
segmentNamesmonth should equal(Array("3", "2", "1", "0.1", "0"))
val segmentNamesyear = sql("show segments for table maintable_agg0_year").collect()
.map(_.get(0).toString)
segmentNamesyear should equal(Array("3", "2", "1", "0.1", "0"))
}
override def afterAll: Unit = {
TimeZone.setDefault(timeZonePre)
sql("drop table if exists mainTable")
}
}
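// Illustrative sketch (not part of the original suite): the rolled-up query used as the expected
// result in test 004, kept as a reusable SQL string. Table and column names follow the mainTable
// defined above; everything else here is an assumption for illustration only.
object TimeSeriesRollupExample {
  val hourlyRollupSql: String =
    "select cast(date_format(mytime, 'YYYY-MM-dd HH') as timestamp) as mytime, sum(age) " +
      "from mainTable group by date_format(mytime, 'YYYY-MM-dd HH')"
}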
| sgururajshetty/carbondata | integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/TimeSeriesPreAggregateTestCase.scala | Scala | apache-2.0 | 7,374 |
package io.buoyant.linkerd.protocol
import com.twitter.conversions.storage._
import com.twitter.conversions.time._
import com.twitter.finagle.{Service, ServiceFactory, Stack, param}
import com.twitter.finagle.http.{param => hparam}
import com.twitter.finagle.http.{Request, Response, Status, Version}
import com.twitter.finagle.service.{Retries, RetryBudget}
import com.twitter.finagle.stack.nilStack
import com.twitter.finagle.stats.InMemoryStatsReceiver
import com.twitter.io.Reader
import com.twitter.util.{Future, MockTimer, Promise, Time}
import io.buoyant.linkerd.protocol.http.ResponseClassifiers
import io.buoyant.test.Awaits
import org.scalatest.FunSuite
import org.scalatest.concurrent.Eventually
import scala.language.reflectiveCalls
class HttpInitializerTest extends FunSuite with Awaits with Eventually {
test("path stack: services are not closed until streams are complete") {
// Build a path stack that is controllable with promises. This is
// only really useful for a single request.
val serviceP, responseP, bodyP, respondingP, closedP = Promise[Unit]
val http = new HttpInitializer {
val svc = new Service[Request, Response] {
def apply(req: Request) = {
val rw = Reader.writable()
val rsp = Response(req.version, Status.Ok, rw)
val _ = bodyP.before(rw.close())
respondingP.setDone()
responseP.before(Future.value(rsp))
}
override def close(d: Time) = {
closedP.setDone()
Future.Unit
}
}
val sf = ServiceFactory { () => serviceP.before(Future.value(svc)) }
def make(params: Stack.Params = Stack.Params.empty) =
(defaultRouter.pathStack ++ Stack.Leaf(Stack.Role("leaf"), sf)).make(params)
}
// The factory is returned immediately because it is wrapped in a
// FactoryToService.
val factory = http.make()
val svcf = factory()
assert(svcf.isDefined)
val svc = await(svcf)
// When a request is processed, first the service must be acquired
// from the service factory, and then the response must be
// returned from the service.
val rspf = svc(Request())
assert(!rspf.isDefined)
assert(!respondingP.isDefined)
serviceP.setDone()
eventually { assert(respondingP.isDefined) }
assert(!rspf.isDefined)
responseP.setDone()
eventually { assert(rspf.isDefined) }
// Once the response is returned, FactoryToService tries to close
// the service factory. Ensure that the service is not closed
// until the response body is completely sent.
val rsp = await(rspf)
assert(rsp.isChunked)
assert(!closedP.isDefined)
    // When the response body is written, it must be fully read from
    // the response before the service is closed.
bodyP.setDone()
assert(!closedP.isDefined)
assert(await(rsp.reader.read(1)) == None)
eventually { assert(closedP.isDefined) }
}
test("path stack: retries") {
@volatile var requests = 0
val http = new HttpInitializer {
val sf = ServiceFactory.const(Service.mk[Request, Response] { req =>
requests += 1
Future.value(Response(req.version, Status.InternalServerError))
})
def make(params: Stack.Params = Stack.Params.empty) =
(defaultRouter.pathStack ++ Stack.Leaf(Stack.Role("leaf"), sf)).make(params)
}
val budget = RetryBudget(10.seconds, 0, 0.5)
val params = Stack.Params.empty +
param.ResponseClassifier(ResponseClassifiers.RetryableReadFailures) +
Retries.Budget(budget)
val factory = http.make(params)
val service = await(factory())
// First request just returns, since retry budget hasn't yet accrued.
val response0 = await(service(Request()))
assert(requests == 1)
// The second request is retryable because of the 50% retry
// budget.
val response1 = await(service(Request()))
assert(requests == 3)
}
class WildErr extends Exception
test("path stack: error handling") {
@volatile var requests = 0
val http = new HttpInitializer {
val sf = ServiceFactory.const(Service.mk[Request, Response] { req =>
requests += 1
Future.exception(new WildErr)
})
def make(params: Stack.Params = Stack.Params.empty) =
(defaultRouter.pathStack ++ Stack.Leaf(Stack.Role("leaf"), sf)).make(params)
}
val stats = new InMemoryStatsReceiver
val factory = http.make(Stack.Params.empty + param.Stats(stats))
val service = await(factory())
val response = await(service(Request()))
assert(requests == 1)
assert(response.status == Status.BadGateway)
assert(response.headerMap.contains("l5d-err"))
val counter = Seq("failures", "io.buoyant.linkerd.protocol.HttpInitializerTest$WildErr")
assert(stats.counters.get(counter) == Some(1))
}
test("server has codec parameters from router") {
val maxChunkSize = hparam.MaxChunkSize(10.kilobytes)
val maxHeaderSize = hparam.MaxHeaderSize(20.kilobytes)
val maxInitLineSize = hparam.MaxInitialLineSize(30.kilobytes)
val maxReqSize = hparam.MaxRequestSize(40.kilobytes)
val maxRspSize = hparam.MaxResponseSize(50.kilobytes)
val streaming = hparam.Streaming(false)
val compression = hparam.CompressionLevel(3)
val router = HttpInitializer.router
.configured(maxChunkSize).configured(maxHeaderSize).configured(maxInitLineSize)
.configured(maxReqSize).configured(maxRspSize)
.configured(streaming).configured(compression)
.serving(HttpServerConfig(None, None).mk(HttpInitializer, "yolo"))
.initialize()
assert(router.servers.size == 1)
val sparams = router.servers.head.params
assert(sparams[hparam.MaxChunkSize] == maxChunkSize)
assert(sparams[hparam.MaxHeaderSize] == maxHeaderSize)
assert(sparams[hparam.MaxInitialLineSize] == maxInitLineSize)
assert(sparams[hparam.MaxRequestSize] == maxReqSize)
assert(sparams[hparam.MaxResponseSize] == maxRspSize)
assert(sparams[hparam.Streaming] == streaming)
assert(sparams[hparam.CompressionLevel] == compression)
}
}
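// Hypothetical sketch (not part of the original tests): the same retry wiring used in the
// "path stack: retries" test, expressed as a reusable parameter bundle. The 20% budget and
// 10-second TTL are illustrative values only.
object RetryParamsExample {
  val params: Stack.Params = Stack.Params.empty +
    param.ResponseClassifier(ResponseClassifiers.RetryableReadFailures) +
    Retries.Budget(RetryBudget(10.seconds, 0, 0.2))
}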
| hhtpcd/linkerd | linkerd/protocol/http/src/test/scala/io/buoyant/linkerd/protocol/HttpInitializerTest.scala | Scala | apache-2.0 | 6,113 |
/**
* Copyright 2015 Yahoo Inc. Licensed under the Apache License, Version 2.0
* See accompanying LICENSE file.
*/
package kafka.manager
import akka.actor.{ActorRef, Cancellable, ActorPath}
import kafka.manager.features.KMJMXMetricsFeature
import kafka.manager.utils.FiniteQueue
import org.joda.time.DateTime
import scala.collection.immutable.Queue
import scala.collection.mutable
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.Try
/**
* @author hiral
*/
import ActorModel._
case class BrokerViewCacheActorConfig(kafkaStateActorPath: ActorPath,
clusterContext: ClusterContext,
longRunningPoolConfig: LongRunningPoolConfig,
updatePeriod: FiniteDuration = 10 seconds)
class BrokerViewCacheActor(config: BrokerViewCacheActorConfig) extends LongRunningPoolActor {
private[this] val ZERO = BigDecimal(0)
private[this] var cancellable : Option[Cancellable] = None
private[this] var topicIdentities : Map[String, TopicIdentity] = Map.empty
private[this] var previousTopicDescriptionsOption : Option[TopicDescriptions] = None
private[this] var topicDescriptionsOption : Option[TopicDescriptions] = None
private[this] var topicConsumerMap : Map[String, Iterable[String]] = Map.empty
private[this] var consumerIdentities : Map[String, ConsumerIdentity] = Map.empty
private[this] var consumerDescriptionsOption : Option[ConsumerDescriptions] = None
private[this] var brokerListOption : Option[BrokerList] = None
private[this] var brokerMetrics : Map[Int, BrokerMetrics] = Map.empty
private[this] val brokerTopicPartitions : mutable.Map[Int, BVView] = new mutable.HashMap[Int, BVView]
private[this] val topicMetrics: mutable.Map[String, mutable.Map[Int, BrokerMetrics]] =
new mutable.HashMap[String, mutable.Map[Int, BrokerMetrics]]()
private[this] var combinedBrokerMetric : Option[BrokerMetrics] = None
private[this] val EMPTY_BVVIEW = BVView(Map.empty, config.clusterContext, Option(BrokerMetrics.DEFAULT))
private[this] var brokerMessagesPerSecCountHistory : Map[Int, Queue[BrokerMessagesPerSecCount]] = Map.empty
override def preStart() = {
log.info("Started actor %s".format(self.path))
log.info("Scheduling updater for %s".format(config.updatePeriod))
cancellable = Some(
context.system.scheduler.schedule(0 seconds,
config.updatePeriod,
self,
BVForceUpdate)(context.system.dispatcher,self)
)
}
@scala.throws[Exception](classOf[Exception])
override def postStop(): Unit = {
log.info("Stopped actor %s".format(self.path))
log.info("Cancelling updater...")
Try(cancellable.map(_.cancel()))
super.postStop()
}
override protected def longRunningPoolConfig: LongRunningPoolConfig = config.longRunningPoolConfig
override protected def longRunningQueueFull(): Unit = {
log.error("Long running pool queue full, skipping!")
}
private def produceBViewWithBrokerClusterState(bv: BVView, id: Int) : BVView = {
val bcs = for {
metrics <- bv.metrics
cbm <- combinedBrokerMetric
} yield {
val perMessages = if(cbm.messagesInPerSec.oneMinuteRate > 0) {
BigDecimal(metrics.messagesInPerSec.oneMinuteRate / cbm.messagesInPerSec.oneMinuteRate * 100D).setScale(3, BigDecimal.RoundingMode.HALF_UP)
} else ZERO
val perIncoming = if(cbm.bytesInPerSec.oneMinuteRate > 0) {
BigDecimal(metrics.bytesInPerSec.oneMinuteRate / cbm.bytesInPerSec.oneMinuteRate * 100D).setScale(3, BigDecimal.RoundingMode.HALF_UP)
} else ZERO
val perOutgoing = if(cbm.bytesOutPerSec.oneMinuteRate > 0) {
BigDecimal(metrics.bytesOutPerSec.oneMinuteRate / cbm.bytesOutPerSec.oneMinuteRate * 100D).setScale(3, BigDecimal.RoundingMode.HALF_UP)
} else ZERO
BrokerClusterStats(perMessages, perIncoming, perOutgoing)
}
val messagesPerSecCountHistory = brokerMessagesPerSecCountHistory.get(id)
if(bcs.isDefined) {
bv.copy(stats = bcs, messagesPerSecCountHistory = messagesPerSecCountHistory)
} else {
bv.copy(messagesPerSecCountHistory = messagesPerSecCountHistory)
}
}
private def allBrokerViews(): Map[Int, BVView] = {
    val bvs = mutable.Map[Int, BVView]()
for (key <- brokerTopicPartitions.keySet.toSeq.sorted) {
val bv = brokerTopicPartitions.get(key).map { bv => produceBViewWithBrokerClusterState(bv, key) }
if (bv.isDefined) {
bvs.put(key, bv.get)
}
}
bvs.toMap
}
override def processActorRequest(request: ActorRequest): Unit = {
request match {
case BVForceUpdate =>
log.info("Updating broker view...")
//ask for topic descriptions
val lastUpdateMillisOption: Option[Long] = topicDescriptionsOption.map(_.lastUpdateMillis)
context.actorSelection(config.kafkaStateActorPath).tell(KSGetAllTopicDescriptions(lastUpdateMillisOption), self)
context.actorSelection(config.kafkaStateActorPath).tell(KSGetAllConsumerDescriptions(lastUpdateMillisOption), self)
context.actorSelection(config.kafkaStateActorPath).tell(KSGetBrokers, self)
case BVGetViews =>
sender ! allBrokerViews()
case BVGetView(id) =>
sender ! brokerTopicPartitions.get(id).map { bv =>
produceBViewWithBrokerClusterState(bv, id)
}
case BVGetBrokerMetrics =>
sender ! brokerMetrics
case BVGetTopicMetrics(topic) =>
sender ! topicMetrics.get(topic).map(m => m.values.foldLeft(BrokerMetrics.DEFAULT)((acc,bm) => acc + bm))
case BVGetTopicIdentities =>
sender ! topicIdentities
case BVGetTopicConsumerMap =>
sender ! topicConsumerMap
case BVGetConsumerIdentities =>
sender ! consumerIdentities
case BVUpdateTopicMetricsForBroker(id, metrics) =>
metrics.foreach {
case (topic, bm) =>
val tm = topicMetrics.getOrElse(topic, new mutable.HashMap[Int, BrokerMetrics])
tm.put(id, bm)
topicMetrics.put(topic, tm)
}
case BVUpdateBrokerMetrics(id, metrics) =>
brokerMetrics += (id -> metrics)
combinedBrokerMetric = Option(brokerMetrics.values.foldLeft(BrokerMetrics.DEFAULT)((acc, m) => acc + m))
val updatedBVView = brokerTopicPartitions.getOrElse(id, EMPTY_BVVIEW).copy(metrics = Option(metrics))
brokerTopicPartitions.put(id, updatedBVView)
val now = DateTime.now()
val messagesCount = BrokerMessagesPerSecCount(now, metrics.messagesInPerSec.count)
brokerMessagesPerSecCountHistory += (id -> brokerMessagesPerSecCountHistory.get(id).map {
history =>
history.enqueueFinite(messagesCount, 10)
}.getOrElse {
Queue(messagesCount)
})
case any: Any => log.warning("bvca : processActorRequest : Received unknown message: {}", any)
}
}
override def processActorResponse(response: ActorResponse): Unit = {
response match {
case td: TopicDescriptions =>
previousTopicDescriptionsOption = topicDescriptionsOption
topicDescriptionsOption = Some(td)
updateView()
case cd: ConsumerDescriptions =>
consumerDescriptionsOption = Some(cd)
updateView()
case bl: BrokerList =>
brokerListOption = Some(bl)
updateView()
case any: Any => log.warning("bvca : processActorResponse : Received unknown message: {}", any)
}
}
implicit def queue2finitequeue[A](q: Queue[A]): FiniteQueue[A] = new FiniteQueue[A](q)
private[this] def updateView(): Unit = {
updateViewForBrokersAndTopics()
updateViewsForConsumers()
}
private[this] def updateViewForBrokersAndTopics(): Unit = {
for {
brokerList <- brokerListOption
topicDescriptions <- topicDescriptionsOption
previousDescriptionsMap: Option[Map[String, TopicDescription]] = previousTopicDescriptionsOption.map(_.descriptions.map(td => (td.topic, td)).toMap)
} {
val topicIdentity : IndexedSeq[TopicIdentity] = topicDescriptions.descriptions.map {
tdCurrent =>
TopicIdentity.from(brokerList.list.size,tdCurrent,None, config.clusterContext, previousDescriptionsMap.flatMap(_.get(tdCurrent.topic)))
}
topicIdentities = topicIdentity.map(ti => (ti.topic, ti)).toMap
val topicPartitionByBroker = topicIdentity.flatMap(
ti => ti.partitionsByBroker.map(btp => (ti,btp.id,btp.partitions))).groupBy(_._2)
//check for 2*broker list size since we schedule 2 jmx calls for each broker
      if (config.clusterContext.clusterFeatures.features(KMJMXMetricsFeature) && hasCapacityFor(2 * brokerList.list.size)) {
implicit val ec = longRunningExecutionContext
val brokerLookup = brokerList.list.map(bi => bi.id -> bi).toMap
topicPartitionByBroker.foreach {
case (brokerId, topicPartitions) =>
val brokerInfoOpt = brokerLookup.get(brokerId)
brokerInfoOpt.foreach {
broker =>
longRunning {
Future {
val tryResult = KafkaJMX.doWithConnection(broker.host, broker.jmxPort) {
mbsc =>
topicPartitions.map {
case (topic, id, partitions) =>
(topic.topic,
KafkaMetrics.getBrokerMetrics(config.clusterContext.config.version, mbsc, Option(topic.topic)))
}
}
val result = tryResult match {
case scala.util.Failure(t) =>
log.error(t, s"Failed to get topic metrics for broker $broker")
topicPartitions.map {
case (topic, id, partitions) =>
(topic.topic, BrokerMetrics.DEFAULT)
}
case scala.util.Success(bm) => bm
}
self.tell(BVUpdateTopicMetricsForBroker(broker.id,result), ActorRef.noSender)
}
}
}
}
brokerList.list.foreach {
broker =>
longRunning {
Future {
val tryResult = KafkaJMX.doWithConnection(broker.host, broker.jmxPort) {
mbsc =>
KafkaMetrics.getBrokerMetrics(config.clusterContext.config.version, mbsc)
}
val result = tryResult match {
case scala.util.Failure(t) =>
log.error(t, s"Failed to get broker metrics for $broker")
BrokerMetrics.DEFAULT
case scala.util.Success(bm) => bm
}
self.tell(BVUpdateBrokerMetrics(broker.id,result), ActorRef.noSender)
}
}
}
} else if(config.clusterContext.clusterFeatures.features(KMJMXMetricsFeature)) {
log.warning("Not scheduling update of JMX for all brokers, not enough capacity!")
}
topicPartitionByBroker.foreach {
case (brokerId, topicPartitions) =>
val topicPartitionsMap: Map[TopicIdentity, IndexedSeq[Int]] = topicPartitions.map {
case (topic, id, partitions) =>
(topic, partitions)
}.toMap
brokerTopicPartitions.put(
brokerId, BVView(topicPartitionsMap, config.clusterContext, brokerMetrics.get(brokerId)))
}
}
}
private[this] def updateViewsForConsumers(): Unit = {
for {
consumerDescriptions <- consumerDescriptionsOption
} {
val consumerIdentity : IndexedSeq[ConsumerIdentity] = consumerDescriptions.descriptions.map(
ConsumerIdentity.from(_, config.clusterContext))
consumerIdentities = consumerIdentity.map(ci => (ci.consumerGroup, ci)).toMap
val c2tMap = consumerDescriptions.descriptions.map{cd: ConsumerDescription =>
(cd.consumer, cd.topics.keys.toList)}.toMap
topicConsumerMap = c2tMap.values.flatten.map(v => (v, c2tMap.keys.filter(c2tMap(_).contains(v)))).toMap
}
}
}
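// Hypothetical usage sketch (not part of the original file): callers typically query the cache
// actor with the ask pattern using the BV* messages handled above. The helper name and the idea
// of exposing it as a standalone object are assumptions for illustration.
object BrokerViewCacheQueries {
  import akka.pattern.ask
  import akka.util.Timeout

  def brokerViews(cache: ActorRef)(implicit timeout: Timeout): Future[Map[Int, BVView]] =
    (cache ? BVGetViews).mapTo[Map[Int, BVView]]
}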
| Flipkart/kafka-manager | app/kafka/manager/BrokerViewCacheActor.scala | Scala | apache-2.0 | 12,236 |
package com.airtonjal.poc.pchr
import com.fasterxml.jackson.databind.{ObjectMapper, JsonNode}
import com.fasterxml.jackson.databind.node._
import com.airtonjal.poc.cell.CellsInfo
import com.airtonjal.poc.geolocation.trilateration.{NonLinearLeastSquaresSolver, TrilaterationFunction}
import com.airtonjal.poc.json.JsonUtils._
import com.airtonjal.poc.pchr.PCHRSchema._
import org.slf4j.LoggerFactory
import org.apache.commons.math3.fitting.leastsquares.LevenbergMarquardtOptimizer
import scala.collection.mutable.ListBuffer
import scala.math._
import scala.collection.JavaConversions._
import scala.util.control.NonFatal
/**
 * Simple geolocation implementation
* @author <a href="mailto:[email protected]">Airton Libório</a>
*/
object Geolocation {
private val log = LoggerFactory.getLogger(getClass())
val MOBILE_ANTENNA_HEIGHT = 1.5
val CORRECTION_FACTOR = 3 // Between 0 and 3
val MINIMUM_MEASUREMENTS = 3 // Minimum number of measurements to use trilateration algorithm
val DL_FC_MHZ = 2152.6
val mapper = new ObjectMapper
def geolocate(root: JsonNode): Unit = {
findNode(root, Some(EVENTS_NAME)) match {
case Some(events: ArrayNode) => {
for(event <- events) {
findNode(event, Some(MEASUREMENTS_NAME)) match {
case Some(measurements: ArrayNode) => {
var positions = new ListBuffer[(Double, Double)]()
var distances = new ListBuffer[Double]()
var propgDelayNode = None : Option[JsonNode]
var latitude = None: Option[Double]
var longitude = None: Option[Double]
for (measurement <- measurements) {
findNode(measurement, Some(PCHRSchema.PROPG_DELAY.toString)) match { // Tries to find a node with propagation delay
case Some(propNode: IntNode) => propgDelayNode = Some(measurement)
case _ =>
}
val latNode = findNode(measurement, Some(CellsInfo.Fields.LAT.toString))
val lonNode = findNode(measurement, Some(CellsInfo.Fields.LON.toString))
// val dlFcMhz = findNode(measurement, CellsInfo.Fields.DL_FC_MHZ.toString)
val antennaHeightNode = findNode(measurement, Some(CellsInfo.Fields.ANTENNA_HEIGHT.toString))
val rscpNode = findNode(measurement, Some(PCHRSchema.CELL_RSCP.toString))
val maxTxPowerNode = findNode(measurement, Some(CellsInfo.Fields.MAX_TX_POWER.toString))
(rscpNode, latNode, lonNode, antennaHeightNode, maxTxPowerNode) match {
case(Some(rscp: IntNode), Some(lat: DoubleNode), Some(lon: DoubleNode),
Some(antennaHeight: IntNode), Some(maxTxPower: IntNode)) => {
if (antennaHeight.asInt > 0 && rscp.asInt() != 0) {
val distance = calculateDistance(lat.asDouble(), lon.asDouble(), DL_FC_MHZ, // Hardcoding because of csv value
antennaHeight.asDouble(), rscp.asDouble(), maxTxPower.asDouble())
positions = positions :+ (lon.asDouble(), lat.asDouble())
distances = distances :+ distance
}
}
case _ => log.trace("Not enough info to geolocate")
}
}
var method: String = null
propgDelayNode match {
case Some(propMeasurement: ObjectNode) => {
method = "Azimuth"
val propgDelayNode = findNode(propMeasurement, Some(PCHRSchema.PROPG_DELAY.toString))
val latNode = findNode(propMeasurement, Some(CellsInfo.Fields.LAT.toString))
val lonNode = findNode(propMeasurement, Some(CellsInfo.Fields.LON.toString))
val azimuthNode = findNode(propMeasurement, Some(CellsInfo.Fields.AZIMUTH.toString))
(propgDelayNode, latNode, lonNode, azimuthNode) match {
case(Some(propgDelay: IntNode), Some(lat: DoubleNode), Some(lon: DoubleNode), Some(azimuth: IntNode)) => {
doAzimuth(propgDelay.asInt, lon.asDouble, lat.asDouble, azimuth.asInt) match {
case Some(latLon) => longitude = Some(latLon._1); latitude = Some(latLon._2)
case _ =>
}
}
case _ =>
}
}
case _ =>
try {
if (positions.size >= MINIMUM_MEASUREMENTS) {
method = "Trilateration"
val positionsArray = positions.map(t => Array(t._1, t._2)).toArray
val trilaterationFunction = new TrilaterationFunction(positionsArray, distances.toArray)
val solver = new NonLinearLeastSquaresSolver(trilaterationFunction, new LevenbergMarquardtOptimizer())
val optimum = solver.solve()
val latLon = optimum.getPoint.toArray
latitude = Some(latLon(0))
longitude = Some(latLon(1))
}
} catch {
case NonFatal(error) => {
log.error(error.getMessage)
add(event, mapper.createObjectNode(), GEOLOCATION)
add(event, new TextNode(error.getMessage), GEOLOCATION + ".Exception")
}
}
}
(latitude, longitude) match {
case (Some(lat: Double), Some(lon: Double)) => {
if (!event.has(LATITUDE)) {
add(event, mapper.createObjectNode(), GEOLOCATION)
add(event, new DoubleNode(lat), GEOLOCATION + "." + LATITUDE)
add(event, new DoubleNode(lon), GEOLOCATION + "." + LONGITUDE)
add(event, new TextNode(method), GEOLOCATION + "." + METHOD)
}
}
case _ =>
}
}
case _ => // No measurements found
}
}
}
case _ => // No events found
}
remove(root, "CommonInfo")
remove(root, "HhoInfo")
remove(root, "RrcRelInfo")
remove(root, "RrcRelTitInfo")
remove(root, "RABInfo")
remove(root, "StatInfo")
remove(root, "SingAccess")
remove(root, "ShoInfos")
remove(root, "SysHoOut")
remove(root, "NetOptInfo")
}
private def doAzimuth(propagationDelay: Int, longitude: Double, latitude: Double, azimuth: Int) : Option[(Double, Double)] = {
distanceByPropagationDelay(propagationDelay) match {
case Some(distInDegrees: Double) =>
Some(longitude + distInDegrees * cos(reduceToRadians(azimuth)), latitude + distInDegrees * sin(reduceToRadians(azimuth)))
case _ =>
None
}
}
private def reduceToRadians(azimuth: Double) : Double =
if (azimuth <= 90) (90 - azimuth) * Pi / 180
else (450 - azimuth) * Pi / 180
private def distanceByPropagationDelay(propagationDelay : Int) : Option[Double] = {
if (propagationDelay == 0) return None
val distanceInMeters = propagationDelay * 234 + 117.0
    // Equivalent distance in degrees of latitude/longitude (~111 km per degree)
Some(distanceInMeters / 111000)
}
private def calculateDistance(lat: Double, lon: Double, dlFcMhz: Double, antennaHeight: Double, rscp: Double, maxTxPower: Double): Double = {
    // Correction factor for the mobile antenna height
val a = 3.2 * pow(log10(11.75 * MOBILE_ANTENNA_HEIGHT), 2) - 4.97
val loss = (maxTxPower/10) - rscp
val distanceInKm = pow(10, (loss - 46.3 -33.9 * log10(dlFcMhz) + 13.82 * log10(antennaHeight) + a - CORRECTION_FACTOR) / (44.9 - 6.55 * log10(antennaHeight)))
    // Equivalent distance in degrees of latitude/longitude (~111 km per degree)
    distanceInKm / 111
}
}
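// Illustrative sketch (not part of the original object): the propagation-delay distance used
// above, restated as a standalone helper. One propagation-delay unit is treated as ~234 m plus
// a half-unit offset, and ~111 km per degree converts metres to degrees, as in Geolocation.
object GeolocationFormulas {
  def propagationDelayToDegrees(propagationDelay: Int): Option[Double] =
    if (propagationDelay == 0) None
    else Some((propagationDelay * 234 + 117.0) / 111000)
}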
| airtonjal/Big-Data-Pipeline | commons/src/main/scala/com/airtonjal/poc/pchr/Geolocation.scala | Scala | mit | 8,013 |
package inloopio.math.vector
import java.util.Random
import scala.reflect.ClassTag
/**
* @author Caoyuan Deng
*/
class InputOutputPointSet[T <: InputOutputPoint: ClassTag] protected (val inputOutputPoints: Array[T]) {
private val inputDimension = inputOutputPoints(0).input.dimension
private var inputMeans = Array.ofDim[Double](inputDimension)
private var inputStdDeviations = Array.ofDim[Double](inputDimension)
private var inputNormalized = Array.ofDim[Boolean](inputDimension)
private val outputDimension = inputOutputPoints(0).output.dimension
private var outputMeans = Array.ofDim[Double](outputDimension)
private var outputStdDeviations = Array.ofDim[Double](outputDimension)
private var outputNormalized = Array.ofDim[Boolean](outputDimension)
def apply(idx: Int): T = {
inputOutputPoints(idx)
}
def update(idx: Int, iop: T) {
inputOutputPoints(idx) = iop
}
def size: Int = {
inputOutputPoints.length
}
def randomizeOrder {
val random = new Random(System.currentTimeMillis)
val n = size
var i = 0
while (i < n) {
val next = random.nextInt(n - i)
val iop = inputOutputPoints(next)
inputOutputPoints(next) = inputOutputPoints(i)
inputOutputPoints(i) = iop
i += 1
}
}
def cloneWithRandomizedOrder: InputOutputPointSet[T] = {
val size1 = size
val newPoints = Array.ofDim[T](size1)
System.arraycopy(inputOutputPoints, 0, newPoints, 0, size1)
val newSet = new InputOutputPointSet(newPoints)
newSet.inputMeans = Array.ofDim[Double](inputMeans.length)
System.arraycopy(inputMeans, 0, newSet.inputMeans, 0, inputMeans.length)
newSet.inputStdDeviations = Array.ofDim[Double](inputStdDeviations.length)
System.arraycopy(inputStdDeviations, 0, newSet.inputStdDeviations, 0, inputStdDeviations.length)
newSet.outputMeans = Array.ofDim[Double](outputMeans.length)
System.arraycopy(outputMeans, 0, newSet.outputMeans, 0, outputMeans.length)
newSet.outputStdDeviations = Array.ofDim[Double](outputStdDeviations.length)
System.arraycopy(outputStdDeviations, 0, newSet.outputStdDeviations, 0, outputStdDeviations.length)
newSet.randomizeOrder
newSet
}
/**
* Normalize values to:
* mean: 0
* standard deviation: 1
* range: about [-1, 1]
*/
def normalizeInputs(dimensionIdx: Int) {
val n = inputOutputPoints.length
val values = Array.ofDim[Double](n)
var i = 0
while (i < n) {
values(i) = inputOutputPoints(i).input(dimensionIdx)
i += 1
}
val normalized = normalize_ZScore(values)
i = 0
while (i < n) {
inputOutputPoints(i).input(dimensionIdx) = normalized(i)
i += 1
}
inputMeans(dimensionIdx) = normalized(n)
inputStdDeviations(dimensionIdx) = normalized(n + 1)
inputNormalized(dimensionIdx) = true
}
/**
* Normalize values to:
* mean: 0
* standard deviation: 1
* range: about [-1, 1]
*
* @NOTICE
   * If the output layer uses linear neurons (y = x), the output is symmetric about 0:
   * it is < 0 with the same probability as > 0, so we could also normalize
   * outputs to [-1, 1] instead of positively.
   *
   * 1. If the hidden neurons' outputs are positive-polarity (such as LogiSigmoidNeuron),
   * when the mean of the initial weights is about 0 the output will be around 0.5,
   * so we had better normalize the outputs to [0, 1], i.e. with mean 0.5 and stdDeviation 0.5.
   *
   * 2. If the hidden neurons' outputs are double-polarity (such as TanhSigmoidNeuron),
   * when the mean of the initial weights is about 0 the output will be around 0,
   * so we had better normalize the outputs to [-1, 1], i.e. with mean 0 and stdDeviation 1.
   *
   * In practice, normalizing outputs to [-1, 1] causes slower convergence.
*/
def normalizeOutputs(dimensionIdx: Int) {
val n = inputOutputPoints.length
val values = Array.ofDim[Double](n)
var i = 0
while (i < n) {
values(i) = inputOutputPoints(i).output(dimensionIdx)
i += 1
}
val normalized = normalize_ZScore(values)
i = 0
while (i < n) {
inputOutputPoints(i).output(dimensionIdx) = normalized(i)
i += 1
}
    outputMeans(dimensionIdx) = normalized(n)
    outputStdDeviations(dimensionIdx) = normalized(n + 1)
outputNormalized(dimensionIdx) = true
}
/**
* Normalize values to:
* mean: 0.5
* standard deviation: 0.5
* range: about [0, 1]
*/
def normalizeOutputsPositively(dimensionIdx: Int) {
val n = inputOutputPoints.length
val values = Array.ofDim[Double](n)
var i = 0
while (i < n) {
values(i) = inputOutputPoints(i).output(dimensionIdx)
i += 1
}
val normalized = normalize_ZScore(values)
i = 0
while (i < n) {
      /** transform to mean: 0.5, standard deviation: 0.5 */
inputOutputPoints(i).output(dimensionIdx) = normalized(i) * 0.5 + 0.5
i += 1
}
/**
* When doing normalize_ZScore(),
* y = (x - mu) / sigma
* Here, we again,
* v = y * 0.5 + 0.5
* So,
* v = ((x - mu) / sigma) * 0.5 + 0.5
* = ((x - mu) + 0.5 * sigma / 0.5) / (sigma / 0.5)
* = (x - (mu - sigma)) / (sigma / 0.5)
* = (x - mu') / sigma'
* where
* mu' = mu - sigma
* sigma' = sigma / 0.5
*/
val mu = normalized(n)
val sigma = normalized(n + 1)
outputMeans(dimensionIdx) = mu - sigma
outputStdDeviations(dimensionIdx) = sigma / 0.5
outputNormalized(dimensionIdx) = true
}
def normalizeAllInputs {
val n = inputOutputPoints(0).input.dimension
var i = 0
while (i < n) {
normalizeInputs(i)
i += 1
}
}
def normalizeAllOutputs {
val n = inputOutputPoints(0).output.dimension
var i = 0
while (i < n) {
normalizeOutputs(i)
i += 1
}
}
def normalizePositivelyAllOutputs {
val n = inputOutputPoints(0).output.dimension
var i = 0
while (i < n) {
normalizeOutputsPositively(i)
i += 1
}
}
def normalizeInput(input: Vec) {
val n = input.dimension
var i = 0
while (i < n) {
val value = input(i)
input(i) = normalizeInput(i, value)
i += 1
}
}
def normalizeOutput(output: Vec) {
val n = output.dimension
var i = 0
while (i < n) {
val value = output(i)
output(i) = normalizeOutput(i, value)
i += 1
}
}
def normalizePositivelyOutput(output: Vec) {
    /** the mean and stdDeviation already account for the positive case, so this is the same as: */
normalizeOutput(output)
}
def normalizeInput(dimensionIdx: Int, value: Double): Double = {
if (inputNormalized(dimensionIdx)) {
(value - inputMeans(dimensionIdx)) / inputStdDeviations(dimensionIdx)
} else {
/** the mean and stdDeviation of this dimensionIdx are not computed yet */
value
}
}
def normalizeOutput(dimensionIdx: Int, value: Double): Double = {
if (outputNormalized(dimensionIdx)) {
(value - outputMeans(dimensionIdx)) / outputStdDeviations(dimensionIdx)
} else {
/** the mean and stdDeviation of this dimensionIdx are not computed yet */
value
}
}
def normalizePositivelyOutput(dimensionIdx: Int, value: Double): Double = {
    /** the mean and stdDeviation already account for the positive case, so this is the same as: */
normalizeOutput(dimensionIdx, value)
}
def reinstateInput(input: Vec) {
val n = input.dimension
var i = 0
while (i < n) {
val value = input(i) * inputStdDeviations(i) + inputMeans(i)
input(i) = value
i += 1
}
}
def reinstateOutput(output: Vec) {
val n = output.dimension
var i = 0
while (i < n) {
val value = output(i) * outputStdDeviations(i) + outputMeans(i)
output(i) = value
i += 1
}
}
def reinstateInput(dimensionIdx: Int, value: Double): Double = {
value * inputStdDeviations(dimensionIdx) + inputMeans(dimensionIdx)
}
def reinstateOutput(dimensionIdx: Int, value: Double): Double = {
value * outputStdDeviations(dimensionIdx) + outputMeans(dimensionIdx)
}
/**
* Normalize values to:
* mean: 0
* standard deviation: 1
* range: about [-1, 1]
*/
private def normalize_ZScore(values: Array[Double]): Array[Double] = {
val n = values.length
/** compute mean value */
var sum = 0.0
var i = 0
while (i < n) {
sum += values(i)
i += 1
}
val mean = sum / (n * 1d)
/** compute standard deviation */
var deviation_square_sum = 0d
i = 0
while (i < n) {
val deviation = values(i) - mean
deviation_square_sum += deviation * deviation
i += 1
}
var stdDeviation = math.sqrt(deviation_square_sum / (n * 1d))
//("Mean: " + mean + " Standard Deviation: " + stdDeviation)
if (stdDeviation == 0) {
stdDeviation = 1
}
/**
* do 'Z Score' normalization.
* 2 more dimensions are added to store mean and stdDeviation
*/
val normalized = Array.ofDim[Double](n + 2)
i = 0
while (i < n) {
normalized(i) = (values(i) - mean) / stdDeviation
i += 1
}
normalized(n) = mean
normalized(n + 1) = stdDeviation
normalized
}
/**
* y = (0.9 - 0.1) / (xmax - xmin) * x + (0.9 - (0.9 - 0.1) / (xmax - xmin) * xmax)
* = 0.8 / (xmax - xmin) * x + (0.9 - 0.8 / (xmax - xmin) * xmax)
*/
private def normalizePositively_MinMax(values: Array[Double]): Array[Double] = {
val n = values.length
/** compute min max value */
var min = Double.MaxValue
var max = Double.MinValue
var i = 0
while (i < n) {
val value = values(i)
if (value < min) {
min = value
}
if (value > max) {
max = value
}
i += 1
}
val mean = min
val stdDeviation = max - min
//println("normOutput: " + mean + " deviationOutput: " + stdDeviation)
/** do 'min max' normalization */
val normalized = Array.ofDim[Double](n + 2)
i = 0
while (i < n) {
normalized(i) = (values(i) - mean) / stdDeviation
i += 1
}
normalized(n) = mean
normalized(n + 1) = stdDeviation
normalized
}
private def normalizePositively_CustomMinMax(values: Array[Double]): Array[Double] = {
val n = values.length
/** compute max min value */
val max = 30000
val min = 0
val mean = min
val stdDeviation = max - min
/** do 'maxmin' standardization */
val normalized = Array.ofDim[Double](n + 2)
var i = 0
while (i < n) {
normalized(i) = (values(i) - mean) / stdDeviation
i += 1
}
normalized(n) = mean
normalized(n + 1) = stdDeviation
normalized
}
}
object InputOutputPointSet {
def apply[T <: InputOutputPoint: ClassTag](inputOutputPoints: Array[T]) = new InputOutputPointSet(inputOutputPoints)
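  // Illustrative sketch (not part of the original file): typical preparation of a point set
  // before training, using the normalization helpers defined above. The helper name is made up.
  def normalizedCopy[T <: InputOutputPoint: ClassTag](points: Array[T]): InputOutputPointSet[T] = {
    val set = InputOutputPointSet(points)
    set.normalizeAllInputs
    set.normalizePositivelyAllOutputs
    set
  }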
} | dcaoyuan/inloopio-libs | inloopio-math/src/main/scala/inloopio/math/vector/InputOutputPointSet.scala | Scala | bsd-3-clause | 10,995 |
package com.harrys.file
import java.io.File
import java.nio.file.attribute.FileAttribute
import java.nio.file.{Files, Path}
import java.util.concurrent._
import com.typesafe.scalalogging.Logger
import org.apache.commons.io.FileUtils
import org.slf4j.LoggerFactory
import scala.collection.mutable
import scala.ref._
/**
* Created by chris on 10/14/15.
*/
class TransientFileFactory(directory: Path) {
private val log = Logger(LoggerFactory.getLogger(this.getClass))
private val pendingRefFiles = new mutable.HashSet[Reference[File]]()
private val phantomRefQueue = new ReferenceQueue[File]()
private val cleanupExecutor = new ThreadPoolExecutor(0, 1, 10, TimeUnit.MILLISECONDS, new LinkedBlockingQueue[Runnable](), new ThreadFactory {
override def newThread(r: Runnable): Thread = {
val thread = new Thread(r)
thread.setDaemon(true)
thread.setName("TransientFileCleanup")
thread
}
})
final def create(prefix: String, suffix: String, attributes: FileAttribute[_]*) : File = {
val tempFile = Files.createTempFile(directory, prefix, suffix, attributes:_*).toFile
registerNewTempFile(tempFile)
}
final def shutdown() : Unit = {
if (this == TransientFileFactory.default){
log.warn("Rejecting attempt to stop the default instance")
} else {
this.forceShutdown()
}
}
private final def forceShutdown() : Unit = {
cleanupExecutor.shutdown()
pendingRefFiles.synchronized {
pendingRefFiles.flatMap(_.get).foreach(_.deleteOnExit)
pendingRefFiles.clear()
}
if (!cleanupExecutor.awaitTermination(50, TimeUnit.MILLISECONDS)){
log.warn("Forcing Executor shutdown")
cleanupExecutor.shutdownNow()
}
}
private final def registerNewTempFile(file: File) : File = {
val phantomRef = new PhantomReference[File](file, phantomRefQueue)
log.debug(s"Registered new Transient File: ${file.getAbsolutePath}")
pendingRefFiles.synchronized {
if (pendingRefFiles.isEmpty){
cleanupExecutor.execute(new CleanupPollingTask())
}
pendingRefFiles.add(phantomRef)
}
return file
}
private final def cleanupRegisteredRef(ref: Reference[File]) : Unit = {
pendingRefFiles.synchronized {
pendingRefFiles.remove(ref)
}
ref.get.collect {
case file if file.exists() =>
log.debug(s"Deleting Transient File: ${file.getAbsolutePath}")
file.delete()
}
ref.clear()
}
private final class CleanupPollingTask extends Runnable {
override final def run() : Unit = {
while (!cleanupExecutor.isTerminating && pendingRefFiles.synchronized { pendingRefFiles.nonEmpty }){
phantomRefQueue.remove.foreach(cleanupRegisteredRef)
}
}
}
}
object TransientFileFactory {
lazy val default = {
val factory = new TransientFileFactory(FileUtils.getTempDirectory.toPath)
Runtime.getRuntime.addShutdownHook(new Thread(new Runnable(){
def run() : Unit = factory.forceShutdown()
}))
factory
}
final def create(prefix: String, suffix: String, attributes: FileAttribute[_]*) : File = {
default.create(prefix, suffix, attributes:_*)
}
}
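// Hypothetical usage sketch (not part of the original file): creating a transient file through
// the default factory. The file is deleted once it becomes unreachable, or marked deleteOnExit
// if the factory is shut down first. The object name and prefix/suffix values are illustrative.
object TransientFileExample {
  def writeScratch(): File =
    TransientFileFactory.create("scratch-", ".tmp")
}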
| harrystech/ingestion-utils | src/main/scala/com/harrys/file/TransientFileFactory.scala | Scala | mit | 3,169 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package model.assessmentscores
import model.UniqueIdentifier
import org.joda.time.DateTime
import play.api.libs.json.JodaWrites._ // This is needed for DateTime serialization
import play.api.libs.json.JodaReads._ // This is needed for DateTime serialization
import repositories._
import play.api.libs.json.Json
import reactivemongo.bson.{ BSONDocument, BSONHandler, Macros }
case class AssessmentScoresFinalFeedback(
feedback: String,
updatedBy: UniqueIdentifier,
acceptedDate: DateTime,
version: Option[String] = None
) extends AssessmentScoresSection
object AssessmentScoresFinalFeedback {
implicit val jsonFormat = Json.format[AssessmentScoresFinalFeedback]
implicit val bsonHandler: BSONHandler[BSONDocument, AssessmentScoresFinalFeedback] =
Macros.handler[AssessmentScoresFinalFeedback]
}
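// Illustrative sketch (not part of the original file): the case class serialises to JSON via the
// implicit jsonFormat defined in its companion object. The object and method names are made up.
object AssessmentScoresFinalFeedbackExample {
  def toJson(feedback: AssessmentScoresFinalFeedback): play.api.libs.json.JsValue =
    Json.toJson(feedback)
}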
| hmrc/fset-faststream | app/model/assessmentscores/AssessmentScoresFinalFeedback.scala | Scala | apache-2.0 | 1,418 |
import sbt._
import sbt.Keys._
import com.mojolly.scalate.ScalatePlugin._
import ScalateKeys._
object build extends Build {
val templateSettings = scalateSettings ++ Seq(
scalateOverwrite := true,
scalateTemplateConfig in Compile <<= (baseDirectory) { base =>
Nil
}
)
lazy val root = Project("root", file(".")).settings(templateSettings:_*)
} | KIZI/EasyMiner-Apriori-R | project/build.scala | Scala | bsd-3-clause | 370 |
/*
* Copyright 2014 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ibm.spark.magic.builtin
import java.io.PrintStream
import com.datastax.spark.connector.cql._
import com.google.common.base.Strings
import com.ibm.spark.kernel.protocol.v5.MIMEType
import com.ibm.spark.magic._
import com.ibm.spark.magic.dependencies.{IncludeOutputStream, IncludeSparkContext}
import com.ibm.spark.utils.ArgumentParsingSupport
class ShowSchema extends CellMagic with ArgumentParsingSupport
with IncludeOutputStream with IncludeSparkContext {
// Lazy because the outputStream is not provided at construction
private lazy val printStream = new PrintStream(outputStream)
  override def execute(code: String): CellMagicOutput = {
    def printHelpAndReturn: CellMagicOutput = {
      printHelp(printStream, """%%ShowSchema <keyspace>.<table>""")
      CellMagicOutput()
    }
    if (Strings.isNullOrEmpty(code)) printHelpAndReturn
    else CellMagicOutput(MIMEType.TextHtml -> getSchema(code))
  }
def getSchema(ksTable: String) = {
def col2td (c: ColumnDef) = "<tr><td>" + c.columnName + "</td><td>" + c.columnType + "</td><td>" +
(c.columnRole match { case RegularColumn => ""
case PartitionKeyColumn => "partition key"
case ClusteringColumn(x) => "cluster key " + x.toString
case StaticColumn => "static"}) + "</td></tr>"
def table2tbl (t:TableDef) = "<b>" +
t.keyspaceName + "." + t.tableName +
"</b><table>" +
(t.partitionKey ++
t.clusteringColumns ++
t.regularColumns).map(col2td).mkString +
"</table>"
val connector = CassandraConnector(sparkContext.getConf)
val ksTableSplit = ksTable.split('.')
val (k,t) = ksTableSplit.length match {
case 0 => (None,None)
case 1 => (Some(ksTableSplit(0)),None)
case 2 => (Some(ksTableSplit(0)), Some(ksTableSplit(1)))
}
val schema = Schema.fromCassandra(connector, k, t)
schema.tables.map(table2tbl).mkString
}
}
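// Hypothetical sketch (not part of the original file): the magic can also be exercised directly;
// the keyspace/table name "music.tracks" is made up for illustration.
object ShowSchemaExample {
  def render(magic: ShowSchema): String = magic.getSchema("music.tracks")
}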
| slowenthal/spark-kernel | kernel/src/main/scala/com/ibm/spark/magic/builtin/ShowSchema.scala | Scala | apache-2.0 | 2,449 |
import play.Project._
import sbt._
object ApplicationBuild extends Build {
val appName = "first-web-scala-project"
val appVersion = "1.0-SNAPSHOT"
val appDependencies = Seq(
// Add your project dependencies here,
jdbc,
anorm,
"com.typesafe.slick" %% "slick" % "2.1.0",
"com.h2database" % "h2" % "1.3.175"
)
val main = play.Project(appName, appVersion, appDependencies).settings(
// Add your own project settings here
)
}
| fallen-s4e/elibrary-play | project/Build.scala | Scala | bsd-2-clause | 481 |
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.sparta.serving.api.actor
import akka.actor.{ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit}
import com.stratio.sparta.driver.service.StreamingContextService
import com.stratio.sparta.serving.core.actor.{RequestActor, FragmentActor, StatusActor}
import com.stratio.sparta.serving.core.config.SpartaConfig
import com.stratio.sparta.serving.core.constants.AkkaConstant
import org.apache.curator.framework.CuratorFramework
import org.junit.runner.RunWith
import org.scalamock.scalatest.MockFactory
import org.scalatest._
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class ControllerActorTest(_system: ActorSystem) extends TestKit(_system)
with ImplicitSender
with WordSpecLike
with Matchers
with BeforeAndAfterAll
with MockFactory {
SpartaConfig.initMainConfig()
SpartaConfig.initApiConfig()
val curatorFramework = mock[CuratorFramework]
val statusActor = _system.actorOf(Props(new StatusActor(curatorFramework)))
val executionActor = _system.actorOf(Props(new RequestActor(curatorFramework)))
val streamingContextService = new StreamingContextService(curatorFramework)
val fragmentActor = _system.actorOf(Props(new FragmentActor(curatorFramework)))
val policyActor = _system.actorOf(Props(new PolicyActor(curatorFramework, statusActor)))
val sparkStreamingContextActor = _system.actorOf(
Props(new LauncherActor(streamingContextService, curatorFramework)))
val pluginActor = _system.actorOf(Props(new PluginActor()))
val configActor = _system.actorOf(Props(new ConfigActor()))
def this() =
this(ActorSystem("ControllerActorSpec", SpartaConfig.daemonicAkkaConfig))
implicit val actors = Map(
AkkaConstant.StatusActorName -> statusActor,
AkkaConstant.FragmentActorName -> fragmentActor,
AkkaConstant.PolicyActorName -> policyActor,
AkkaConstant.LauncherActorName -> sparkStreamingContextActor,
AkkaConstant.PluginActorName -> pluginActor,
AkkaConstant.ExecutionActorName -> executionActor,
AkkaConstant.ConfigActorName -> configActor
)
override def afterAll {
TestKit.shutdownActorSystem(system)
}
"ControllerActor" should {
"set up the controller actor that contains all sparta's routes without any error" in {
_system.actorOf(Props(new ControllerActor(actors, curatorFramework)))
}
}
}
| fjsc/sparta | serving-api/src/test/scala/com/stratio/sparta/serving/api/actor/ControllerActorTest.scala | Scala | apache-2.0 | 2,974 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.aggregate
import java.util.concurrent.TimeUnit._
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.util.truncatedString
import org.apache.spark.sql.execution._
import org.apache.spark.sql.execution.metric.SQLMetrics
/**
* A hash-based aggregate operator that supports [[TypedImperativeAggregate]] functions that may
* use arbitrary JVM objects as aggregation states.
*
* Similar to [[HashAggregateExec]], this operator also falls back to sort-based aggregation when
* the size of the internal hash map exceeds the threshold. The differences are:
*
* - It uses safe rows as aggregation buffer since it must support JVM objects as aggregation
* states.
*
* - It tracks entry count of the hash map instead of byte size to decide when we should fall back.
* This is because it's hard to estimate the accurate size of arbitrary JVM objects in a
* lightweight way.
*
* - Whenever fallen back to sort-based aggregation, this operator feeds all of the rest input rows
* into external sorters instead of building more hash map(s) as what [[HashAggregateExec]] does.
* This is because having too many JVM object aggregation states floating there can be dangerous
* for GC.
*
* - CodeGen is not supported yet.
*
* This operator may be turned off by setting the following SQL configuration to `false`:
* {{{
* spark.sql.execution.useObjectHashAggregateExec
* }}}
* The fallback threshold can be configured by tuning:
* {{{
* spark.sql.objectHashAggregate.sortBased.fallbackThreshold
* }}}
*/
case class ObjectHashAggregateExec(
requiredChildDistributionExpressions: Option[Seq[Expression]],
groupingExpressions: Seq[NamedExpression],
aggregateExpressions: Seq[AggregateExpression],
aggregateAttributes: Seq[Attribute],
initialInputBufferOffset: Int,
resultExpressions: Seq[NamedExpression],
child: SparkPlan)
extends BaseAggregateExec {
override lazy val allAttributes: AttributeSeq =
child.output ++ aggregateBufferAttributes ++ aggregateAttributes ++
aggregateExpressions.flatMap(_.aggregateFunction.inputAggBufferAttributes)
override lazy val metrics = Map(
"numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"),
"aggTime" -> SQLMetrics.createTimingMetric(sparkContext, "time in aggregation build"),
"spillSize" -> SQLMetrics.createSizeMetric(sparkContext, "spill size"),
"numTasksFallBacked" -> SQLMetrics.createMetric(sparkContext, "number of sort fallback tasks")
)
protected override def doExecute(): RDD[InternalRow] = {
val numOutputRows = longMetric("numOutputRows")
val aggTime = longMetric("aggTime")
val spillSize = longMetric("spillSize")
val numTasksFallBacked = longMetric("numTasksFallBacked")
val fallbackCountThreshold = conf.objectAggSortBasedFallbackThreshold
child.execute().mapPartitionsWithIndexInternal { (partIndex, iter) =>
val beforeAgg = System.nanoTime()
val hasInput = iter.hasNext
val res = if (!hasInput && groupingExpressions.nonEmpty) {
// This is a grouped aggregate and the input kvIterator is empty,
// so return an empty kvIterator.
Iterator.empty
} else {
val aggregationIterator =
new ObjectAggregationIterator(
partIndex,
child.output,
groupingExpressions,
aggregateExpressions,
aggregateAttributes,
initialInputBufferOffset,
resultExpressions,
(expressions, inputSchema) =>
MutableProjection.create(expressions, inputSchema),
inputAttributes,
iter,
fallbackCountThreshold,
numOutputRows,
spillSize,
numTasksFallBacked)
if (!hasInput && groupingExpressions.isEmpty) {
numOutputRows += 1
Iterator.single[UnsafeRow](aggregationIterator.outputForEmptyGroupingKeyWithoutInput())
} else {
aggregationIterator
}
}
aggTime += NANOSECONDS.toMillis(System.nanoTime() - beforeAgg)
res
}
}
override def verboseString(maxFields: Int): String = toString(verbose = true, maxFields)
override def simpleString(maxFields: Int): String = toString(verbose = false, maxFields)
private def toString(verbose: Boolean, maxFields: Int): String = {
val allAggregateExpressions = aggregateExpressions
val keyString = truncatedString(groupingExpressions, "[", ", ", "]", maxFields)
val functionString = truncatedString(allAggregateExpressions, "[", ", ", "]", maxFields)
val outputString = truncatedString(output, "[", ", ", "]", maxFields)
if (verbose) {
s"ObjectHashAggregate(keys=$keyString, functions=$functionString, output=$outputString)"
} else {
s"ObjectHashAggregate(keys=$keyString, functions=$functionString)"
}
}
override protected def withNewChildInternal(newChild: SparkPlan): ObjectHashAggregateExec =
copy(child = newChild)
}
object ObjectHashAggregateExec {
def supportsAggregate(aggregateExpressions: Seq[AggregateExpression]): Boolean = {
aggregateExpressions.map(_.aggregateFunction).exists {
case _: TypedImperativeAggregate[_] => true
case _ => false
}
}
}
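// Illustrative sketch (not part of Spark): toggling this operator and its sort-based fallback
// threshold from a SparkSession, using the configuration keys named in the class comment above.
// The threshold value 128 is an arbitrary example.
object ObjectHashAggregateConfigExample {
  def configure(spark: org.apache.spark.sql.SparkSession): Unit = {
    spark.conf.set("spark.sql.execution.useObjectHashAggregateExec", "true")
    spark.conf.set("spark.sql.objectHashAggregate.sortBased.fallbackThreshold", "128")
  }
}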
| maropu/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/ObjectHashAggregateExec.scala | Scala | apache-2.0 | 6,290 |
package de.unihamburg.vsis.sddf.visualisation.logger
import de.unihamburg.vsis.sddf.visualisation.model.Analysable
import de.unihamburg.vsis.sddf.visualisation.model.BasicAnalysable
object BasicOutputter extends Outputter {
override def logCustomResults(analysable: Analysable) = {
analysable match {
case ana: BasicAnalysable => {
ana.values.foreach(kvp => {
printLogLine(kvp._1, kvp._2)
})
}
case _ => log.error("Analysable " + analysable.getClass() +
" is not of the needed type")
}
}
}
| numbnut/sddf | src/main/scala/de/unihamburg/vsis/sddf/visualisation/logger/BasicOutputter.scala | Scala | gpl-3.0 | 558 |
package com.lynbrookrobotics.potassium.events
import com.lynbrookrobotics.potassium.Signal
import com.lynbrookrobotics.potassium.tasks.{ContinuousTask, Task}
/**
* An event that has a start, running, and ending phase.
*/
class ContinuousEvent {
private val onStartSource = new ImpulseEventSource
private val onEndSource = new ImpulseEventSource
/**
* An event that is fired when the continuous event starts
*/
val onStart: ImpulseEvent = onStartSource.event
/**
* An event that is fired when the continuous event ends
*/
val onEnd: ImpulseEvent = onEndSource.event
private var onEventTrueCallbacks: List[() => Unit] = List.empty
private var onUpdateCallbacks: List[Boolean => Unit] = List.empty
private[events] var isRunning = false
protected def updateEventState(eventTrue: Boolean): Unit = {
onUpdateCallbacks.foreach(_.apply(eventTrue))
if (eventTrue) {
onEventTrueCallbacks.foreach(_.apply())
if (!isRunning) {
onStartSource.fire()
isRunning = true
}
} else if (isRunning) {
onEndSource.fire()
isRunning = false
}
}
/**
* Adds a listener to be called while the event is happening (aka, condition returns true)
* @param onTrue a function to be called continuously when the event is happening
*/
def foreach(onTrue: () => Unit): Unit = {
onEventTrueCallbacks = onTrue :: onEventTrueCallbacks
}
/**
   * Adds a listener to be called on every update of the event, regardless of whether the event is true
   * @param onUpdate a callback receiving the current state of the event on each update
*/
private[events] def foreachUpdate(onUpdate: Boolean => Unit) = {
onUpdateCallbacks = onUpdate :: onUpdateCallbacks
}
/**
* Adds a mapping to run a task while the continuous event is running
* @param task the task to run during the event
*/
def foreach(task: ContinuousTask): Unit = {
onStart.foreach { () =>
Task.abortCurrentTask()
Task.executeTask(task)
}
onEnd.foreach(() => Task.abortTask(task))
}
/**
* Adds a mapping to run a task while the continuous event is running
* @param task the task to run during the event
*/
def foreach(task: Signal[ContinuousTask]): Unit = {
var currentRunningTask: ContinuousTask = null
onStart.foreach { () =>
Task.abortCurrentTask()
currentRunningTask = task.get
Task.executeTask(currentRunningTask)
}
onEnd.foreach(() => Task.abortTask(currentRunningTask))
}
/**
* Returns a continuous event that is an intersection of both events
* @param other the event to intersect with the original
*/
def &&(other: ContinuousEvent): ContinuousEvent = {
val (intersectionEvent, updateAndEvent) = ContinuousEvent.newEvent
var parentATrue = false
var parentBTrue = false
this.foreachUpdate { isTrue =>
parentATrue = isTrue
updateAndEvent.apply(parentATrue && parentBTrue)
}
other.foreachUpdate { isTrue =>
parentBTrue = isTrue
updateAndEvent.apply(parentATrue && parentBTrue)
}
intersectionEvent
}
def unary_!(): ContinuousEvent = {
val (negatedEvent, updateNegatedEventState) = ContinuousEvent.newEvent
this.foreachUpdate(parentEventTrue => updateNegatedEventState(!parentEventTrue))
negatedEvent
}
}
object ContinuousEvent {
/**
* @return a ContinuousEvent and a function to update whether the event
* is true or not
*/
def newEvent: (ContinuousEvent, Boolean => Unit) = {
val ret = new ContinuousEvent
(ret, condition => ret.updateEventState(condition))
}
}
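
// --- Added illustration (not part of the original source) ---
// A minimal, hedged usage sketch of the API defined above. It relies only on
// ContinuousEvent.newEvent, `&&`, foreach and the onStart/onEnd impulse events;
// the hard-coded updates stand in for values that would normally be polled from hardware.
object ContinuousEventUsageSketch {
  def main(args: Array[String]): Unit = {
    val (buttonPressed, setButtonPressed) = ContinuousEvent.newEvent
    val (targetVisible, setTargetVisible) = ContinuousEvent.newEvent

    // The intersection is true only while both parent events are true
    val shootingAllowed = buttonPressed && targetVisible

    shootingAllowed.onStart.foreach(() => println("started"))
    shootingAllowed.foreach(() => println("running"))
    shootingAllowed.onEnd.foreach(() => println("ended"))

    setButtonPressed(true)  // intersection still false: nothing fires
    setTargetVisible(true)  // intersection becomes true: prints "running" then "started"
    setTargetVisible(false) // intersection becomes false: prints "ended"
  }
}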
| Team846/potassium | core/shared/src/main/scala/com/lynbrookrobotics/potassium/events/ContinuousEvent.scala | Scala | mit | 3,546 |
/*******************************************************************************
* Copyright (c) 2014 Guillaume DUBUISSON DUPLESSIS <[email protected]>.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the GNU Public License v3.0
* which accompanies this distribution, and is available at
* http://www.gnu.org/licenses/gpl.html
*
* Contributors:
* Guillaume DUBUISSON DUPLESSIS <[email protected]> - initial API and implementation
******************************************************************************/
package multiwayTree.P73
import multiwayTree.MTree
import org.scalatest.Args
import scala.util.parsing.combinator.JavaTokenParsers
class sol01 extends P73 {
def lipsy(t: MTree[Char]): String =
if (t.children.isEmpty) {
t.value.toString
} else {
s"(${t.value} ${t.children.map(lipsy(_)).mkString(" ")})"
}
// Usage of Scala combinator
object TreeParser extends JavaTokenParsers {
// Define the expected value of a node (here a Char)
def value: Parser[Char] = """[a-zA-Z]""".r ^^ { _.head }
def simpleTree: Parser[MTree[Char]] = value ^^ { MTree(_) }
// Define the string representation of a non-leaf node
def tree: Parser[MTree[Char]] =
simpleTree | ("(" ~> value ~ rep(" " ~> simpleTree | tree) <~ ")") ^^ {
case value ~ children =>
if (children.isEmpty) {
MTree(value)
} else {
MTree(value, children)
}
}
def apply(input: String): MTree[Char] = parseAll(tree, input) match {
case Success(result, _) => result
case failure: NoSuccess => scala.sys.error(failure.msg)
}
}
def lipsyStringToMTree(t: String): MTree[Char] =
TreeParser(t)
}
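
// --- Added illustration (not part of the original source) ---
// A hedged sketch of how the two conversions above are meant to be used together.
// It only uses the MTree constructors already referenced in this file; whether the
// final equality check prints true depends on MTree defining structural equality.
object sol01Demo {
  def main(args: Array[String]): Unit = {
    val solver = new sol01
    val tree = MTree('a', List(MTree('f', List(MTree('g'))), MTree('c')))

    val lispyString = solver.lipsy(tree)
    println(lispyString) // (a (f g) c)

    val parsedBack = solver.lipsyStringToMTree(lispyString)
    println(parsedBack == tree) // expected: true (assuming structural equality on MTree)
  }
}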
| GuillaumeDD/scala99problems | src/main/scala/multiwayTree/P73/sol01.scala | Scala | gpl-3.0 | 1,829 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.bwsw.tstreams.common
import org.slf4j.LoggerFactory
/**
* Created by Ivan Kudryavtsev on 29.08.16.
*/
object ThreadAmountCalculationUtility {
val logger = LoggerFactory.getLogger(this.getClass)
def calculateEvenThreadsAmount(minThreads: Int, maxThreads: Int): Int = {
if (minThreads >= maxThreads) {
logger.warn(s"User requested $minThreads worker threads, but total partitions amount is $maxThreads. Will use $maxThreads")
return maxThreads
}
if (minThreads <= 0) {
logger.warn(s"User requested $minThreads worker threads, but minimal amount is 1. Will use 1 worker thread.")
return 1
}
if (maxThreads % minThreads == 0) {
return minThreads
}
for (i <- minThreads to maxThreads) {
if (maxThreads % i == 0)
return i
}
maxThreads
}
}
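
// --- Added illustration (not part of the original source) ---
// A quick sketch of the divisor search above: the smallest thread count that is at
// least the requested minimum and divides the partition count evenly is returned.
object ThreadAmountCalculationExample {
  def main(args: Array[String]): Unit = {
    println(ThreadAmountCalculationUtility.calculateEvenThreadsAmount(3, 8))  // 4 (3 does not divide 8, 4 does)
    println(ThreadAmountCalculationUtility.calculateEvenThreadsAmount(2, 8))  // 2 (already an even split)
    println(ThreadAmountCalculationUtility.calculateEvenThreadsAmount(16, 8)) // 8 (capped at the partition count)
  }
}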
| bwsw/t-streams | src/main/scala/com/bwsw/tstreams/common/ThreadAmountCalculationUtility.scala | Scala | apache-2.0 | 1,650 |
/*
* Copyright 2000-2014 JetBrains s.r.o.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jetbrains.plugins.scala.lang.psi.api.macros
import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiElement
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScFunction
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.api.Any
/**
* @author Mikhail.Mutcianko
* date 22.12.14
*/
case class MacroContext(place: PsiElement, expectedType: Option[ScType])
trait ScalaMacroExpandable {
def expandMacro(macros: ScFunction, context: MacroContext): Option[ScalaPsiElement]
}
trait ScalaMacroTypeable {
def checkMacro(macros: ScFunction, context: MacroContext): Option[ScType]
}
object ScalaMacroDummyTypeable extends ScalaMacroTypeable {
def checkMacro(macros: ScFunction, context: MacroContext) = Some(Any)
} | whorbowicz/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/api/macros/ScalaMacroTraits.scala | Scala | apache-2.0 | 1,425 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs102.boxes
import uk.gov.hmrc.ct.box._
case class AC7600(value: Option[Boolean]) extends CtBoxIdentifier(name = "Enter Changes in presentation and prior period adjustments note?")
with CtOptionalBoolean
with Input
| liquidarmour/ct-calculations | src/main/scala/uk/gov/hmrc/ct/accounts/frs102/boxes/AC7600.scala | Scala | apache-2.0 | 859 |
package one.murch.bitcoin.coinselection
import scala.collection.mutable.ListBuffer
/**
* Created by murch on 31.12.16.
*/
class Scenario(var startingUtxoSet: Set[Utxo], var operations: ListBuffer[Payment], var descriptor: String) {
}
| Xekyo/CoinSelectionSimulator | src/main/scala/one/murch/bitcoin/coinselection/Scenario.scala | Scala | mit | 241 |
package by.pavelverk.hardwrite.core.result
import by.pavelverk.hardwrite.core.{Result, SampleId}
import by.pavelverk.hardwrite.utils.db.DatabaseConnector
private[result] trait ResultTable {
protected val databaseConnector: DatabaseConnector
import databaseConnector.profile.api._
class Results(tag: Tag) extends Table[Result](tag, "results") {
def id = column[SampleId]("id", O.PrimaryKey)
def neuro_results = column[Double]("neuro_results")
def psicho_result = column[Int]("psicho_result")
def * = (id, neuro_results, psicho_result) <> ((Result.apply _).tupled, Result.unapply)
}
protected val results = TableQuery[Results]
}
| VerkhovtsovPavel/BSUIR_Labs | Master/back/akka-http-rest-master/src/main/scala/by/pavelverk/hardwrite/core/result/ResultTable.scala | Scala | mit | 659 |
package java.nio
object ByteBuffer {
private final val HashSeed = -547316498 // "java.nio.ByteBuffer".##
def allocate(capacity: Int): ByteBuffer =
wrap(new Array[Byte](capacity))
// TODO def allocateDirect(capacity: Int): ByteBuffer = ???
def wrap(array: Array[Byte], offset: Int, length: Int): ByteBuffer =
HeapByteBuffer.wrap(array, 0, array.length, offset, length, false)
def wrap(array: Array[Byte]): ByteBuffer =
wrap(array, 0, array.length)
}
abstract class ByteBuffer private[nio] (_capacity: Int,
private[nio] val _array: Array[Byte],
private[nio] val _arrayOffset: Int)
extends Buffer(_capacity)
with Comparable[ByteBuffer] {
private[nio] type ElementType = Byte
private[nio] type BufferType = ByteBuffer
def this(_capacity: Int) = this(_capacity, null, -1)
private[nio] var _isBigEndian: Boolean = true
def slice(): ByteBuffer
def duplicate(): ByteBuffer
def asReadOnlyBuffer(): ByteBuffer
def get(): Byte
def put(b: Byte): ByteBuffer
def get(index: Int): Byte
def put(index: Int, b: Byte): ByteBuffer
@noinline
def get(dst: Array[Byte], offset: Int, length: Int): ByteBuffer =
GenBuffer(this).generic_get(dst, offset, length)
def get(dst: Array[Byte]): ByteBuffer =
get(dst, 0, dst.length)
@noinline
def put(src: ByteBuffer): ByteBuffer =
GenBuffer(this).generic_put(src)
@noinline
def put(src: Array[Byte], offset: Int, length: Int): ByteBuffer =
GenBuffer(this).generic_put(src, offset, length)
final def put(src: Array[Byte]): ByteBuffer =
put(src, 0, src.length)
@inline final def hasArray(): Boolean =
GenBuffer(this).generic_hasArray()
@inline final def array(): Array[Byte] =
GenBuffer(this).generic_array()
@inline final def arrayOffset(): Int =
GenBuffer(this).generic_arrayOffset()
def compact(): ByteBuffer
def isDirect(): Boolean
// toString(): String inherited from Buffer
@noinline
override def hashCode(): Int =
GenBuffer(this).generic_hashCode(ByteBuffer.HashSeed)
override def equals(that: Any): Boolean = that match {
case that: ByteBuffer => compareTo(that) == 0
case _ => false
}
@noinline
def compareTo(that: ByteBuffer): Int =
GenBuffer(this).generic_compareTo(that)(_.compareTo(_))
final def order(): ByteOrder =
if (_isBigEndian) ByteOrder.BIG_ENDIAN
else ByteOrder.LITTLE_ENDIAN
final def order(bo: ByteOrder): ByteBuffer = {
if (bo == null)
throw new NullPointerException
_isBigEndian = bo == ByteOrder.BIG_ENDIAN
this
}
def getChar(): Char
def putChar(value: Char): ByteBuffer
def getChar(index: Int): Char
def putChar(index: Int, value: Char): ByteBuffer
def asCharBuffer(): CharBuffer
def getShort(): Short
def putShort(value: Short): ByteBuffer
def getShort(index: Int): Short
def putShort(index: Int, value: Short): ByteBuffer
def asShortBuffer(): ShortBuffer
def getInt(): Int
def putInt(value: Int): ByteBuffer
def getInt(index: Int): Int
def putInt(index: Int, value: Int): ByteBuffer
def asIntBuffer(): IntBuffer
def getLong(): Long
def putLong(value: Long): ByteBuffer
def getLong(index: Int): Long
def putLong(index: Int, value: Long): ByteBuffer
def asLongBuffer(): LongBuffer
def getFloat(): Float
def putFloat(value: Float): ByteBuffer
def getFloat(index: Int): Float
def putFloat(index: Int, value: Float): ByteBuffer
def asFloatBuffer(): FloatBuffer
def getDouble(): Double
def putDouble(value: Double): ByteBuffer
def getDouble(index: Int): Double
def putDouble(index: Int, value: Double): ByteBuffer
def asDoubleBuffer(): DoubleBuffer
// Internal API
override private[nio] def isBigEndian: Boolean =
_isBigEndian
private[nio] def load(index: Int): Byte
private[nio] def store(index: Int, elem: Byte): Unit
@inline
private[nio] def load(startIndex: Int,
dst: Array[Byte],
offset: Int,
length: Int): Unit =
GenBuffer(this).generic_load(startIndex, dst, offset, length)
@inline
private[nio] def store(startIndex: Int,
src: Array[Byte],
offset: Int,
length: Int): Unit =
GenBuffer(this).generic_store(startIndex, src, offset, length)
}
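
// --- Added illustration (not part of the original source) ---
// A hedged usage sketch exercising only methods declared in this file: allocate,
// wrap, the relative put/get and the absolute getInt/putInt accessors. Relative
// operations advance the position; absolute ones take an explicit index.
private[nio] object ByteBufferUsageSketch {
  def demo(): Unit = {
    val buf = ByteBuffer.allocate(8)
    buf.put(1.toByte).put(2.toByte)    // relative puts: position is now 2
    println(buf.get(0))                // absolute get, position unchanged: prints 1

    buf.putInt(0x11223344)             // written at positions 2..5
    println(buf.getInt(2).toHexString) // "11223344" with the default big-endian order

    val wrapped = ByteBuffer.wrap(Array[Byte](10, 20, 30))
    println(wrapped.get())             // relative get from position 0: prints 10
  }
}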
| cedricviaccoz/scala-native | javalib/src/main/scala/java/nio/ByteBuffer.scala | Scala | bsd-3-clause | 4,440 |
/*
* Copyright 2015-2016 David R. Bild
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.nscala_money.money
import org.joda.money._
import scala.collection.JavaConverters._
import scala.math.BigDecimal.RoundingMode.RoundingMode
object StaticMoney extends StaticMoney
trait StaticMoney extends Conversions {
def zero(currency: CurrencyUnit): Money = Money.zero(currency)
def parse(str: String): Money = Money.parse(str)
def of(provider: BigMoneyProvider): Money = Money.of(provider)
def of(provider: BigMoneyProvider, mode: RoundingMode): Money = Money.of(provider, mode)
def of(currency: CurrencyUnit, amount: BigDecimal): Money = Money.of(currency, amount.bigDecimal)
def of(currency: CurrencyUnit, amount: BigDecimal, mode: RoundingMode): Money = Money.of(currency, amount.bigDecimal, mode)
def ofMajor(currency: CurrencyUnit, amount: Long): Money = Money.ofMajor(currency, amount)
def ofMinor(currency: CurrencyUnit, amount: Long): Money = Money.ofMinor(currency, amount)
def total(monies: Iterable[Money]): Money = Money.total(monies.asJava)
def total(monies: Money*): Money = Money.total(monies: _*)
def total(currency: CurrencyUnit, monies: Iterable[Money]): Money = Money.total(currency, monies.asJava)
def total(currency: CurrencyUnit, monies: Money*): Money = Money.total(currency, monies: _*)
def min(money1: Money, money2: Money): Money = MoneyUtils.min(money1, money2)
def max(money1: Money, money2: Money): Money = MoneyUtils.max(money1, money2)
}
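
// --- Added illustration (not part of the original source) ---
// A hedged sketch of the facade above. CurrencyUnit and Money come from Joda-Money
// (already imported at the top of this file); only members of StaticMoney are used.
object StaticMoneyExample {
  def main(args: Array[String]): Unit = {
    val a = StaticMoney.parse("USD 12.50")
    val b = StaticMoney.ofMinor(CurrencyUnit.USD, 250) // 250 cents == USD 2.50

    println(StaticMoney.total(a, b)) // USD 15.00
    println(StaticMoney.max(a, b))   // USD 12.50
    println(StaticMoney.zero(CurrencyUnit.USD)) // USD 0.00
  }
}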
| nscala-money/nscala-money | core/src/main/scala/com/github/nscala_money/money/StaticMoney.scala | Scala | apache-2.0 | 2,031 |
package org.trustedanalytics.sparktk.frame.internal.ops
import org.trustedanalytics.sparktk.frame.internal.rdd.FrameRdd
import org.trustedanalytics.sparktk.frame.internal.{ FrameState, FrameTransform, BaseFrame }
import org.trustedanalytics.sparktk.frame.Frame
trait AppendFrameTransform extends BaseFrame {
/**
* Adds more data to the current frame.
*
* @param frame Frame of data to append
*/
def append(frame: Frame): Unit = {
execute(Append(frame))
}
}
case class Append(frame: Frame) extends FrameTransform {
require(frame != null, "frame parameter is required.")
override def work(state: FrameState): FrameState = {
(state: FrameRdd).union(new FrameRdd(frame.schema, frame.rdd))
}
} | shibanis1/spark-tk | core/src/main/scala/org/trustedanalytics/sparktk/frame/internal/ops/Append.scala | Scala | apache-2.0 | 724 |
/*
* Copyright (c) 2014-2016 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.hadoop.scalding
// Hadoop
import org.apache.hadoop
// Scalding
import com.twitter.scalding.Tool
/**
* Entrypoint for Hadoop to kick off the job.
*
* Borrowed from com.twitter.scalding.Tool
*/
object JobRunner {
def main(args : Array[String]) {
hadoop.util.ToolRunner.run(new hadoop.conf.Configuration, new Tool, args);
}
}
| haensel-ams/snowplow | 3-enrich/hadoop-event-recovery/src/main/scala/com/snowplowanalytics/hadoop/scalding/JobRunner.scala | Scala | apache-2.0 | 1,073 |
/*
* Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
*/
package play.filters.csrf
import org.specs2.mutable.Specification
import play.api.libs.ws._
import scala.concurrent.Future
import play.api.mvc.{ Handler, Session }
import play.api.libs.Crypto
import play.api.test.{ FakeApplication, TestServer, PlaySpecification }
import play.api.http.{ ContentTypes, ContentTypeOf, Writeable }
import org.specs2.matcher.MatchResult
/**
* Specs for functionality that each CSRF filter/action shares in common
*/
trait CSRFCommonSpecs extends Specification with PlaySpecification {
import CSRFConf._
// This extracts the tests out into different configurations
def sharedTests(csrfCheckRequest: CsrfTester, csrfAddToken: CsrfTester, generate: => String,
addToken: (WSRequestHolder, String) => WSRequestHolder,
getToken: WSResponse => Option[String], compareTokens: (String, String) => MatchResult[Any],
errorStatusCode: Int) = {
// accept/reject tokens
"accept requests with token in query string" in {
lazy val token = generate
csrfCheckRequest(req => addToken(req.withQueryString(TokenName -> token), token)
.post(Map("foo" -> "bar"))
)(_.status must_== OK)
}
"accept requests with token in form body" in {
lazy val token = generate
csrfCheckRequest(req => addToken(req, token)
.post(Map("foo" -> "bar", TokenName -> token))
)(_.status must_== OK)
}
/* TODO: write multipart/form-data Writable
"accept requests with a session token and token in multipart body" in {
lazy val token = generate
makeRequest(_.withSession(TokenName -> token)
.post(Map("foo" -> "bar", TokenName -> token))
).status must_== OK
}
*/
"accept requests with token in header" in {
lazy val token = generate
csrfCheckRequest(req => addToken(req, token)
.withHeaders(HeaderName -> token)
.post(Map("foo" -> "bar"))
)(_.status must_== OK)
}
"accept requests with nocheck header" in {
csrfCheckRequest(_.withHeaders(HeaderName -> HeaderNoCheck)
.post(Map("foo" -> "bar"))
)(_.status must_== OK)
}
"accept requests with ajax header" in {
csrfCheckRequest(_.withHeaders("X-Requested-With" -> "a spoon")
.post(Map("foo" -> "bar"))
)(_.status must_== OK)
}
"reject requests with different token in body" in {
csrfCheckRequest(req => addToken(req, generate)
.post(Map("foo" -> "bar", TokenName -> generate))
)(_.status must_== errorStatusCode)
}
"reject requests with token in session but none elsewhere" in {
csrfCheckRequest(req => addToken(req, generate)
.post(Map("foo" -> "bar"))
)(_.status must_== errorStatusCode)
}
"reject requests with token in body but not in session" in {
csrfCheckRequest(
_.post(Map("foo" -> "bar", TokenName -> generate))
)(_.status must_== errorStatusCode)
}
// add to response
"add a token if none is found" in {
csrfAddToken(_.get()) { response =>
val token = response.body
token must not be empty
val rspToken = getToken(response)
rspToken must beSome.like {
case s => compareTokens(token, s)
}
}
}
"not set the token if already set" in {
lazy val token = generate
Thread.sleep(2)
csrfAddToken(req => addToken(req, token).get()) { response =>
getToken(response) must beNone
compareTokens(token, response.body)
// Ensure that nothing was updated
response.cookies must beEmpty
}
}
}
"a CSRF filter" should {
"work with signed session tokens" in {
def csrfCheckRequest = buildCsrfCheckRequest(false)
def csrfAddToken = buildCsrfAddToken()
def generate = Crypto.generateSignedToken
def addToken(req: WSRequestHolder, token: String) = req.withSession(TokenName -> token)
def getToken(response: WSResponse) = {
val session = response.cookies.find(_.name.exists(_ == Session.COOKIE_NAME)).flatMap(_.value).map(Session.decode)
session.flatMap(_.get(TokenName))
}
def compareTokens(a: String, b: String) = Crypto.compareSignedTokens(a, b) must beTrue
sharedTests(csrfCheckRequest, csrfAddToken, generate, addToken, getToken, compareTokens, FORBIDDEN)
"reject requests with unsigned token in body" in {
csrfCheckRequest(req => addToken(req, generate)
.post(Map("foo" -> "bar", TokenName -> "foo"))
)(_.status must_== FORBIDDEN)
}
"reject requests with unsigned token in session" in {
csrfCheckRequest(req => addToken(req, "foo")
.post(Map("foo" -> "bar", TokenName -> generate))
) { response =>
response.status must_== FORBIDDEN
response.cookies.find(_.name.exists(_ == Session.COOKIE_NAME)) must beSome.like {
case cookie => cookie.value must beNone
}
}
}
"return a different token on each request" in {
lazy val token = generate
Thread.sleep(2)
csrfAddToken(req => addToken(req, token).get()) { response =>
// it shouldn't be equal, to protect against BREACH vulnerability
response.body must_!= token
Crypto.compareSignedTokens(token, response.body) must beTrue
}
}
}
"work with unsigned session tokens" in {
def csrfCheckRequest = buildCsrfCheckRequest(false, "csrf.sign.tokens" -> "false")
def csrfAddToken = buildCsrfAddToken("csrf.sign.tokens" -> "false")
def generate = Crypto.generateToken
def addToken(req: WSRequestHolder, token: String) = req.withSession(TokenName -> token)
def getToken(response: WSResponse) = {
val session = response.cookies.find(_.name.exists(_ == Session.COOKIE_NAME)).flatMap(_.value).map(Session.decode)
session.flatMap(_.get(TokenName))
}
def compareTokens(a: String, b: String) = a must_== b
sharedTests(csrfCheckRequest, csrfAddToken, generate, addToken, getToken, compareTokens, FORBIDDEN)
}
"work with signed cookie tokens" in {
def csrfCheckRequest = buildCsrfCheckRequest(false, "csrf.cookie.name" -> "csrf")
def csrfAddToken = buildCsrfAddToken("csrf.cookie.name" -> "csrf")
def generate = Crypto.generateSignedToken
def addToken(req: WSRequestHolder, token: String) = req.withCookies("csrf" -> token)
def getToken(response: WSResponse) = response.cookies.find(_.name.exists(_ == "csrf")).flatMap(_.value)
def compareTokens(a: String, b: String) = Crypto.compareSignedTokens(a, b) must beTrue
sharedTests(csrfCheckRequest, csrfAddToken, generate, addToken, getToken, compareTokens, FORBIDDEN)
}
"work with unsigned cookie tokens" in {
def csrfCheckRequest = buildCsrfCheckRequest(false, "csrf.cookie.name" -> "csrf", "csrf.sign.tokens" -> "false")
def csrfAddToken = buildCsrfAddToken("csrf.cookie.name" -> "csrf", "csrf.sign.tokens" -> "false")
def generate = Crypto.generateToken
def addToken(req: WSRequestHolder, token: String) = req.withCookies("csrf" -> token)
def getToken(response: WSResponse) = response.cookies.find(_.name.exists(_ == "csrf")).flatMap(_.value)
def compareTokens(a: String, b: String) = a must_== b
sharedTests(csrfCheckRequest, csrfAddToken, generate, addToken, getToken, compareTokens, FORBIDDEN)
}
"work with secure cookie tokens" in {
def csrfCheckRequest = buildCsrfCheckRequest(false, "csrf.cookie.name" -> "csrf", "csrf.cookie.secure" -> "true")
def csrfAddToken = buildCsrfAddToken("csrf.cookie.name" -> "csrf", "csrf.cookie.secure" -> "true")
def generate = Crypto.generateSignedToken
def addToken(req: WSRequestHolder, token: String) = req.withCookies("csrf" -> token)
def getToken(response: WSResponse) = {
response.cookies.find(_.name.exists(_ == "csrf")).flatMap { cookie =>
cookie.secure must beTrue
cookie.value
}
}
def compareTokens(a: String, b: String) = Crypto.compareSignedTokens(a, b) must beTrue
sharedTests(csrfCheckRequest, csrfAddToken, generate, addToken, getToken, compareTokens, FORBIDDEN)
}
"work with checking failed result" in {
def csrfCheckRequest = buildCsrfCheckRequest(true, "csrf.cookie.name" -> "csrf")
def csrfAddToken = buildCsrfAddToken("csrf.cookie.name" -> "csrf")
def generate = Crypto.generateSignedToken
def addToken(req: WSRequestHolder, token: String) = req.withCookies("csrf" -> token)
def getToken(response: WSResponse) = response.cookies.find(_.name.exists(_ == "csrf")).flatMap(_.value)
def compareTokens(a: String, b: String) = Crypto.compareSignedTokens(a, b) must beTrue
sharedTests(csrfCheckRequest, csrfAddToken, generate, addToken, getToken, compareTokens, UNAUTHORIZED)
}
}
trait CsrfTester {
def apply[T](makeRequest: WSRequestHolder => Future[WSResponse])(handleResponse: WSResponse => T): T
}
/**
* Set up a request that will go through the CSRF action. The action must return 200 OK if successful.
*/
def buildCsrfCheckRequest(sendUnauthorizedResult: Boolean, configuration: (String, String)*): CsrfTester
/**
* Make a request that will have a token generated and added to the request and response if not present. The request
* must return the generated token in the body, accessed as if a template had accessed it.
*/
def buildCsrfAddToken(configuration: (String, String)*): CsrfTester
implicit class EnrichedRequestHolder(request: WSRequestHolder) {
def withSession(session: (String, String)*): WSRequestHolder = {
withCookies(Session.COOKIE_NAME -> Session.encode(session.toMap))
}
def withCookies(cookies: (String, String)*): WSRequestHolder = {
request.withHeaders(COOKIE -> cookies.map(c => c._1 + "=" + c._2).mkString(", "))
}
}
implicit def simpleFormWriteable: Writeable[Map[String, String]] = Writeable.writeableOf_urlEncodedForm.map[Map[String, String]](_.mapValues(v => Seq(v)))
implicit def simpleFormContentType: ContentTypeOf[Map[String, String]] = ContentTypeOf[Map[String, String]](Some(ContentTypes.FORM))
def withServer[T](config: Seq[(String, String)])(router: PartialFunction[(String, String), Handler])(block: => T) = running(TestServer(testServerPort, FakeApplication(
additionalConfiguration = Map(config: _*) ++ Map("application.secret" -> "foobar"),
withRoutes = router
)))(block)
}
| jyotikamboj/container | pf-framework/src/play-filters-helpers/src/test/scala/play/filters/csrf/CSRFCommonSpecs.scala | Scala | mit | 10,589 |
package ua.parser
import java.util.regex.{ Matcher, Pattern }
import MatcherOps._
case class UserAgent(family: String, major: Option[String] = None, minor: Option[String] = None,
patch: Option[String] = None)
object UserAgent {
private[parser] def fromMap(m: Map[String, String]) = m.get("family").map { family =>
UserAgent(family, m.get("major"), m.get("minor"), m.get("patch"))
}
private[parser] case class UserAgentPattern(pattern: Pattern, familyReplacement: Option[String],
v1Replacement: Option[String], v2Replacement: Option[String]) {
def process(agent: String): Option[UserAgent] = {
val matcher = pattern.matcher(agent)
if (!matcher.find()) return None
familyReplacement.map { replacement =>
if (replacement.contains("$1") && matcher.groupCount() >= 1) {
replacement.replaceFirst("\\\\$1", Matcher.quoteReplacement(matcher.group(1)))
} else replacement
}.orElse(matcher.groupAt(1)).map { family =>
val major = v1Replacement.orElse(matcher.groupAt(2))
val minor = v2Replacement.orElse(matcher.groupAt(3))
val patch = matcher.groupAt(4)
UserAgent(family, major, minor, patch)
}
}
}
private object UserAgentPattern {
def fromMap(config: Map[String, String]) = config.get("regex").map { r =>
UserAgentPattern(Pattern.compile(r), config.get("family_replacement"), config.get("v1_replacement"),
config.get("v2_replacement"))
}
}
case class UserAgentParser(patterns: List[UserAgentPattern]) {
def parse(agent: String) = {
patterns.foldLeft[Option[UserAgent]](None) {
case (None, pattern) => pattern.process(agent)
case (result, _) => result
}.getOrElse(UserAgent("Other"))
}
}
object UserAgentParser {
def fromList(config: List[Map[String, String]]) = UserAgentParser(config.flatMap(UserAgentPattern.fromMap))
}
}
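
// --- Added illustration (not part of the original source) ---
// A hedged sketch of the parser above with a single hand-written pattern; the map
// keys mirror the uap-core regex fields consumed by UserAgentPattern.fromMap, and a
// character class is used instead of backslash escapes purely for readability.
object UserAgentParserExample {
  def main(args: Array[String]): Unit = {
    val parser = UserAgent.UserAgentParser.fromList(List(Map(
      "regex" -> "(Firefox)/([0-9]+)[.]([0-9]+)"
    )))

    println(parser.parse("Mozilla/5.0 (X11; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0"))
    // UserAgent(Firefox,Some(61),Some(0),None)

    println(parser.parse("some unknown client")) // falls back to UserAgent(Other,None,None,None)
  }
}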
| yanana/uap-scala | src/main/scala/ua/parser/UserAgent.scala | Scala | mit | 1,910 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import java.lang.{Double => JDouble, Long => JLong}
import java.math.{BigDecimal => JBigDecimal}
import java.sql.{Date => JDate, Timestamp => JTimestamp}
import scala.collection.mutable.ArrayBuffer
import scala.util.Try
import org.apache.hadoop.fs.Path
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.Resolver
import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
import org.apache.spark.sql.catalyst.expressions.{Cast, Literal}
import org.apache.spark.sql.types._
// TODO: We should tighten up visibility of the classes here once we clean up Hive coupling.
object PartitionPath {
def apply(values: InternalRow, path: String): PartitionPath =
apply(values, new Path(path))
}
/**
 * Holds a directory in a partitioned collection of files as well as the partition values
* in the form of a Row. Before scanning, the files at `path` need to be enumerated.
*/
case class PartitionPath(values: InternalRow, path: Path)
case class PartitionSpec(
partitionColumns: StructType,
partitions: Seq[PartitionPath])
object PartitionSpec {
val emptySpec = PartitionSpec(StructType(Seq.empty[StructField]), Seq.empty[PartitionPath])
}
object PartitioningUtils {
private[datasources] case class PartitionValues(columnNames: Seq[String], literals: Seq[Literal])
{
require(columnNames.size == literals.size)
}
import org.apache.spark.sql.catalyst.catalog.ExternalCatalogUtils.DEFAULT_PARTITION_NAME
import org.apache.spark.sql.catalyst.catalog.ExternalCatalogUtils.escapePathName
import org.apache.spark.sql.catalyst.catalog.ExternalCatalogUtils.unescapePathName
/**
* Given a group of qualified paths, tries to parse them and returns a partition specification.
* For example, given:
* {{{
* hdfs://<host>:<port>/path/to/partition/a=1/b=hello/c=3.14
* hdfs://<host>:<port>/path/to/partition/a=2/b=world/c=6.28
* }}}
* it returns:
* {{{
* PartitionSpec(
* partitionColumns = StructType(
* StructField(name = "a", dataType = IntegerType, nullable = true),
* StructField(name = "b", dataType = StringType, nullable = true),
* StructField(name = "c", dataType = DoubleType, nullable = true)),
* partitions = Seq(
* Partition(
* values = Row(1, "hello", 3.14),
* path = "hdfs://<host>:<port>/path/to/partition/a=1/b=hello/c=3.14"),
* Partition(
* values = Row(2, "world", 6.28),
* path = "hdfs://<host>:<port>/path/to/partition/a=2/b=world/c=6.28")))
* }}}
*/
private[datasources] def parsePartitions(
paths: Seq[Path],
typeInference: Boolean,
basePaths: Set[Path]): PartitionSpec = {
// First, we need to parse every partition's path and see if we can find partition values.
val (partitionValues, optDiscoveredBasePaths) = paths.map { path =>
parsePartition(path, typeInference, basePaths)
}.unzip
// We create pairs of (path -> path's partition value) here
// If the corresponding partition value is None, the pair will be skipped
val pathsWithPartitionValues = paths.zip(partitionValues).flatMap(x => x._2.map(x._1 -> _))
if (pathsWithPartitionValues.isEmpty) {
// This dataset is not partitioned.
PartitionSpec.emptySpec
} else {
// This dataset is partitioned. We need to check whether all partitions have the same
// partition columns and resolve potential type conflicts.
// Check if there is conflicting directory structure.
// For the paths such as:
// var paths = Seq(
// "hdfs://host:9000/invalidPath",
// "hdfs://host:9000/path/a=10/b=20",
// "hdfs://host:9000/path/a=10.5/b=hello")
// It will be recognised as conflicting directory structure:
// "hdfs://host:9000/invalidPath"
// "hdfs://host:9000/path"
// TODO: Selective case sensitivity.
val discoveredBasePaths = optDiscoveredBasePaths.flatMap(x => x).map(_.toString.toLowerCase())
assert(
discoveredBasePaths.distinct.size == 1,
"Conflicting directory structures detected. Suspicious paths:\\b" +
discoveredBasePaths.distinct.mkString("\\n\\t", "\\n\\t", "\\n\\n") +
"If provided paths are partition directories, please set " +
"\\"basePath\\" in the options of the data source to specify the " +
"root directory of the table. If there are multiple root directories, " +
"please load them separately and then union them.")
val resolvedPartitionValues = resolvePartitions(pathsWithPartitionValues)
// Creates the StructType which represents the partition columns.
val fields = {
val PartitionValues(columnNames, literals) = resolvedPartitionValues.head
columnNames.zip(literals).map { case (name, Literal(_, dataType)) =>
// We always assume partition columns are nullable since we've no idea whether null values
// will be appended in the future.
StructField(name, dataType, nullable = true)
}
}
// Finally, we create `Partition`s based on paths and resolved partition values.
val partitions = resolvedPartitionValues.zip(pathsWithPartitionValues).map {
case (PartitionValues(_, literals), (path, _)) =>
PartitionPath(InternalRow.fromSeq(literals.map(_.value)), path)
}
PartitionSpec(StructType(fields), partitions)
}
}
/**
* Parses a single partition, returns column names and values of each partition column, also
* the path when we stop partition discovery. For example, given:
* {{{
* path = hdfs://<host>:<port>/path/to/partition/a=42/b=hello/c=3.14
* }}}
* it returns the partition:
* {{{
* PartitionValues(
* Seq("a", "b", "c"),
* Seq(
* Literal.create(42, IntegerType),
* Literal.create("hello", StringType),
* Literal.create(3.14, DoubleType)))
* }}}
* and the path when we stop the discovery is:
* {{{
* hdfs://<host>:<port>/path/to/partition
* }}}
*/
private[datasources] def parsePartition(
path: Path,
typeInference: Boolean,
basePaths: Set[Path]): (Option[PartitionValues], Option[Path]) = {
val columns = ArrayBuffer.empty[(String, Literal)]
// Old Hadoop versions don't have `Path.isRoot`
var finished = path.getParent == null
// currentPath is the current path that we will use to parse partition column value.
var currentPath: Path = path
while (!finished) {
// Sometimes (e.g., when speculative task is enabled), temporary directories may be left
// uncleaned. Here we simply ignore them.
if (currentPath.getName.toLowerCase == "_temporary") {
return (None, None)
}
if (basePaths.contains(currentPath)) {
// If the currentPath is one of base paths. We should stop.
finished = true
} else {
// Let's say currentPath is a path of "/table/a=1/", currentPath.getName will give us a=1.
// Once we get the string, we try to parse it and find the partition column and value.
val maybeColumn =
parsePartitionColumn(currentPath.getName, typeInference)
maybeColumn.foreach(columns += _)
// Now, we determine if we should stop.
// When we hit any of the following cases, we will stop:
// - In this iteration, we could not parse the value of partition column and value,
      //    i.e. maybeColumn is None, and columns is not empty. Here we check if columns is
// empty to handle cases like /table/a=1/_temporary/something (we need to find a=1 in
// this case).
// - After we get the new currentPath, this new currentPath represent the top level dir
// i.e. currentPath.getParent == null. For the example of "/table/a=1/",
// the top level dir is "/table".
finished =
(maybeColumn.isEmpty && !columns.isEmpty) || currentPath.getParent == null
if (!finished) {
// For the above example, currentPath will be "/table/".
currentPath = currentPath.getParent
}
}
}
if (columns.isEmpty) {
(None, Some(path))
} else {
val (columnNames, values) = columns.reverse.unzip
(Some(PartitionValues(columnNames, values)), Some(currentPath))
}
}
private def parsePartitionColumn(
columnSpec: String,
typeInference: Boolean): Option[(String, Literal)] = {
val equalSignIndex = columnSpec.indexOf('=')
if (equalSignIndex == -1) {
None
} else {
val columnName = columnSpec.take(equalSignIndex)
assert(columnName.nonEmpty, s"Empty partition column name in '$columnSpec'")
val rawColumnValue = columnSpec.drop(equalSignIndex + 1)
assert(rawColumnValue.nonEmpty, s"Empty partition column value in '$columnSpec'")
val literal = inferPartitionColumnValue(rawColumnValue, typeInference)
Some(columnName -> literal)
}
}
/**
* Given a partition path fragment, e.g. `fieldOne=1/fieldTwo=2`, returns a parsed spec
* for that fragment as a `TablePartitionSpec`, e.g. `Map(("fieldOne", "1"), ("fieldTwo", "2"))`.
*/
def parsePathFragment(pathFragment: String): TablePartitionSpec = {
parsePathFragmentAsSeq(pathFragment).toMap
}
/**
* Given a partition path fragment, e.g. `fieldOne=1/fieldTwo=2`, returns a parsed spec
* for that fragment as a `Seq[(String, String)]`, e.g.
* `Seq(("fieldOne", "1"), ("fieldTwo", "2"))`.
*/
def parsePathFragmentAsSeq(pathFragment: String): Seq[(String, String)] = {
pathFragment.split("/").map { kv =>
val pair = kv.split("=", 2)
(unescapePathName(pair(0)), unescapePathName(pair(1)))
}
}
/**
* This is the inverse of parsePathFragment().
*/
def getPathFragment(spec: TablePartitionSpec, partitionSchema: StructType): String = {
partitionSchema.map { field =>
escapePathName(field.name) + "=" + escapePathName(spec(field.name))
}.mkString("/")
}
/**
* Normalize the column names in partition specification, w.r.t. the real partition column names
* and case sensitivity. e.g., if the partition spec has a column named `monTh`, and there is a
* partition column named `month`, and it's case insensitive, we will normalize `monTh` to
* `month`.
*/
def normalizePartitionSpec[T](
partitionSpec: Map[String, T],
partColNames: Seq[String],
tblName: String,
resolver: Resolver): Map[String, T] = {
val normalizedPartSpec = partitionSpec.toSeq.map { case (key, value) =>
val normalizedKey = partColNames.find(resolver(_, key)).getOrElse {
throw new AnalysisException(s"$key is not a valid partition column in table $tblName.")
}
normalizedKey -> value
}
if (normalizedPartSpec.map(_._1).distinct.length != normalizedPartSpec.length) {
val duplicateColumns = normalizedPartSpec.map(_._1).groupBy(identity).collect {
case (x, ys) if ys.length > 1 => x
}
throw new AnalysisException(s"Found duplicated columns in partition specification: " +
duplicateColumns.mkString(", "))
}
normalizedPartSpec.toMap
}
/**
* Resolves possible type conflicts between partitions by up-casting "lower" types. The up-
* casting order is:
* {{{
* NullType ->
* IntegerType -> LongType ->
* DoubleType -> StringType
* }}}
*/
def resolvePartitions(
pathsWithPartitionValues: Seq[(Path, PartitionValues)]): Seq[PartitionValues] = {
if (pathsWithPartitionValues.isEmpty) {
Seq.empty
} else {
// TODO: Selective case sensitivity.
val distinctPartColNames =
pathsWithPartitionValues.map(_._2.columnNames.map(_.toLowerCase())).distinct
assert(
distinctPartColNames.size == 1,
listConflictingPartitionColumns(pathsWithPartitionValues))
// Resolves possible type conflicts for each column
val values = pathsWithPartitionValues.map(_._2)
val columnCount = values.head.columnNames.size
val resolvedValues = (0 until columnCount).map { i =>
resolveTypeConflicts(values.map(_.literals(i)))
}
// Fills resolved literals back to each partition
values.zipWithIndex.map { case (d, index) =>
d.copy(literals = resolvedValues.map(_(index)))
}
}
}
private[datasources] def listConflictingPartitionColumns(
pathWithPartitionValues: Seq[(Path, PartitionValues)]): String = {
val distinctPartColNames = pathWithPartitionValues.map(_._2.columnNames).distinct
def groupByKey[K, V](seq: Seq[(K, V)]): Map[K, Iterable[V]] =
seq.groupBy { case (key, _) => key }.mapValues(_.map { case (_, value) => value })
val partColNamesToPaths = groupByKey(pathWithPartitionValues.map {
case (path, partValues) => partValues.columnNames -> path
})
val distinctPartColLists = distinctPartColNames.map(_.mkString(", ")).zipWithIndex.map {
case (names, index) =>
s"Partition column name list #$index: $names"
}
// Lists out those non-leaf partition directories that also contain files
val suspiciousPaths = distinctPartColNames.sortBy(_.length).flatMap(partColNamesToPaths)
s"Conflicting partition column names detected:\\n" +
distinctPartColLists.mkString("\\n\\t", "\\n\\t", "\\n\\n") +
"For partitioned table directories, data files should only live in leaf directories.\\n" +
"And directories at the same level should have the same partition column name.\\n" +
"Please check the following directories for unexpected files or " +
"inconsistent partition column names:\\n" +
suspiciousPaths.map("\\t" + _).mkString("\\n", "\\n", "")
}
/**
* Converts a string to a [[Literal]] with automatic type inference. Currently only supports
* [[IntegerType]], [[LongType]], [[DoubleType]], [[DecimalType]], [[DateType]]
* [[TimestampType]], and [[StringType]].
*/
private[datasources] def inferPartitionColumnValue(
raw: String,
typeInference: Boolean): Literal = {
val decimalTry = Try {
// `BigDecimal` conversion can fail when the `field` is not a form of number.
val bigDecimal = new JBigDecimal(raw)
// It reduces the cases for decimals by disallowing values having scale (eg. `1.1`).
require(bigDecimal.scale <= 0)
// `DecimalType` conversion can fail when
// 1. The precision is bigger than 38.
// 2. scale is bigger than precision.
Literal(bigDecimal)
}
if (typeInference) {
// First tries integral types
Try(Literal.create(Integer.parseInt(raw), IntegerType))
.orElse(Try(Literal.create(JLong.parseLong(raw), LongType)))
.orElse(decimalTry)
// Then falls back to fractional types
.orElse(Try(Literal.create(JDouble.parseDouble(raw), DoubleType)))
// Then falls back to date/timestamp types
.orElse(Try(Literal(JDate.valueOf(raw))))
.orElse(Try(Literal(JTimestamp.valueOf(unescapePathName(raw)))))
// Then falls back to string
.getOrElse {
if (raw == DEFAULT_PARTITION_NAME) {
Literal.create(null, NullType)
} else {
Literal.create(unescapePathName(raw), StringType)
}
}
} else {
if (raw == DEFAULT_PARTITION_NAME) {
Literal.create(null, NullType)
} else {
Literal.create(unescapePathName(raw), StringType)
}
}
}
private val upCastingOrder: Seq[DataType] =
Seq(NullType, IntegerType, LongType, FloatType, DoubleType, StringType)
def validatePartitionColumn(
schema: StructType,
partitionColumns: Seq[String],
caseSensitive: Boolean): Unit = {
partitionColumnsSchema(schema, partitionColumns, caseSensitive).foreach {
field => field.dataType match {
case _: AtomicType => // OK
case _ => throw new AnalysisException(s"Cannot use ${field.dataType} for partition column")
}
}
if (partitionColumns.nonEmpty && partitionColumns.size == schema.fields.length) {
throw new AnalysisException(s"Cannot use all columns for partition columns")
}
}
def partitionColumnsSchema(
schema: StructType,
partitionColumns: Seq[String],
caseSensitive: Boolean): StructType = {
val equality = columnNameEquality(caseSensitive)
StructType(partitionColumns.map { col =>
schema.find(f => equality(f.name, col)).getOrElse {
throw new AnalysisException(s"Partition column $col not found in schema $schema")
}
}).asNullable
}
private def columnNameEquality(caseSensitive: Boolean): (String, String) => Boolean = {
if (caseSensitive) {
org.apache.spark.sql.catalyst.analysis.caseSensitiveResolution
} else {
org.apache.spark.sql.catalyst.analysis.caseInsensitiveResolution
}
}
/**
* Given a collection of [[Literal]]s, resolves possible type conflicts by up-casting "lower"
* types.
*/
private def resolveTypeConflicts(literals: Seq[Literal]): Seq[Literal] = {
val desiredType = {
val topType = literals.map(_.dataType).maxBy(upCastingOrder.indexOf(_))
// Falls back to string if all values of this column are null or empty string
if (topType == NullType) StringType else topType
}
literals.map { case l @ Literal(_, dataType) =>
Literal.create(Cast(l, desiredType).eval(), desiredType)
}
}
}
| kimoonkim/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/PartitioningUtils.scala | Scala | apache-2.0 | 18,454 |
package xsbt.boot
import java.io.{File,InputStream}
import java.net.URL
import java.util.Properties
import xsbti._
import org.specs2._
import mutable.Specification
import LaunchTest._
import sbt.IO.{createDirectory, touch,withTemporaryDirectory}
object ScalaProviderTest extends Specification
{
"Launch" should {
"provide ClassLoader for Scala 2.8.0" in { checkScalaLoader("2.8.0") }
"provide ClassLoader for Scala 2.8.2" in { checkScalaLoader("2.8.2") }
"provide ClassLoader for Scala 2.9.0" in { checkScalaLoader("2.9.0") }
"provide ClassLoader for Scala 2.9.2" in { checkScalaLoader("2.9.2") }
}
"Launch" should {
"Successfully load an application from local repository and run it with correct arguments" in {
checkLoad(List("test"), "xsbt.boot.test.ArgumentTest").asInstanceOf[Exit].code must equalTo(0)
checkLoad(List(), "xsbt.boot.test.ArgumentTest") must throwA[RuntimeException]
}
"Successfully load an plain application from local repository and run it with correct arguments" in {
checkLoad(List("test"), "xsbt.boot.test.PlainArgumentTest").asInstanceOf[Exit].code must equalTo(0)
checkLoad(List(), "xsbt.boot.test.PlainArgumentTest") must throwA[RuntimeException]
}
"Successfully load an plain application with int return from local repository and run it with correct arguments" in {
checkLoad(List("test"), "xsbt.boot.test.PlainArgumentTestWithReturn").asInstanceOf[Exit].code must equalTo(0)
checkLoad(List(), "xsbt.boot.test.PlainArgumentTestWithReturn").asInstanceOf[Exit].code must equalTo(1)
}
"Successfully load an application from local repository and run it with correct sbt version" in {
checkLoad(List(AppVersion), "xsbt.boot.test.AppVersionTest").asInstanceOf[Exit].code must equalTo(0)
}
"Add extra resources to the classpath" in {
checkLoad(testResources, "xsbt.boot.test.ExtraTest", createExtra).asInstanceOf[Exit].code must equalTo(0)
}
}
def checkLoad(arguments: List[String], mainClassName: String): MainResult =
checkLoad(arguments, mainClassName, _ => Array[File]())
def checkLoad(arguments: List[String], mainClassName: String, extra: File => Array[File]): MainResult =
withTemporaryDirectory { currentDirectory =>
withLauncher { launcher =>
Launch.run(launcher)(
new RunConfiguration(Some(unmapScalaVersion(LaunchTest.getScalaVersion)), LaunchTest.testApp(mainClassName, extra(currentDirectory)).toID, currentDirectory, arguments)
)
}
}
private def testResources = List("test-resourceA", "a/b/test-resourceB", "sub/test-resource")
private def createExtra(currentDirectory: File) =
{
val resourceDirectory = new File(currentDirectory, "resources")
createDirectory(resourceDirectory)
testResources.foreach(resource => touch(new File(resourceDirectory, resource.replace('/', File.separatorChar))))
Array(resourceDirectory)
}
private def checkScalaLoader(version: String): Unit = withLauncher( checkLauncher(version, mapScalaVersion(version)) )
private def checkLauncher(version: String, versionValue: String)(launcher: Launcher): Unit =
{
val provider = launcher.getScala(version)
val loader = provider.loader
    // ensure that this loader can load Scala classes by trying scala.Product.
tryScala(loader)
getScalaVersion(loader) must beEqualTo(versionValue)
}
private def tryScala(loader: ClassLoader): Unit = Class.forName("scala.Product", false, loader).getClassLoader must be(loader)
}
object LaunchTest
{
def testApp(main: String): Application = testApp(main, Array[File]())
def testApp(main: String, extra: Array[File]): Application = Application("org.scala-sbt", "launch-test", new Explicit(AppVersion), main, Nil, CrossValue.Disabled, extra)
import Predefined._
def testRepositories = List(Local, ScalaToolsReleases, ScalaToolsSnapshots).map(Repository.Predefined(_))
def withLauncher[T](f: xsbti.Launcher => T): T =
withTemporaryDirectory { bootDirectory =>
f(Launcher(bootDirectory, testRepositories))
}
val finalStyle = Set("2.9.1", "2.9.0-1", "2.9.0", "2.8.2", "2.8.1", "2.8.0")
def unmapScalaVersion(versionNumber: String) = versionNumber.stripSuffix(".final")
def mapScalaVersion(versionNumber: String) = if(finalStyle(versionNumber)) versionNumber + ".final" else versionNumber
def getScalaVersion: String = getScalaVersion(getClass.getClassLoader)
def getScalaVersion(loader: ClassLoader): String = getProperty(loader, "library.properties", "version.number")
lazy val AppVersion = getProperty(getClass.getClassLoader, "xsbt.version.properties", "version")
private[this] def getProperty(loader: ClassLoader, res: String, prop: String) = loadProperties(loader.getResourceAsStream(res)).getProperty(prop)
private[this] def loadProperties(propertiesStream: InputStream): Properties =
{
val properties = new Properties
try { properties.load(propertiesStream) } finally { propertiesStream.close() }
properties
}
}
| xeno-by/old-scalameta-sbt | launch/src/test/scala/ScalaProviderTest.scala | Scala | bsd-3-clause | 4,904 |
package edu.indiana.ise.spidal.util
/**
* Created by vibhatha on 7/16/17.
*/
class ReadText {
}
| vibhatha/Spark | SparkMlibBenchmark/src/main/scala/edu/indiana/ise/spidal/util/ReadText.scala | Scala | mit | 102 |
package com.arcusys.valamis.certificate.storage.schema
import com.arcusys.valamis.certificate.model.goal.{CertificateGoal, GoalType}
import com.arcusys.valamis.model.PeriodTypes
import com.arcusys.valamis.persistence.common.DbNameUtils._
import com.arcusys.valamis.persistence.common.{LongKeyTableComponent, SlickProfile, TypeMapper}
import com.arcusys.valamis.util.TupleHelpers._
import com.arcusys.valamis.util.ToTuple
trait CertificateGoalTableComponent
extends LongKeyTableComponent
with TypeMapper
with CertificateTableComponent
with CertificateGoalGroupTableComponent { self: SlickProfile =>
import driver.simple._
implicit val certificateGoalTypeMapper = enumerationIdMapper(GoalType)
implicit lazy val validPeriodTypeMapper = enumerationMapper(PeriodTypes)
class CertificateGoalTable(tag: Tag) extends LongKeyTable[CertificateGoal](tag, "CERT_GOALS") {
def certificateId = column[Long]("CERTIFICATE_ID")
def goalType = column[GoalType.Value]("GOAL_TYPE")
def periodValue = column[Int]("PERIOD_VALUE")
def periodType = column[PeriodTypes.PeriodType]("PERIOD_TYPE")
def arrangementIndex = column[Int]("ARRANGEMENT_INDEX")
def isOptional = column[Boolean]("IS_OPTIONAL")
def groupId = column[Option[Long]]("GROUP_ID")
def * = (id, certificateId, goalType, periodValue, periodType, arrangementIndex, isOptional, groupId) <> (CertificateGoal.tupled, CertificateGoal.unapply)
def update = (certificateId, goalType, periodValue, periodType, arrangementIndex, isOptional, groupId) <> (tupleToEntity, entityToTuple)
def entityToTuple(entity: TableElementType) = {
Some(toTupleWithFilter(entity))
}
def certificateFK = foreignKey(fkName("GOALS_TO_CERT"), certificateId, certificates)(x => x.id, onDelete = ForeignKeyAction.NoAction)
def groupFK = foreignKey(fkName("GOALS_TO_GROUP"), groupId, certificateGoalGroups)(x => x.id)
}
val certificateGoals = TableQuery[CertificateGoalTable]
} | igor-borisov/valamis | valamis-certificate/src/main/scala/com/arcusys/valamis/certificate/storage/schema/CertificateGoalTableComponent.scala | Scala | gpl-3.0 | 1,977 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.worker
import java.io.File
import org.apache.commons.lang3.StringUtils
import org.apache.spark.{SecurityManager, SparkConf}
import org.apache.spark.deploy.{DependencyUtils, SparkHadoopUtil, SparkSubmit}
import org.apache.spark.internal.{config, Logging}
import org.apache.spark.rpc.RpcEnv
import org.apache.spark.util._
/**
* Utility object for launching driver programs such that they share fate with the Worker process.
* This is used in standalone cluster mode only.
*/
object DriverWrapper extends Logging {
def main(args: Array[String]) {
args.toList match {
/*
* IMPORTANT: Spark 1.3 provides a stable application submission gateway that is both
* backward and forward compatible across future Spark versions. Because this gateway
* uses this class to launch the driver, the ordering and semantics of the arguments
* here must also remain consistent across versions.
*/
case workerUrl :: userJar :: mainClass :: extraArgs =>
val conf = new SparkConf()
val host: String = Utils.localHostName()
val port: Int = sys.props.getOrElse(config.DRIVER_PORT.key, "0").toInt
val rpcEnv = RpcEnv.create("Driver", host, port, conf, new SecurityManager(conf))
logInfo(s"Driver address: ${rpcEnv.address}")
rpcEnv.setupEndpoint("workerWatcher", new WorkerWatcher(rpcEnv, workerUrl))
val currentLoader = Thread.currentThread.getContextClassLoader
val userJarUrl = new File(userJar).toURI().toURL()
val loader =
if (sys.props.getOrElse(config.DRIVER_USER_CLASS_PATH_FIRST.key, "false").toBoolean) {
new ChildFirstURLClassLoader(Array(userJarUrl), currentLoader)
} else {
new MutableURLClassLoader(Array(userJarUrl), currentLoader)
}
Thread.currentThread.setContextClassLoader(loader)
setupDependencies(loader, userJar)
// Delegate to supplied main class
val clazz = Utils.classForName(mainClass)
val mainMethod = clazz.getMethod("main", classOf[Array[String]])
mainMethod.invoke(null, extraArgs.toArray[String])
rpcEnv.shutdown()
case _ =>
// scalastyle:off println
System.err.println("Usage: DriverWrapper <workerUrl> <userJar> <driverMainClass> [options]")
// scalastyle:on println
System.exit(-1)
}
}
private def setupDependencies(loader: MutableURLClassLoader, userJar: String): Unit = {
val sparkConf = new SparkConf()
val secMgr = new SecurityManager(sparkConf)
val hadoopConf = SparkHadoopUtil.newConfiguration(sparkConf)
val Seq(packagesExclusions, packages, repositories, ivyRepoPath, ivySettingsPath) =
Seq(
"spark.jars.excludes",
"spark.jars.packages",
"spark.jars.repositories",
"spark.jars.ivy",
"spark.jars.ivySettings"
).map(sys.props.get(_).orNull)
val resolvedMavenCoordinates = DependencyUtils.resolveMavenDependencies(packagesExclusions,
packages, repositories, ivyRepoPath, Option(ivySettingsPath))
val jars = {
val jarsProp = sys.props.get(config.JARS.key).orNull
if (!StringUtils.isBlank(resolvedMavenCoordinates)) {
DependencyUtils.mergeFileLists(jarsProp, resolvedMavenCoordinates)
} else {
jarsProp
}
}
val localJars = DependencyUtils.resolveAndDownloadJars(jars, userJar, sparkConf, hadoopConf,
secMgr)
DependencyUtils.addJarsToClassPath(localJars, loader)
}
}
| icexelloss/spark | core/src/main/scala/org/apache/spark/deploy/worker/DriverWrapper.scala | Scala | apache-2.0 | 4,347 |
package com.dt.scala.forexpression
/**
* @author Wang Jialin
* Date 2015/8/16
* Contact Information:
* WeChat: 18610086859
* QQ: 1740415547
* Email: [email protected]
* Tel: 18610086859
 * WeChat public account: DT_Spark
*/
object For_Advanced {
def main(args: Array[String]) {}
def map[A, B](list: List[A], f: A => B): List[B] =
for(element <- list) yield f(element)
def flatMap[A, B](list: List[A], f: A => List[B]): List[B] =
for(x <- list; y <- f(x)) yield y
def filter[A](list: List[A], f: A => Boolean): List[A] =
for(elem <- list if f(elem)) yield elem
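
  // --- Added illustration (not part of the original source) ---
  // Shows what the three for-expression based helpers above evaluate to.
  def demo(): Unit = {
    println(map(List(1, 2, 3), (x: Int) => x * 2))               // List(2, 4, 6)
    println(flatMap(List(1, 2, 3), (x: Int) => List(x, x * 10))) // List(1, 10, 2, 20, 3, 30)
    println(filter(List(1, 2, 3, 4), (x: Int) => x % 2 == 0))    // List(2, 4)
  }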
} | slieer/scala-tutorials | src/main/scala/com/dt/scala/forexpression/For_Advanced.scala | Scala | apache-2.0 | 624 |
/*
* Copyright 2019 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.scio.util
import java.util.concurrent.Semaphore
import com.spotify.scio.transforms.DoFnWithResource
import com.spotify.scio.transforms.DoFnWithResource.ResourceType
import org.apache.beam.sdk.transforms.DoFn
import org.apache.beam.sdk.transforms.DoFn.ProcessElement
/**
* Utility class to limit the number of parallel doFns
* @param maxDoFns
* Max number of doFns
*/
abstract private[scio] class ParallelLimitedFn[T, U](maxDoFns: Int)
extends DoFnWithResource[T, U, Semaphore]
with NamedFn {
def getResourceType: ResourceType = ResourceType.PER_CLASS
def createResource: Semaphore = new Semaphore(maxDoFns, true)
def parallelProcessElement(x: DoFn[T, U]#ProcessContext): Unit
@ProcessElement def processElement(x: DoFn[T, U]#ProcessContext): Unit = {
val semaphore = getResource
try {
semaphore.acquire()
parallelProcessElement(x)
} finally {
semaphore.release()
}
}
}
| spotify/scio | scio-core/src/main/scala/com/spotify/scio/util/ParallelLimitedFn.scala | Scala | apache-2.0 | 1,553 |
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.check.extractor.jsonpath
object Json1 extends JsonSample {
val value = """{
| "store": {
| "book": [
| { "category": "reference",
| "author": "Nigel Rees",
| "title": "Sayings of the Century",
| "display-price": 8.95
| },
| { "category": "fiction",
| "author": "Evelyn Waugh",
| "title": "Sword of Honour",
| "display-price": 12.99
| },
| { "category": "fiction",
| "author": "Herman Melville",
| "title": "Moby Dick",
| "isbn": "0-553-21311-3",
| "display-price": 8.99
| },
| { "category": "fiction",
| "author": "J. R. R. Tolkien",
| "title": "The Lord of the Rings",
| "isbn": "0-395-19395-8",
| "display-price": 22.99
| }
| ],
| "bicycle": {
| "foo": "baz",
| "color": "red",
| "display-price": 19.95,
| "foo:bar": "fooBar",
| "dot.notation": "new",
| "dash-notation": "dashes"
| }
| },
| "foo": "bar",
| "@id": "ID"
|}""".stripMargin
}
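// Hedged illustration (added; not part of the original file): typical JsonPath
// expressions evaluated against this sample resolve as
//   $.store.bicycle.color        -> "red"
//   $.store.book[0].author       -> "Nigel Rees"
//   $..book[?(@.isbn)].title     -> "Moby Dick", "The Lord of the Rings"
//   $.foo                        -> "bar"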
| MykolaB/gatling | gatling-core/src/test/scala/io/gatling/core/check/extractor/jsonpath/Json1.scala | Scala | apache-2.0 | 2,120 |
package cobalt.ast
import cobalt.ast.AST._
import cobalt.ast.AST2IR.convertToIR
import cobalt.ast.IRNew._
import cobalt.jar_loader.JarUtility
import cobalt.symbol_table.{SymbolTable, ValueEntry}
import scala.tools.asm.Opcodes
object IRUtils {
def typeStringToTypeIR(t: String): TypeIR = {
t match {
case "Int" => IntType()
case "Long" => LongType()
case "String" => StringLiteralType()
case "Unit" => UnitType()
case className => ObjectType(className)
}
}
def typeToBytecodeType(typeIR: TypeIR): String = {
typeIR match {
case _: IntType => "I"
case _: LongType => "J"
case _: StringLiteralType => "Ljava/lang/String;"
case _: UnitType => "V"
case objectType: ObjectType => "L" + objectType.name + ";"
case _: UnknownType => ""
}
}
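  // For reference (added illustration, not in the original source), the two
  // mappings above translate source-level types into JVM type descriptors, e.g.
  //   typeStringToTypeIR("Int")                            // IntType()
  //   typeToBytecodeType(IntType())                        // "I"
  //   typeToBytecodeType(ObjectType("java/lang/String"))   // "Ljava/lang/String;"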
def inferType(expression: Expression, symbolTable: SymbolTable, imports: Map[String, String]): TypeIR = {
expression match {
case aBinary: ABinary => inferType(aBinary.expression1, symbolTable, imports)
case blockExpr: BlockExpr => {
val types = blockExpr.expressions.map(e => inferType(e, symbolTable, imports))
types.length match {
case 0 => UnknownType()
case _ => types.head
}
}
case _: DoubleConst => DoubleType()
case _: FloatConst => FloatType()
case identifier: Identifier => ObjectType(symbolTable.get(identifier.name.value) match {
case v: ValueEntry => v.name
})
case _: IntObject => ObjectType("Ljava/lang/Object;")
case _: IntConst => IntType()
case _: LongConst => LongType()
case nestedExpression: NestedExpr => {
var currentType: TypeIR = null
// Loop through all method calls and variables
nestedExpression.expressions.foreach {
case methodCall: MethodCall => {
// Get the method argument types and convert to bytecode types
val argumentTypes = methodCall.expression.map(e => IRUtils.typeToBytecodeType(IRUtils.inferType(e, symbolTable, imports))).toList
//val signature = JarUtility.getBytecodeClass(currentType.classLoc).getMethod(methodCall.name.value, argumentTypes).getSignature()
}
case value: Identifier => {
currentType = symbolTable.get(value match {
case methodCall: MethodCall => methodCall.name.value
case identifier: Identifier => identifier.name.value
}) match {
case valueEntry: ValueEntry => valueEntry.`type`
}
}
}
ObjectType(currentType.classLoc)
}
case newClassInstance: NewClassInstance => {
val superClass: String = newClassInstance.`type`.ref match {
case RefLocal(name) => imports.get(name.value).getOrElse(name.value)
case RefQual(qualName) => qualName.nameSpace.nameSpace.map(_.value).mkString("/") + "/" + qualName.name.value
}
ObjectType(superClass)
}
case _: StringLiteral => StringLiteralType()
}
}
def getStoreOperator(statement: Statement): Int = {
statement match {
case inline: Inline => getStoreOperator(inline.expression)
case doBlock: DoBlock => {
getStoreOperator(doBlock.statement.head)
}
case blockStmt: BlockStmt => getStoreOperator(blockStmt.statements.head)
}
}
def getStoreOperator(expression: Expression): Int = {
expression match {
case aBinaryIR: ABinary => getStoreOperator(aBinaryIR.expression1)
case _: IntConstIR => Opcodes.ISTORE
case _: LongConst => Opcodes.LSTORE
case _: FloatConst => Opcodes.FSTORE
case _: DoubleConst => Opcodes.DSTORE
}
}
def getStoreOperator(t: TypeIR, id: Int): StoreOperators = {
t match {
case _: IntType => IStore(id)
case _: LongType => LStore(id)
case _: StringLiteralType => AStore(id);
case _: ObjectType => AStore(id)
}
}
def getLoadOperator(t: TypeIR): Int = {
t match {
case intType: IntType => Opcodes.ILOAD
case longType: LongType => Opcodes.LLOAD
case _ => Opcodes.ALOAD
}
}
def getArithmeticOperator(op: Operator, expression1: Expression, expression2: Expression): Int = {
expression1 match {
case innerABinary: ABinary => {
getArithmeticOperator(op, innerABinary.expression1, innerABinary.expression2)
}
case _: IntConstIR => {
op match {
case Add => Opcodes.IADD
case Subtract => Opcodes.ISUB
case Multiply => Opcodes.IMUL
case Divide => Opcodes.IDIV
}
}
case _: LongConstIR => {
op match {
case Add => Opcodes.LADD
case Subtract => Opcodes.LSUB
case Multiply => Opcodes.LMUL
case Divide => Opcodes.LDIV
}
}
case _: FloatConstIR => {
op match {
case Add => Opcodes.FADD
case Subtract => Opcodes.FSUB
case Multiply => Opcodes.FMUL
case Divide => Opcodes.FDIV
}
}
case _: DoubleConstIR => {
op match {
case Add => Opcodes.DADD
case Subtract => Opcodes.DSUB
case Multiply => Opcodes.DMUL
case Divide => Opcodes.DDIV
}
}
}
}
def modifierToOpcode(modifier: Modifier): Int = {
modifier match {
case _: Public => Opcodes.ACC_PUBLIC
case _: Protected => Opcodes.ACC_PROTECTED
case _: Private => Opcodes.ACC_PRIVATE
case _: Abstract => Opcodes.ACC_ABSTRACT
case _: Final => Opcodes.ACC_FINAL
}
}
}
| Michael2109/cobalt | src/main/scala/cobalt/ast/IRUtils.scala | Scala | lgpl-3.0 | 5,630 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.secondaryindex.events
import scala.collection.JavaConverters._
import org.apache.log4j.Logger
import org.apache.spark.internal.Logging
import org.apache.spark.sql.CarbonEnv
import org.apache.spark.sql.hive.CarbonRelation
import org.apache.spark.sql.index.CarbonIndexUtil
import org.apache.carbondata.api.CarbonStore
import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.datastore.impl.FileFactory
import org.apache.carbondata.core.metadata.index.IndexType
import org.apache.carbondata.core.util.path.CarbonTablePath
import org.apache.carbondata.events.{DeleteSegmentByIdPostEvent, Event, OperationContext, OperationEventListener}
class DeleteSegmentByIdListener extends OperationEventListener with Logging {
val LOGGER: Logger = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
/**
* Called on a specified event occurrence
*
*/
override def onEvent(event: Event, operationContext: OperationContext): Unit = {
event match {
case deleteSegmentPostEvent: DeleteSegmentByIdPostEvent =>
LOGGER.info("Delete segment By id post event listener called")
val carbonTable = deleteSegmentPostEvent.carbonTable
val loadIds = deleteSegmentPostEvent.loadIds
val sparkSession = deleteSegmentPostEvent.sparkSession
val siIndexesMap = carbonTable.getIndexesMap
.get(IndexType.SI.getIndexProviderName)
if (null != siIndexesMap) {
siIndexesMap.keySet().asScala.foreach { tableName =>
val metastore = CarbonEnv.getInstance(sparkSession).carbonMetaStore
val table = metastore
.lookupRelation(Some(carbonTable.getDatabaseName), tableName)(sparkSession)
.asInstanceOf[CarbonRelation].carbonTable
val tableStatusFilePath = CarbonTablePath.getTableStatusFilePath(table.getTablePath)
// this check is added to verify if the table status file for the index table exists
// or not. Delete on index tables is only to be called if the table status file exists.
if (FileFactory.isFileExist(tableStatusFilePath)) {
CarbonStore
.deleteLoadById(loadIds, carbonTable.getDatabaseName, table.getTableName, table)
}
}
}
}
}
}
| zzcclp/carbondata | integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/events/DeleteSegmentByIdListener.scala | Scala | apache-2.0 | 3,140 |
package org.openurp.edu.grade.course.domain.impl
import java.text.NumberFormat
import java.{ util => ju }
import org.beangle.commons.lang.{ Numbers, Strings }
import org.beangle.commons.script.ExpressionEvaluator
import org.beangle.data.dao.{ EntityDao, OqlBuilder }
import org.openurp.edu.base.code.model.ScoreMarkStyle
import org.openurp.edu.base.model.Project
import org.openurp.edu.base.service.ProjectCodeService
import org.openurp.edu.grade.code.model.GradeType
import org.openurp.edu.grade.course.domain.GradeRateService
import org.openurp.edu.grade.model.{ Grade, GradeRateConfig }
class DefaultGradeRateService extends GradeRateService {
var entityDao: EntityDao = _
var expressionEvaluator: ExpressionEvaluator = _
var projectCodeService: ProjectCodeService = _
  /**
   * Calculates the grade point according to the grade point rules.
   *
   * @param grade
   * @param gradeType
   */
def calcGp(grade: Grade, gradeType: GradeType): java.lang.Float = {
    val config = getConfig(grade.std.project, grade.markStyle)
    if (null != config) {
      var gp = calcGp(grade.score, config)
if (null != gp && gp.floatValue > 1 && null != grade.score && grade.score < 61) {
if (grade.gradeType.id == GradeType.MakeupGa) gp = 1.0f
}
return gp
}
null
}
  /**
   * Calculates the grade point corresponding to a score.
   *
   * @param score
   * @param config
   * @return
   */
  private def calcGp(score: java.lang.Float, config: GradeRateConfig): java.lang.Float = {
    if (null == score || score.floatValue() <= 0) return new java.lang.Float(0) else {
      var iter = config.items.iterator
while (iter.hasNext) {
val gradeRateItem = iter.next()
if (gradeRateItem.inScope(score)) {
if (Strings.isNotEmpty(gradeRateItem.gpExp)) {
val data = new ju.HashMap[String, Any]
data.put("score", score)
return expressionEvaluator.eval(gradeRateItem.gpExp, data, classOf[Float])
} else {
return null
}
}
}
}
new java.lang.Float(0)
}
  /**
   * Converts a score string into a number according to the score mark style.<br>
   * A blank score is converted to null.
   *
   * @param score
   * @param scoreMarkStyle
   * @return
   */
def convert(score: String, scoreMarkStyle: ScoreMarkStyle, project: Project): java.lang.Float = {
if (Strings.isBlank(score)) return null
val config = getConfig(project, scoreMarkStyle).asInstanceOf[GradeRateConfig]
if (null == config || config.items.size == 0) {
if (Numbers.isDigits(score)) new java.lang.Float(Numbers.toFloat(score)) else null
} else {
val newScore = config.convert(score)
if (null != newScore) {
return newScore
}
if (Numbers.isDigits(score)) {
return new java.lang.Float(Numbers.toFloat(score))
}
null
}
}
def isPassed(score: java.lang.Float, scoreMarkStyle: ScoreMarkStyle, project: Project): Boolean = {
val config = getConfig(project, scoreMarkStyle)
if (null == config || null == score) {
false
} else {
java.lang.Float.compare(score, config.passScore) >= 0
}
}
  /**
   * Converts a numeric score into its string form according to the score mark style.<br>
   * A null score is converted to "".
   *
   * @param score
   * @param scoreMarkStyle
   * @return
   */
def convert(score: java.lang.Float, scoreMarkStyle: ScoreMarkStyle, project: Project): String = {
if (null == score) {
return ""
}
val config = getConfig(project, scoreMarkStyle)
if (null == config) {
NumberFormat.getInstance.format(score.floatValue())
} else {
config.convert(score)
}
}
  /**
   * Looks up the grade rate configuration for the given mark style.
   */
def getConfig(project: Project, scoreMarkStyle: ScoreMarkStyle): GradeRateConfig = {
if (null == project || !project.persisted) return null
val builder = OqlBuilder.from(classOf[GradeRateConfig], "config")
.where("config.project=:project and config.scoreMarkStyle=:markStyle", project, scoreMarkStyle)
.cacheable()
val rs = entityDao.search(builder)
if (rs.isEmpty) null else rs.head
}
  /**
   * Returns the supported score mark styles.
   *
   * @param project
   * @return
   */
def getMarkStyles(project: Project): Seq[ScoreMarkStyle] = {
val builder = OqlBuilder.from(classOf[GradeRateConfig], "config")
.where("config.project=:project", project)
.cacheable()
val rs = entityDao.search(builder)
if (rs.isEmpty) {
projectCodeService.getCodes(project, classOf[ScoreMarkStyle])
} else {
rs.map(_.markStyle)
}
}
}
| openurp/edu-core | grade/core/src/main/scala/org/openurp/edu/grade/course/domain/impl/DefaultGradeRateService.scala | Scala | gpl-3.0 | 4,528 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600.v2.validation
import uk.gov.hmrc.ct.box.CtValidation
import uk.gov.hmrc.ct.ct600.v2.retriever.ReturnStatementsBoxRetriever
trait RSQ7MutuallyExclusiveWithRSQ8 {
private def error(boxId: String) = CtValidation(Some(boxId), s"error.$boxId.mutuallyExclusive")
def validateMutualExclusivity(boxRetriever: ReturnStatementsBoxRetriever): Set[CtValidation] =
(boxRetriever.rsq7().value, boxRetriever.rsq8().value) match {
case (Some(rsq7), Some(rsq8)) if rsq7 && rsq8 => Set(error("RSQ7"), error("RSQ8"))
case _ => Set.empty
}
}
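// Hedged illustration (added; not part of the original file): only the
// combination rsq7 == Some(true) && rsq8 == Some(true) fails, yielding
//   Set(CtValidation(Some("RSQ7"), "error.RSQ7.mutuallyExclusive"),
//       CtValidation(Some("RSQ8"), "error.RSQ8.mutuallyExclusive"))
// every other combination of answers returns Set.empty.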
| hmrc/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600/v2/validation/RSQ7MutuallyExclusiveWithRSQ8.scala | Scala | apache-2.0 | 1,185 |
package keystoneml.nodes.stats
import breeze.linalg._
import org.apache.spark.SparkContext
import org.scalatest.FunSuite
import keystoneml.pipelines.Logging
import keystoneml.utils.Stats
import keystoneml.workflow.PipelineContext
class PaddedFFTSuite extends FunSuite with PipelineContext with Logging {
test("Test PaddedFFT node") {
sc = new SparkContext("local", "test")
// Set up a test matrix.
val ones = DenseVector.zeros[Double](100)
val twos = DenseVector.zeros[Double](100)
ones(0) = 1.0
twos(2) = 1.0
val x = sc.parallelize(Seq(twos, ones))
val fftd = PaddedFFT().apply(x).collect()
val twosout = fftd(0)
val onesout = fftd(1)
// Proof by agreement w/ R: Re(fft(c(0, 0, 1, rep(0, 125))))
assert(twosout.length === 64)
assert(Stats.aboutEq(twosout(0), 1.0))
assert(Stats.aboutEq(twosout(16), 0.0))
assert(Stats.aboutEq(twosout(32), -1.0))
assert(Stats.aboutEq(twosout(48), 0.0))
// Proof by agreement w/ R: Re(fft(c(1, rep(0, 127))))
assert(Stats.aboutEq(onesout, DenseVector.ones[Double](64)))
}
}
| amplab/keystone | src/test/scala/keystoneml/nodes/stats/PaddedFFTSuite.scala | Scala | apache-2.0 | 1,091 |
package macrolog.auto
import scala.annotation.tailrec
import scala.language.experimental.macros
/**
* Inspired by com.lihaoyi.Sourcecode
*
* @author Maksim Ochenashko
*/
case class Position(enclosingClass: String, enclosingMethod: Option[String], fullPosition: String)
object Position {
implicit def generate: Position = macro impl
def impl(c: scala.reflect.macros.blackbox.Context): c.Expr[Position] = {
import c.universe._
val owner = c.internal.enclosingOwner
@tailrec
def extract(s: Symbol)(p: Symbol => Boolean): Option[Symbol] =
if (s == null || s == NoSymbol) None
else if (p(s)) Some(s)
else extract(s.owner)(p)
val enclosingClass = extract(owner) { s =>
(s.name.decodedName.toString != "$anonfun") &&
(s.name.decodedName.toString != "$anon") &&
(s.isClass || s.isModuleClass || s.isModule || s.isPackage || s.isPackageClass)
}
if (enclosingClass.isEmpty) c.abort(c.enclosingPosition, "Can not detect enclosing element (class, object, package)")
val enclosingMethod = extract(owner) { s =>
s.isMethod && s.name.decodedName.toString != "applyOrElse"
}
val className = enclosingClass.get.name.toString.trim
val methodName = enclosingMethod.map(_.name.toString)
val fullName = methodName match {
case Some(m) => enclosingClass.get.fullName + "." + m + ":" + owner.pos.line
case None => enclosingClass.get.fullName + ":" + owner.pos.line
}
c.Expr[macrolog.auto.Position](q"""${c.prefix}($className, $methodName, $fullName)""")
}
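  // Hedged usage sketch (added; not part of the original source): because
  // `generate` is an implicit macro, a Position materializes at the call site,
  // capturing the enclosing class and method, e.g.
  //
  //   class Service {
  //     def handle(): String = implicitly[Position].fullPosition
  //     // yields something like "com.example.Service.handle:<line>"
  //   }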
} | iRevive/macrolog | src/main/scala/macrolog/auto/Position.scala | Scala | mit | 1,580 |
/*
* Copyright (C)2014 D. Plaindoux.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
package smallibs.page
import org.specs2.mutable._
import scala.util.Success
import smallibs.page.engine.Engine
import smallibs.page.syntax.PageParser
object EngineGeneratorTest extends Specification {
"Template and generator should" should {
"provides a result with an empty" in {
val template = PageParser.parseAll(PageParser.template, "")
val engine = Engine(Provider.empty)
engine.generate(template.get) mustEqual Success(Some(""))
}
"provides a result with an input text" in {
val template = PageParser.parseAll(PageParser.template, "Hello, World")
val engine = Engine(Provider.empty)
engine.generate(template.get) mustEqual Success(Some("Hello, World"))
}
"provides a result with an input Ident" in {
val template = PageParser.parseAll(PageParser.template, "@VAL::hello")
val engine = Engine(Provider.record("hello" -> Provider.constant("World")))
engine.generate(template.get) mustEqual Success(Some("World"))
}
"provides a result with an input sequence" in {
val template = PageParser.parseAll(PageParser.template, "@VAL::hello, @VAL::world!")
val engine = Engine(Provider.record(
"hello" -> Provider.constant("Hello"),
"world" -> Provider.constant("World")
))
engine.generate(template.get) mustEqual Success(Some("Hello, World!"))
}
"provides a result with an anonymous repeatable" in {
val template = PageParser.parseAll(PageParser.template, "@REP[| - @VAL|]")
val engine = Engine(Provider.set(
Provider.constant("Hello"), Provider.constant("World")
))
engine.generate(template.get) mustEqual Success(Some(" - Hello - World"))
}
"provides a result with a named repeatable" in {
val template = PageParser.parseAll(PageParser.template, "@REP::keys[| - @VAL|]")
val engine = Engine(Provider.record(
"keys" -> Provider.set(Provider.constant("Hello"), Provider.constant("World"))
))
engine.generate(template.get) mustEqual Success(Some(" - Hello - World"))
}
"provides a result with a named complex repeatable" in {
val template = PageParser.parseAll(PageParser.template, "@REP::keys[| - @VAL::name|]")
val engine = Engine(Provider.record(
"keys" ->
Provider.set(
Provider.record("name" -> Provider.constant("Hello")),
Provider.record("name" -> Provider.constant("World"))
)
))
engine.generate(template.get) mustEqual Success(Some(" - Hello - World"))
}
"provides a result using Define and Use" in {
val template = PageParser.parseAll(PageParser.template, "@DEFINE::keys[|@REP::keys[| - @VAL::name|]|]\\n@USE::keys")
val engine = Engine(Provider.record(
"keys" ->
Provider.set(
Provider.record("name" -> Provider.constant("Hello")),
Provider.record("name" -> Provider.constant("World"))
)
))
engine.generate(template.get) mustEqual Success(Some(" - Hello - World"))
}
}
} | d-plaindoux/page | src/test/scala/smallibs/page/EngineGeneratorTest.scala | Scala | lgpl-2.1 | 3,829 |
package gapt.examples.tip.prod
import gapt.expr._
import gapt.expr.ty.TBase
import gapt.proofs.context.update.InductiveType
import gapt.proofs.Sequent
import gapt.proofs.gaptic._
import gapt.provers.viper.aip.AnalyticInductionProver
object prop_33 extends TacticsProof {
// Sorts
ctx += TBase( "sk" )
// Inductive types
ctx += InductiveType( ty"Nat", hoc"'Z' :Nat", hoc"'S' :Nat>Nat" )
//Function constants
ctx += hoc"'plus' :Nat>Nat>Nat"
ctx += hoc"'one' :Nat"
ctx += hoc"'mult' :Nat>Nat>Nat"
ctx += hoc"'qfac' :Nat>Nat>Nat"
ctx += hoc"'fac' :Nat>Nat"
val sequent =
hols"""
def_p: ∀x0 (p(S(x0:Nat): Nat): Nat) = x0,
def_plus_0: ∀y (plus(#c(Z: Nat), y:Nat): Nat) = y,
def_plus_1: ∀z ∀y (plus(S(z:Nat): Nat, y:Nat): Nat) = S(plus(z, y)),
def_one_0: (one:Nat) = S(#c(Z: Nat)),
def_mult_0: ∀y (mult(#c(Z: Nat), y:Nat): Nat) = #c(Z: Nat),
def_mult_1: ∀z ∀y (mult(S(z:Nat): Nat, y:Nat): Nat) = plus(y, mult(z, y)),
def_qfac_0: ∀y (qfac(#c(Z: Nat), y:Nat): Nat) = y,
def_qfac_1: ∀z ∀y (qfac(S(z:Nat): Nat, y:Nat): Nat) = qfac(z, mult(S(z), y)),
def_fac_0: (fac(#c(Z: Nat)): Nat) = S(#c(Z: Nat)),
def_fac_1: ∀y (fac(S(y:Nat): Nat): Nat) = mult(S(y), fac(y)),
constr_inj_0: ∀y0 ¬#c(Z: Nat) = S(y0:Nat)
:-
goal: ∀x (fac(x:Nat): Nat) = qfac(x, one:Nat)
"""
val plus_axioms = List(
"ap1" -> hof"∀y plus(Z, y) = y",
"ap2" -> hof"∀z ∀y plus(S(z), y) = S(plus(z, y))" )
val mult_axioms = List(
"am1" -> hof"∀y mult(Z, y) = Z",
"am2" -> hof"∀z ∀y mult(S(z), y) = plus(y, mult(z, y))" )
val fac_axioms = List(
"af1" -> hof"fac(Z) = S(Z)",
"af2" -> hof"∀y fac(S(y)) = mult(S(y), fac(y))" )
val qfac_axioms = List(
"aq1" -> hof"∀y qfac(Z, y) = y",
"aq2" -> hof"∀z ∀y qfac(S(z), y) = qfac(z, mult(S(z), y))" )
val plus_right_s_goal = hof"!x !y plus(x,S(y)) = S(plus(x,y))"
val plus_right_s = (
plus_axioms ++:
Sequent() :+ ( "" -> plus_right_s_goal ) )
val plus_right_s_proof = AnalyticInductionProver.singleInduction( plus_right_s, hov"x:Nat" )
val plus_z_neutral_goal = hof"!x plus(x,Z) = x"
val plus_z_neutral = (
plus_axioms ++:
Sequent() :+ ( "" -> plus_z_neutral_goal ) )
val plus_z_neutral_proof = AnalyticInductionProver.singleInduction( plus_z_neutral, hov"x:Nat" )
val plus_comm_goal = hof"!x !y plus(x,y) = plus(y,x)"
val plus_comm = (
plus_axioms ++:
( "prs" -> plus_right_s_goal ) +:
( "pzn" -> plus_z_neutral_goal ) +: Sequent() :+ ( "goal" -> plus_comm_goal ) )
val plus_comm_proof = Lemma( plus_comm ) {
allR; induction( hov"x:Nat" )
//- IB
decompose
rewrite ltr "ap1" in "goal"
rewrite ltr "pzn" in "goal"; refl
//- IS
decompose
rewrite ltr "ap2" in "goal"
rewrite ltr "IHx_0" in "goal"
rewrite ltr "prs" in "goal"; refl
}
val plus_assoc_goal = hof"!x !y !z plus(plus(x,y),z) = plus(x,plus(y,z))"
val plus_assoc = ( plus_axioms ++:
Sequent() :+ ( "goal" -> plus_assoc_goal ) )
val plus_assoc_proof = AnalyticInductionProver.singleInduction( plus_assoc, hov"x:Nat" )
val mult_z_zero_goal = hof"!x mult(x,Z) = Z"
val mult_z_zero = ( plus_axioms ++: mult_axioms ++:
Sequent() :+ ( "goal" -> mult_z_zero_goal ) )
val mult_z_zero_proof = AnalyticInductionProver.singleInduction( mult_z_zero, hov"x:Nat" )
val mult_dist_law_1_goal = hof"!x !y !z mult(x, plus(y,z)) = plus(mult(x,y),mult(x,z))"
val mult_dist_law_1 = ( plus_axioms ++: mult_axioms ++:
( "pcm" -> plus_comm_goal ) +:
( "pas" -> plus_assoc_goal ) +:
Sequent() :+ ( "goal" -> mult_dist_law_1_goal ) )
val mult_dist_law_1_proof = Lemma( mult_dist_law_1 ) {
allR; induction( hov"x:Nat" )
decompose
rewrite.many ltr "am1" in "goal"
rewrite.many ltr "ap1" in "goal"; refl
//- IS
decompose
rewrite.many ltr "am2" in "goal"
escargot
}
val mult_dist_law_2_goal = hof"!x !y !z mult(plus(x,y),z) = plus(mult(x,z),mult(y,z))"
val mult_dist_law_2 = ( plus_axioms ++: mult_axioms ++:
( "plus_assoc" -> plus_assoc_goal ) +:
Sequent() :+ ( "goal" -> mult_dist_law_2_goal ) )
val mult_dist_law_2_proof = Lemma( mult_dist_law_2 ) {
allR; induction( hov"x:Nat" )
//- IB
decompose
rewrite.many ltr "am1" in "goal"
rewrite.many ltr "ap1" in "goal"
refl
//- IS
decompose
rewrite.many ltr "am2" in "goal"
rewrite.many ltr "ap2" in "goal"
rewrite.many ltr "plus_assoc" in "goal"
rewrite.many rtl "IHx_0" in "goal"
rewrite.many ltr "am2" in "goal"
refl
}
val mult_one_right_id_goal = hof"!x mult(x, S(Z)) = x"
val mult_one_right_id = (
plus_axioms ++: mult_axioms ++: Sequent() :+ ( "" -> mult_one_right_id_goal ) )
val mult_one_right_id_proof = AnalyticInductionProver.singleInduction( mult_one_right_id, hov"x:Nat" )
val mult_comm_goal = hof"!x !y mult(x,y) = mult(y,x)"
val mult_comm = ( plus_axioms ++: mult_axioms ++:
( "mzz" -> mult_z_zero_goal ) +:
( "md1" -> mult_dist_law_1_goal ) +:
( "m1i" -> mult_one_right_id_goal ) +:
Sequent() :+ ( "goal" -> mult_comm_goal ) )
val mult_comm_proof = Lemma( mult_comm ) {
allR; induction( hov"x:Nat" )
//- IB
decompose
rewrite ltr "am1" in "goal"
escargot
//- IS
decompose
rewrite ltr "am2" in "goal"
rewrite ltr "IHx_0" in "goal"
allL( "m1i", le"y:Nat" ); rewrite rtl "m1i_0" in "goal"
rewrite rtl "md1" in "goal"
rewrite ltr "ap2" in "goal"
rewrite ltr "ap1" in "goal"; refl
}
val lemma_23_goal = hof"!x !y !z mult(x,mult(y,z)) = mult(mult(x,y),z)"
val lemma_23 = ( plus_axioms ++: mult_axioms ++:
( "dl2" -> mult_dist_law_2_goal ) +:
Sequent() :+ ( "goal" -> lemma_23_goal ) )
val lemma_23_proof = Lemma( lemma_23 ) {
allR; induction( hov"x:Nat" )
escargot
allR; allR
rewrite.many ltr "am2" in "goal"
rewrite ltr "IHx_0" in "goal"
rewrite ltr "dl2" in "goal"
refl
}
val cong_10_goal = hof"!x !y mult(fac(x),y) = qfac(x,y)"
val cong_10 = ( plus_axioms ++: mult_axioms ++: fac_axioms ++: qfac_axioms ++:
( "l23" -> lemma_23_goal ) +:
( "lea" -> plus_z_neutral_goal ) +:
( "lec" -> mult_comm_goal ) +:
( "led" -> mult_dist_law_2_goal ) +:
Sequent() :+ ( "goal" -> cong_10_goal ) )
val cong_10_proof = Lemma( cong_10 ) {
allR; induction( hov"x:Nat" )
//- IB
allR
rewrite ltr "af1" in "goal"
rewrite ltr "aq1" in "goal"
rewrite ltr "am2" in "goal"
rewrite ltr "am1" in "goal"
escargot
//- IS
allR
rewrite ltr "af2" in "goal"
rewrite ltr "aq2" in "goal"
rewrite.many ltr "am2" in "goal"
rewrite rtl "IHx_0" in "goal"
escargot
}
val proof = Lemma( sequent ) {
cut( "", plus_right_s_goal ); insert( plus_right_s_proof )
cut( "", plus_z_neutral_goal ); insert( plus_z_neutral_proof )
cut( "", plus_comm_goal ); insert( plus_comm_proof )
cut( "", plus_assoc_goal ); insert( plus_assoc_proof )
cut( "", mult_z_zero_goal ); insert( mult_z_zero_proof )
cut( "", mult_dist_law_1_goal ); insert( mult_dist_law_1_proof )
cut( "", mult_dist_law_2_goal ); insert( mult_dist_law_2_proof )
cut( "", mult_one_right_id_goal ); insert( mult_one_right_id_proof )
cut( "", mult_comm_goal ); insert( mult_comm_proof )
cut( "", lemma_23_goal ); insert( lemma_23_proof )
cut( "", cong_10_goal ); insert( cong_10_proof )
escargot
}
}
| gapt/gapt | examples/tip/prod/prop_33.scala | Scala | gpl-3.0 | 7,534 |