code | repo_name | path | language | license | size |
---|---|---|---|---|---|
/**
* Copyright (C) 2015 DANS - Data Archiving and Networked Services ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.knaw.dans.easy.archivebag.command
import java.io.File
import java.net.{ MalformedURLException, URI, URL }
import java.nio.file.Paths
import java.util.UUID
import nl.knaw.dans.easy.archivebag.{ BagId, Parameters }
import nl.knaw.dans.lib.logging.DebugEnhancedLogging
import nl.knaw.dans.lib.string._
import org.rogach.scallop.{ ScallopConf, ScallopOption, ValueConverter, singleArgConverter, stringConverter }
object CommandLineOptions extends DebugEnhancedLogging {
def parse(args: Array[String]): Parameters = {
debug("Loading application.properties ...")
val configuration = Configuration(Paths.get(System.getProperty("app.home")))
debug("Parsing command line ...")
val cmd = new ScallopCommandLine(configuration, args)
cmd.verify()
val settings = Parameters(
username = cmd.username(),
password = cmd.password(),
bagDir = cmd.bagDirectory(),
tempDir = new File(configuration.properties.getString("tempdir")),
storageDepositService = cmd.bagStoreUrl(),
bagIndexService = new URI(configuration.properties.getString("bag-index.uri")),
validateDansBagService = new URI(configuration.properties.getString("validate-bag.uri")),
readTimeOut = configuration.properties.getString("validate-bag.read-timeout").toInt,
bagId = cmd.uuid(),
userAgent = s"easy-archive-bag/${ configuration.version }"
)
debug(s"Using the following settings: $settings")
settings
}
}
class ScallopCommandLine(configuration: Configuration, args: Array[String]) extends ScallopConf(args) {
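  // Implicit Scallop value converters used by the trailing arguments below: parse <uuid> into the
  // UUID-based BagId, and <storage-service-url> into a URL with a guaranteed trailing slash so it
  // can safely serve as a base URL.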
private implicit val uuidParser: ValueConverter[UUID] = stringConverter.flatMap(_.toUUID.fold(e => Left(e.getMessage), uuid => Right(Option(uuid))))
private implicit val urlConverter: ValueConverter[URL] = singleArgConverter(s => new URL(addTrailingSlashIfNeeded(s)), {
case e: MalformedURLException => Left(s"bad URL, ${ e.getMessage }")
})
private def addTrailingSlashIfNeeded(s: String): String = {
if (s endsWith "/") s
else s"$s/"
}
appendDefaultToDescription = true
editBuilder(_.setHelpWidth(110))
printedName = "easy-archive-bag"
version(s"$printedName ${ configuration.version }")
banner(
s"""
|Send a bag to archival storage.
|
|Usage: $printedName <bag-directory> <uuid> <storage-service-url>
|Options:
|""".stripMargin)
val username: ScallopOption[String] = opt[String](
name = "username",
short = 'u',
descr = "Username to use for authentication/authorisation to the storage service",
required = true,
)
val password: ScallopOption[String] = opt[String](
name = "password",
short = 'p',
descr = "Password to use for authentication/authorisation to the storage service",
required = true,
)
val bagDirectory: ScallopOption[File] = trailArg[File](
name = "bag-directory",
descr = "Directory in BagIt format that will be sent to archival storage",
)
val uuid: ScallopOption[BagId] = trailArg[BagId](
name = "uuid",
descr = "Identifier for the bag in archival storage",
)
val bagStoreUrl: ScallopOption[URL] = trailArg[URL](
name = "bag-store-url",
descr = "base url to the store in which the bag needs to be archived",
)
validateFileExists(bagDirectory)
validateOpt(bagDirectory) {
    case Some(_) => Right(())
case _ => Left("Could not parse parameter <bag-directory>")
}
}
| DANS-KNAW/easy-archive-bag | command/src/main/scala/nl.knaw.dans.easy.archivebag.command/CommandLineOptions.scala | Scala | apache-2.0 | 4,080 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.testsuite.javalib.util
import java.{util => ju}
import org.junit.Assert._
import org.junit.Assume._
import org.junit.Test
import org.scalajs.testsuite.utils.AssertThrows._
import org.scalajs.testsuite.utils.Platform._
import scala.collection.JavaConverters._
import scala.reflect.ClassTag
trait CollectionsOnCheckedMapTest extends CollectionsOnMapsTest {
def originalFactory: MapFactory
def factory: MapFactory = {
new MapFactory {
override def implementationName: String =
s"checkedMap(${originalFactory.implementationName})"
def empty[K, V](implicit kct: ClassTag[K], vct: ClassTag[V]): ju.Map[K, V] = {
ju.Collections.checkedMap(originalFactory.empty[K, V],
kct.runtimeClass.asInstanceOf[Class[K]],
vct.runtimeClass.asInstanceOf[Class[V]])
}
override def allowsNullKeys: Boolean =
originalFactory.allowsNullKeys
override def allowsNullValues: Boolean =
originalFactory.allowsNullValues
}
}
@Test def testCheckedMap(): Unit = {
assertNull(superMap().put(new C, new C))
}
@Test def testCheckedMapBadInputs(): Unit = {
assumeTrue("Assumed compliant asInstanceOf", hasCompliantAsInstanceOfs)
expectThrows(classOf[ClassCastException], superMap().put(new A, new C))
expectThrows(classOf[ClassCastException], superMap().put(new C, new A))
expectThrows(classOf[ClassCastException], superMap().put(new A, new A))
def singletonMap(): ju.Map[A, A] = {
val m = factory.empty[B, B]
m.put(new C, new C)
m.asInstanceOf[ju.Map[A, A]]
}
expectThrows(classOf[ClassCastException],
singletonMap().entrySet().asScala.head.setValue(new A))
}
private def superMap(): ju.Map[A, A] =
factory.empty[B, B].asInstanceOf[ju.Map[A, A]]
}
trait CollectionsOnCheckedSortedMapTest extends CollectionsOnSortedMapsTest {
def originalFactory: SortedMapFactory
def factory: SortedMapFactory = {
new SortedMapFactory {
override def implementationName: String =
s"checkedSortedMap(${originalFactory.implementationName})"
def empty[K, V](implicit kct: ClassTag[K], vct: ClassTag[V]): ju.SortedMap[K, V] = {
ju.Collections.checkedSortedMap(originalFactory.empty[K, V],
kct.runtimeClass.asInstanceOf[Class[K]],
vct.runtimeClass.asInstanceOf[Class[V]])
}
override def allowsNullKeys: Boolean =
originalFactory.allowsNullKeys
override def allowsNullValues: Boolean =
originalFactory.allowsNullValues
}
}
@Test def testCheckedMap(): Unit = {
assertNull(superMap().put(new C, new C))
}
@Test def testCheckedMapBadInputs(): Unit = {
assumeTrue("Assumed compliant asInstanceOf", hasCompliantAsInstanceOfs)
expectThrows(classOf[ClassCastException], superMap().put(new A, new C))
expectThrows(classOf[ClassCastException], superMap().put(new C, new A))
expectThrows(classOf[ClassCastException], superMap().put(new A, new A))
def singletonMap(): ju.Map[A, A] = {
val m = factory.empty[B, B]
m.put(new C, new C)
m.asInstanceOf[ju.Map[A, A]]
}
expectThrows(classOf[ClassCastException],
singletonMap().entrySet().asScala.head.setValue(new A))
}
private def superMap(): ju.Map[A, A] =
factory.empty[B, B].asInstanceOf[ju.Map[A, A]]
}
class CollectionsOnCheckedMapOnHashMapTest
extends CollectionsOnCheckedMapTest {
def originalFactory: MapFactory = new HashMapFactory
}
class CollectionsOnCheckedMapOnLinkedHashMapInsertionOrderTest
extends CollectionsOnCheckedMapTest {
def originalFactory: MapFactory = new LinkedHashMapFactory(false, None)
}
class CollectionsOnCheckedMapOnLinkedHashMapInsertionOrderWithLimitTest
extends CollectionsOnCheckedMapTest {
def originalFactory: MapFactory = new LinkedHashMapFactory(false, Some(50))
}
class CollectionsOnCheckedMapOnLinkedHashMapAccessOrderTest
extends CollectionsOnCheckedMapTest {
def originalFactory: MapFactory = new LinkedHashMapFactory(true, None)
}
class CollectionsOnOnCheckedMapOnLinkedHashMapAccessOrderWithLimitTest
extends CollectionsOnCheckedMapTest {
def originalFactory: MapFactory = new LinkedHashMapFactory(true, Some(50))
}
| SebsLittleHelpers/scala-js | test-suite/shared/src/test/scala/org/scalajs/testsuite/javalib/util/CollectionsOnCheckedMapTest.scala | Scala | apache-2.0 | 4,535 |
package uk.co.pragmasoft.experiments.bigdata.spark
import org.scalatest.{FlatSpec, Matchers}
class CSVOperationsSpec extends FlatSpec with Matchers with SparkTestSupport {
import CSVOperations._
behavior of "CSVReaderOperations"
it should "Convert an RDD of strings into an RDD of Array[String] splitting the values with comma" in withSparkContext { sc =>
val stringRdd = sc.parallelize( List( "1field1,1field2,1field3", "2field1,2field2,2field3" ) )
stringRdd.extractCSV().collect() shouldEqual ( Array( Array("1field1" ,"1field2", "1field3" ), Array("2field1" ,"2field2", "2field3" ) ) )
}
it should "Strip header if asked so" in withSparkContext { sc =>
val stringRdd = sc.parallelize( List( "header1,header2,header3", "1field1,1field2,1field3", "2field1,2field2,2field3" ) )
stringRdd.extractCSV(true).collect() shouldEqual ( Array( Array("1field1" ,"1field2", "1field3" ), Array("2field1" ,"2field2", "2field3" ) ) )
}
behavior of "CSVWriterOperations"
}
| galarragas/spark-experiments | src/test/scala/uk/co/pragmasoft/experiments/bigdata/spark/CSVOperationsSpec.scala | Scala | apache-2.0 | 1,004 |
package mr.merc.map.hex
import mr.merc.map.ShortestGrid
import mr.merc.map.hex.Direction._
import mr.merc.map.pathfind.PathFinder
import scala.collection.concurrent.TrieMap
abstract class AbstractHexField[T <: Hex](init: (Int, Int) => T) {
def isLegalCoords(x: Int, y: Int): Boolean
def hex(x: Int, y: Int): T
def hexOpt(x:Int, y:Int): Option[T] = {
if (isLegalCoords(x, y)) Some(hex(x, y)) else None
}
def neighbours(hex: T): List[T] = neighbours(hex.x, hex.y)
def neighboursSet(hex: T): Set[T] = neighbours(hex).toSet
def neighbours(x: Int, y: Int): List[T] = neighboursList(x, y)
def neighboursSet(x: Int, y: Int): Set[T] = neighboursList(x, y).toSet
private val cachedNeighbours = TrieMap[(Int, Int), List[T]]()
private def neighboursList(x: Int, y: Int): List[T] = {
cachedNeighbours.getOrElseUpdate((x, y), {
val allNeighboursCoords = neighboursListWithInvalid(x, y)
val correctCoords = allNeighboursCoords.filter(h => isLegalCoords(h._1, h._2))
correctCoords.map(h => hex(h._1, h._2))
})
}
private def neighboursListWithInvalid(x: Int, y: Int): List[(Int, Int)] = {
    // columns are zero-indexed, so x % 2 != 0 selects the second, fourth, ... column ("even" in one-based terms)
    val corrections = correctionsList(x % 2 != 0)
corrections.map(h => (h._1 + x, h._2 + y))
}
def neighboursWithDirections(hex: T): Map[Direction, T] = neighboursWithDirections(hex.x, hex.y)
def neighboursWithDirections(x: Int, y: Int): Map[Direction, T] = {
val resultList = (directionsList zip neighboursListWithInvalid(x, y)).filter(dh => isLegalCoords(dh._2._1, dh._2._2))
resultList.map(df => (df._1, hex(df._2._1, df._2._2))).toMap
}
def direction(hex: T, neig: T):Direction = neighboursWithDirections(hex).find(_._2 == neig).get._1
private def correctionsList(even: Boolean): List[(Int, Int)] = {
even match {
case true => List((-1, 0), (0, -1), (1, 0), (1, 1), (0, 1), (-1, 1))
case false => List((-1, -1), (0, -1), (1, -1), (1, 0), (0, 1), (-1, 0))
}
}
private val directionsList = List(NW, N, NE, SE, S, SW)
def distance(from: T, to: T):Int = from.distance(to)
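  /** The ring of hexes exactly `radius` steps from `hex` (the hex itself for radius 0); off-field coordinates are skipped. */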
def hexRing(hex: T, radius:Int):List[T] = {
if (radius == 0) List(hex)
else {
val cube = hex.toCubeHex.neighbour(SW, radius)
val movements = for {
dir <- List(SE, NE, N, NW, SW, S)
_ <- 0 until radius
} yield {
dir
}
val cubes = movements.scanLeft(cube) {(c, direction) =>
c.neighbour(direction)
}
cubes.tail.flatMap {c =>
val h = c.toHex
this.hexOpt(h.x, h.y)
}
}
}
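  /** Straight line of hexes from `from` to `to`, approximated by rounding a linear interpolation of the offset coordinates. */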
def line(from: T, to: T): List[T] = {
val n = from.distance(to)
(0 to n).map { i =>
val xx = Math.round(from.x + i * (to.x - from.x).toFloat / n)
val yy = Math.round(from.y + i * (to.y - from.y).toFloat / n)
hex(xx, yy)
} toList
}
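  /** Hexes of the field ordered by increasing distance from `hex`, produced ring by ring until a ring lies entirely outside the field. */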
def closest(hex:T):LazyList[T] = {
LazyList.from(0).map(radius => hexRing(hex, radius)).takeWhile(_.nonEmpty).flatten
}
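  /** Hexes ordered by increasing distance to the nearest member of `hexes`; each hex is emitted at most once. */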
def closest(hexes:Set[T]):LazyList[T] = {
case class Step(allPrev: Set[T], current: Set[T], step: Int)
def close:LazyList[Step] = Step(Set(), hexes, 0) #:: close.zip(LazyList.from(1)).map { case (prevStep, radius) =>
val allPrev = prevStep.allPrev ++ prevStep.current
val current = hexes.flatMap(hexRing(_, radius)) -- allPrev
Step(allPrev, current, radius)
}.takeWhile(_.current.nonEmpty)
close.flatMap(_.current)
}
def findClosest(start: T, predicate: T => Boolean):Option[T] = {
closest(start).find(predicate)
}
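  /** Path between two hexes over unit-cost edges, delegating to PathFinder with the cube-coordinate hex distance as heuristic. */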
def findPath(from: T, to: T, blocking:T => Boolean, greedy:Boolean = false):Option[List[T]] = {
val grid = new ShortestGrid[T] {
override def isBlocked(t: T) = blocking(t)
override def price(from: T, to: T): Double = 1
override def neighbours(t: T): List[T] = AbstractHexField.this.neighbours(t)
override def heuristic(from: T, to: T): Double = {
val a = from.toCubeHex
val b = to.toCubeHex
import scala.math._
max(abs(a.x - b.x), max(abs(a.y - b.y), abs(a.z - b.z)))
}
}
PathFinder.findPath(grid, from, to, greedy)
}
def findPathForRiver(from: T, to: T, blocking:T => Boolean):Option[List[T]] = {
val grid = new ShortestGrid[T] {
override def isBlocked(t: T) = blocking(t)
override def price(from: T, to: T): Double = 1
override def neighbours(t: T): List[T] = AbstractHexField.this.neighbours(t)
override def heuristic(from: T, to: T): Double = {
val a = from.toCubeHex
val b = to.toCubeHex
import scala.math._
max(abs(a.x - b.x), max(abs(a.y - b.y), abs(a.z - b.z)))
}
}
PathFinder.findPathForRiver(grid, from, to)
}
} | RenualdMarch/merc | src/main/scala/mr/merc/map/hex/AbstractHexField.scala | Scala | gpl-3.0 | 4,745 |
package com.evojam.mongodb.client.cursor
import scala.concurrent.{ExecutionContext, Future}
import org.bson.codecs.Codec
import rx.lang.scala.Observable
import com.evojam.mongodb.client.codec.Reader
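/**
 * Common decoding layer for cursor implementations: the public methods decode raw documents through
 * an implicit [[Reader]] and expose the results as Scala Futures or Rx Observables, delegating the
 * actual driver interaction to the protected raw* methods.
 */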
trait Cursor {
def head[T]()(implicit r: Reader[T], exc: ExecutionContext): Future[T] =
rawHead()(r.codec, exc)
.toList
.map(_.head)
.map(r.read)
.toBlocking.toFuture
def headOpt[T]()(implicit r: Reader[T], exc: ExecutionContext): Future[Option[T]] =
rawHead()(r.codec, exc)
.toList
.map(_.headOption)
.map(_.map(r.read))
.toBlocking.toFuture
def foreach[T](f: T => Unit)(implicit r: Reader[T], exc: ExecutionContext): Unit =
rawForeach((r.read _) andThen f)(r.codec, exc)
def observable[T]()(implicit r: Reader[T], exc: ExecutionContext): Observable[T] =
rawObservable()(r.codec, exc)
.map(r.read)
def observable[T](batchSize: Int)(implicit r: Reader[T], exc: ExecutionContext): Observable[List[T]] =
rawObservable(batchSize)(r.codec, exc)
.map(_.map(r.read))
def collect[T]()(implicit r: Reader[T], exc: ExecutionContext): Future[List[T]] =
rawObservable()(r.codec, exc)
.map(r.read)
.toList.toBlocking.toFuture
protected def rawHead[T: Codec]()(implicit exc: ExecutionContext): Observable[T]
protected def rawForeach[T: Codec](f: T => Unit)(implicit exc: ExecutionContext): Unit
protected def rawObservable[T: Codec]()(implicit exc: ExecutionContext): Observable[T]
protected def rawObservable[T: Codec](batchSize: Int)(implicit exc: ExecutionContext): Observable[List[T]]
}
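// Usage sketch (hypothetical names: `collection`, `query`, `Person`, and an implicit Reader[Person]
// plus ExecutionContext are assumed to be in scope; they are not defined in this file):
//
//   val cursor: Cursor = collection.find(query)
//   val firstOpt: Future[Option[Person]] = cursor.headOpt[Person]()
//   val all: Future[List[Person]] = cursor.collect[Person]()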
| evojam/mongodb-driver-scala | src/main/scala/com/evojam/mongodb/client/cursor/Cursor.scala | Scala | apache-2.0 | 1,606 |
package tests.rescala.misc
import tests.rescala.testtools.RETests
class ReactiveCreationInTurnsTest extends RETests {
multiEngined { engine =>
import engine._
test("evaluations Of Inner Signals") {
val v1 = Var(5)
val c1 = Var(0)
val v2 = Signal {
val _ = v1.value
var res = 0
c1.map(x => { res += 1; x })
res
}
assert(v2.readValueOnce === 1, "unrelated signal should only be evaluated once on creation")
v1.set(100)
assert(v2.readValueOnce === 1, "unrelated signal should only be evaluated once on change")
}
test("evaluations Of Inner Related Signals") {
val v1 = Var(5)
val v2 = Signal {
val _ = v1.value
var res = 0
v1.map(x => { res += 1; x })
res
}
assert(
v2.readValueOnce === 1,
"related signal is only be evaluated once on creation (this behaviour is actually undefined)"
)
v1.set(100)
assert(
v2.readValueOnce === 1,
"related signal should be evaluated once on change (this behaviour is actually undefined)"
)
}
test("change Of Created Signal") {
engine.transaction() { implicit t =>
val v1 = engine.Var(0)
val v2 = v1.map(_ + 1)
v1.change.observe(v => fail(s"created signals should not change, but change was $v"))
v2.change.observe(v => fail(s"created mapped signals should not change, but change was $v"))
}
// {
// val v1 = Var(0)
// var v2: Signal[Int] = null
// var v1changedFired = false
// implicitEngine.transaction(v1) { implicit t =>
// val c1 = v1.change
// c1.observe(v => v1changedFired = true)
// v2 = v1.map(_ + 1)
// val c2 = v2.change
// c2.observe(v => fail("created mapped signals should not change when admitting in same turn, but change was " + v))
// v1.admit(10)
// }
// assert(v1changedFired, "created change events should fire when admitting in same turn, but did not.")
// assert(v1.now == 10)
// assert(v2.now == 11)
// }
{
val v1 = Var(0)
val v2 = v1.map(_ + 1)
var o1 = false
var o2 = false
v1.change.observe(_ => o1 = true)
v2.change.observe(_ => o2 = true)
assert(!o1, "created signals do not change outside of turn during creation")
assert(!o2, "created mapped signals do not change outside of turn during creation")
v1.set(10)
assert(o1, "created signals do change outside of turn")
assert(o2, "created mapped signals do change outside of turn")
}
}
// test("create changes during reevaluation"){
// val v = Var(1)
// val mapped = v.map(_ + 0)
//
// val sm = Signal { mapped.change.apply() }
// val sd = dynamic() {t => t.depend(mapped.change(CreationTicket.fromTicketDImplicit(t, implicitly))) }
//
//
// //intercept[NoSuchElementException](sm.now)
// //assert(sm.now.isEmpty)
// //assert(sd.now.isEmpty)
//
// v.set(2)
//
// assert(sm.now.get.pair == 1 -> 2)
// assert(sd.now.get.pair == 1 -> 2)
//
// v.set(3)
//
// assert(sm.now.get.pair == 2 -> 3)
// assert(sd.now.get.pair == 2 -> 3)
//
// }
}
}
| guidosalva/REScala | Code/Main/shared/src/test/scala-2/tests/rescala/misc/ReactiveCreationInTurnsTest.scala | Scala | apache-2.0 | 3,262 |
/*
* Copyright 2016 rdbc contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.rdbc.implbase
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}
import io.rdbc.sapi.Timeout
class CompatSpec extends RdbcImplbaseSpec {
implicit private val timeout = Timeout.Inf
"FutureCompat.transformWith" should {
"transform a successful future" in {
import Compat._
implicit val ec = ExecutionContext.global
val target = "10"
val future = Future.successful(10)
val transformed = future.transformWith {
case Success(_) => Future.successful(target)
case Failure(ex) => Future.failed(ex)
}
transformed.get shouldBe target
}
"transform a failed future" in {
import Compat._
implicit val ec = ExecutionContext.global
val target = "10"
val future = Future.failed(new RuntimeException)
val transformed = future.transformWith {
case Failure(ex) => Future.successful(target)
case Success(_) => Future.failed(new RuntimeException)
}
transformed.get shouldBe target
}
}
"FutureObjectCompat" should {
"provide Future.unit" in {
import Compat._
implicit val ec = ExecutionContext.global
val unit = Future.unit
unit shouldBe 'completed
unit.foreach { unitVal =>
unitVal shouldBe (())
}
}
}
}
| rdbc-io/rdbc | rdbc-implbase/src/test/scala-2.11/io/rdbc/implbase/CompatSpec.scala | Scala | apache-2.0 | 1,945 |
package com.github.mdr.mash.utils
import com.github.mdr.mash.utils.StringUtils.ellipsisise
import org.scalatest.{ FlatSpec, Matchers }
class StringUtilsTest extends FlatSpec with Matchers {
"Ellipsisise" should "work" in {
ellipsisise("123456", 3) should equal("12…")
ellipsisise("123", 3) should equal("123")
ellipsisise("12", 3) should equal("12")
ellipsisise("1", 1) should equal("1")
ellipsisise("12", 1) should equal("…")
ellipsisise("1", 0) should equal("")
ellipsisise("", 0) should equal("")
}
}
| mdr/mash | src/test/scala/com/github/mdr/mash/utils/StringUtilsTest.scala | Scala | mit | 547 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package services.flowmanagement.flowrouters.businessmatching
import javax.inject.{Inject, Singleton}
import models.flowmanagement._
import play.api.mvc.Result
import services.businessmatching.BusinessMatchingService
import services.flowmanagement.Router
import services.flowmanagement.pagerouters.removeflow._
import uk.gov.hmrc.http.HeaderCarrier
import scala.concurrent.{ExecutionContext, Future}
@Singleton
class RemoveBusinessTypeRouter @Inject()(val businessMatchingService: BusinessMatchingService,
val whatServicesToRemovePageRouter: WhatBusinessTypesToRemovePageRouter,
val needToUpdatePageRouter: NeedToUpdatePageRouter,
val removeServicesSummaryPageRouter: RemoveBusinessTypesSummaryPageRouter,
val unableToRemovePageRouter: UnableToRemovePageRouter,
val whatDateToRemovePageRouter: WhatDateToRemovePageRouter
) extends Router[RemoveBusinessTypeFlowModel] {
override def getRoute(credId: String, pageId: PageId, model: RemoveBusinessTypeFlowModel, edit: Boolean = false)
(implicit hc: HeaderCarrier, ec: ExecutionContext): Future[Result] = {
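    // Dispatch to the page router responsible for the given page of the remove-business-type flow.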
pageId match {
case WhatBusinessTypesToRemovePageId => whatServicesToRemovePageRouter.getRoute(credId, model, edit)
case NeedToUpdatePageId => needToUpdatePageRouter.getRoute(credId, model, edit)
case RemoveBusinessTypesSummaryPageId => removeServicesSummaryPageRouter.getRoute(credId, model, edit)
case UnableToRemovePageId => unableToRemovePageRouter.getRoute(credId, model, edit)
case WhatDateRemovedPageId => whatDateToRemovePageRouter.getRoute(credId, model, edit)
      case _ => throw new Exception("PageId not in remove flow")
}
}
}
| hmrc/amls-frontend | app/services/flowmanagement/flowrouters/businessmatching/RemoveBusinessTypeRouter.scala | Scala | apache-2.0 | 2,510 |
package com.monovore.decline.effect
import cats.effect.ExitCode
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import cats.effect.unsafe.IORuntime
class CommandIOAppSpec extends AnyFlatSpec with Matchers {
"CommandIOApp" should "return a success exit code when passing an argument" in {
runApp("me") shouldBe ExitCode.Success
}
it should "return a success exit code when passing a version option" in {
runApp("--version") shouldBe ExitCode.Success
}
it should "return a success exit code when passing a help option" in {
runApp("--help") shouldBe ExitCode.Success
}
it should "return an error exit code when passing no arguments" in {
runApp() shouldBe ExitCode.Error
}
private[this] def runApp(args: String*): ExitCode =
PureHelloWorld.run(args.toList).unsafeRunSync()(IORuntime.global)
}
| bkirwi/decline | effect/jvm/src/test/scala/com/monovore/decline/effect/CommandIOAppSpec.scala | Scala | apache-2.0 | 878 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.visor.commands.cache
import org.apache.ignite.cluster.{ClusterGroupEmptyException, ClusterNode}
import org.apache.ignite.internal.visor.cache.VisorCacheResetMetricsTask
import org.apache.ignite.internal.visor.util.VisorTaskUtils._
import org.apache.ignite.visor.visor._
import scala.language.reflectiveCalls
/**
* ==Overview==
* Visor 'reset' command implementation.
*
* ====Specification====
* {{{
* cache -reset -c=<cache name>
* }}}
*
* ====Arguments====
* {{{
* <cache-name>
* Name of the cache.
* }}}
*
* ====Examples====
* {{{
* cache -reset -c=@c0
* Reset metrics for cache with name taken from 'c0' memory variable.
* }}}
*/
class VisorCacheResetCommand {
/**
* Prints error message and advise.
*
* @param errMsgs Error messages.
*/
private def scold(errMsgs: Any*) {
assert(errMsgs != null)
warn(errMsgs: _*)
warn("Type 'help cache' to see how to use this command.")
}
private def error(e: Exception) {
var cause: Throwable = e
while (cause.getCause != null)
cause = cause.getCause
scold(cause.getMessage)
}
/**
* ===Command===
* Reset metrics for cache with specified name.
*
* ===Examples===
* <ex>cache -c=cache -reset</ex>
* Reset metrics for cache with name 'cache'.
*
* @param argLst Command arguments.
*/
def reset(argLst: ArgList, node: Option[ClusterNode]) {
val cacheArg = argValue("c", argLst)
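        // A '@'-prefixed argument refers to a Visor memory variable; this command warns and returns if one is given.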
val cacheName = cacheArg match {
case None => null // default cache.
case Some(s) if s.startsWith("@") =>
warn("Can't find cache variable with specified name: " + s,
"Type 'cache' to see available cache variables."
)
return
case Some(name) => name
}
val grp = try {
groupForDataNode(node, cacheName)
}
catch {
case _: ClusterGroupEmptyException =>
scold(messageNodeNotFound(node, cacheName))
return
}
try {
executeRandom(grp, classOf[VisorCacheResetMetricsTask], cacheName)
println("Visor successfully reset metrics for cache: " + escapeName(cacheName))
}
catch {
case _: ClusterGroupEmptyException => scold(messageNodeNotFound(node, cacheName))
case e: Exception => error(e)
}
}
}
/**
* Companion object that does initialization of the command.
*/
object VisorCacheResetCommand {
/** Singleton command. */
private val cmd = new VisorCacheResetCommand
/**
* Singleton.
*/
def apply() = cmd
}
| pperalta/ignite | modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/cache/VisorCacheResetCommand.scala | Scala | apache-2.0 | 3,583 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.util
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.util.collection.OpenHashMap
import scala.collection.JavaConversions.mapAsScalaMap
private[streaming]
object RawTextHelper {
/**
* Splits lines and counts the words.
*/
def splitAndCountPartitions(iter: Iterator[String]): Iterator[(String, Long)] = {
val map = new OpenHashMap[String,Long]
var i = 0
var j = 0
while (iter.hasNext) {
val s = iter.next()
i = 0
while (i < s.length) {
j = i
while (j < s.length && s.charAt(j) != ' ') {
j += 1
}
if (j > i) {
val w = s.substring(i, j)
map.changeValue(w, 1L, _ + 1L)
}
i = j
while (i < s.length && s.charAt(i) == ' ') {
i += 1
}
}
}
map.toIterator.map{case (k, v) => (k, v)}
}
/**
* Gets the top k words in terms of word counts. Assumes that each word exists only once
* in the `data` iterator (that is, the counts have been reduced).
*/
def topK(data: Iterator[(String, Long)], k: Int): Iterator[(String, Long)] = {
val taken = new Array[(String, Long)](k)
var i = 0
var len = 0
var done = false
var value: (String, Long) = null
var swap: (String, Long) = null
var count = 0
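    // `taken(0 until len)` is kept sorted by descending count; each new element is placed with a single insertion-sort pass.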
while(data.hasNext) {
value = data.next
if (value != null) {
count += 1
if (len == 0) {
taken(0) = value
len = 1
} else if (len < k || value._2 > taken(len - 1)._2) {
if (len < k) {
len += 1
}
taken(len - 1) = value
i = len - 1
while(i > 0 && taken(i - 1)._2 < taken(i)._2) {
swap = taken(i)
taken(i) = taken(i-1)
taken(i - 1) = swap
i -= 1
}
}
}
}
taken.toIterator
}
/**
* Warms up the SparkContext in master and slave by running tasks to force JIT kick in
* before real workload starts.
*/
def warmUp(sc: SparkContext) {
for(i <- 0 to 1) {
sc.parallelize(1 to 200000, 1000)
.map(_ % 1331).map(_.toString)
.mapPartitions(splitAndCountPartitions).reduceByKey(_ + _, 10)
.count()
}
}
def add(v1: Long, v2: Long) = (v1 + v2)
def subtract(v1: Long, v2: Long) = (v1 - v2)
def max(v1: Long, v2: Long) = math.max(v1, v2)
}
| trueyao/spark-lever | streaming/src/main/scala/org/apache/spark/streaming/util/RawTextHelper.scala | Scala | apache-2.0 | 3,322 |
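/**
 * Eight compass directions for the bot, represented as indices into a ring of (x, y) offsets.
 * `Direction(pos)` picks the direction whose 45-degree sector contains the offset's angle,
 * or a random direction for the zero offset.
 */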
case object Direction {
private val dict = Vector(Pos(1, 0), Pos(1, 1), Pos(0, 1), Pos(-1, 1), Pos(-1, 0), Pos(-1, -1), Pos(0, -1), Pos(1, -1))
private def angle(p: Pos) = (((math.atan2(p.y, p.x) * 180 / math.Pi + 360) % 360) / 45).toInt
def apply(pos: Pos): Direction = if(pos != Pos.Mid)
Direction.apply(angle(pos) % 8)
else {
Direction.apply((math.random * 8).toInt)
}
val East = new Direction(0)
val SouthEast = new Direction(1)
val South = new Direction(2)
val SouthWest = new Direction(3)
val West = new Direction(4)
val NorthWest = new Direction(5)
val North = new Direction(6)
val NorthEast = new Direction(7)
val All = Seq(East, SouthEast, South, SouthWest, West, NorthWest, North, NorthEast)
}
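/** A direction stored as an index into the companion's offset table: 0 = East, 1 = SouthEast, ..., 7 = NorthEast. */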
case class Direction(dirIdx: Int) {
val offset = Direction.dict(dirIdx)
override def toString = offset.x + ":" + offset.y
def rotateLittleBitLeft = Direction((dirIdx + 7) % 8)
def rotateLittleBitRight = Direction((dirIdx + 1) % 8)
def turnBack = Direction((dirIdx + 4) % 8)
} | nurkiewicz/scalatron-bot | src/main/scala/Direction.scala | Scala | apache-2.0 | 1,007 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.logical.LeafNode
import org.apache.spark.sql.connector.catalog.{CatalogPlugin, Identifier, Table, TableCatalog}
/**
* Holds the name of a namespace that has yet to be looked up in a catalog. It will be resolved to
* [[ResolvedNamespace]] during analysis.
*/
case class UnresolvedNamespace(multipartIdentifier: Seq[String]) extends LeafNode {
override lazy val resolved: Boolean = false
override def output: Seq[Attribute] = Nil
}
/**
* Holds the name of a table that has yet to be looked up in a catalog. It will be resolved to
* [[ResolvedTable]] during analysis.
*/
case class UnresolvedTable(multipartIdentifier: Seq[String]) extends LeafNode {
override lazy val resolved: Boolean = false
override def output: Seq[Attribute] = Nil
}
/**
* Holds the name of a table or view that has yet to be looked up in a catalog. It will
* be resolved to [[ResolvedTable]] or [[ResolvedView]] during analysis.
*/
case class UnresolvedTableOrView(
multipartIdentifier: Seq[String],
allowTempView: Boolean = true) extends LeafNode {
override lazy val resolved: Boolean = false
override def output: Seq[Attribute] = Nil
}
sealed trait PartitionSpec
case class UnresolvedPartitionSpec(
spec: TablePartitionSpec,
location: Option[String] = None) extends PartitionSpec
/**
* Holds the name of a function that has yet to be looked up in a catalog. It will be resolved to
* [[ResolvedFunc]] during analysis.
*/
case class UnresolvedFunc(multipartIdentifier: Seq[String]) extends LeafNode {
override lazy val resolved: Boolean = false
override def output: Seq[Attribute] = Nil
}
/**
* A plan containing resolved namespace.
*/
case class ResolvedNamespace(catalog: CatalogPlugin, namespace: Seq[String])
extends LeafNode {
override def output: Seq[Attribute] = Nil
}
/**
* A plan containing resolved table.
*/
case class ResolvedTable(catalog: TableCatalog, identifier: Identifier, table: Table)
extends LeafNode {
override def output: Seq[Attribute] = Nil
}
case class ResolvedPartitionSpec(
spec: InternalRow,
location: Option[String] = None) extends PartitionSpec
/**
* A plan containing resolved (temp) views.
*/
// TODO: create a generic representation for temp view, v1 view and v2 view, after we add view
// support to v2 catalog. For now we only need the identifier to fallback to v1 command.
case class ResolvedView(identifier: Identifier, isTemp: Boolean) extends LeafNode {
override def output: Seq[Attribute] = Nil
}
/**
* A plan containing resolved function.
*/
// TODO: create a generic representation for v1, v2 function, after we add function
// support to v2 catalog. For now we only need the identifier to fallback to v1 command.
case class ResolvedFunc(identifier: Identifier)
extends LeafNode {
override def output: Seq[Attribute] = Nil
}
| shuangshuangwang/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/v2ResolutionPlans.scala | Scala | apache-2.0 | 3,915 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming
import java.io.{InputStream, NotSerializableException}
import java.util.Properties
import java.util.concurrent.atomic.{AtomicInteger, AtomicReference}
import scala.collection.Map
import scala.collection.mutable.Queue
import scala.reflect.ClassTag
import scala.util.control.NonFatal
import org.apache.commons.lang3.SerializationUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.{BytesWritable, LongWritable, Text}
import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat}
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat
import org.apache.spark._
import org.apache.spark.annotation.{DeveloperApi, Experimental}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.input.FixedLengthBinaryInputFormat
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.{RDD, RDDOperationScope}
import org.apache.spark.scheduler.LiveListenerBus
import org.apache.spark.serializer.SerializationDebugger
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContextState._
import org.apache.spark.streaming.dstream._
import org.apache.spark.streaming.receiver.Receiver
import org.apache.spark.streaming.scheduler.
{ExecutorAllocationManager, JobScheduler, StreamingListener, StreamingListenerStreamingStarted}
import org.apache.spark.streaming.ui.{StreamingJobProgressListener, StreamingTab}
import org.apache.spark.util.{CallSite, ShutdownHookManager, ThreadUtils, Utils}
/**
* Main entry point for Spark Streaming functionality. It provides methods used to create
* [[org.apache.spark.streaming.dstream.DStream]]s from various input sources. It can be either
* created by providing a Spark master URL and an appName, or from a org.apache.spark.SparkConf
* configuration (see core Spark documentation), or from an existing org.apache.spark.SparkContext.
* The associated SparkContext can be accessed using `context.sparkContext`. After
* creating and transforming DStreams, the streaming computation can be started and stopped
* using `context.start()` and `context.stop()`, respectively.
* `context.awaitTermination()` allows the current thread to wait for the termination
* of the context by `stop()` or by an exception.
*/
class StreamingContext private[streaming] (
_sc: SparkContext,
_cp: Checkpoint,
_batchDur: Duration
) extends Logging {
/**
* Create a StreamingContext using an existing SparkContext.
* @param sparkContext existing SparkContext
* @param batchDuration the time interval at which streaming data will be divided into batches
*/
def this(sparkContext: SparkContext, batchDuration: Duration) = {
this(sparkContext, null, batchDuration)
}
/**
* Create a StreamingContext by providing the configuration necessary for a new SparkContext.
* @param conf a org.apache.spark.SparkConf object specifying Spark parameters
* @param batchDuration the time interval at which streaming data will be divided into batches
*/
def this(conf: SparkConf, batchDuration: Duration) = {
this(StreamingContext.createNewSparkContext(conf), null, batchDuration)
}
/**
* Create a StreamingContext by providing the details necessary for creating a new SparkContext.
* @param master cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
* @param appName a name for your job, to display on the cluster web UI
* @param batchDuration the time interval at which streaming data will be divided into batches
*/
def this(
master: String,
appName: String,
batchDuration: Duration,
sparkHome: String = null,
jars: Seq[String] = Nil,
environment: Map[String, String] = Map()) = {
this(StreamingContext.createNewSparkContext(master, appName, sparkHome, jars, environment),
null, batchDuration)
}
/**
* Recreate a StreamingContext from a checkpoint file.
* @param path Path to the directory that was specified as the checkpoint directory
* @param hadoopConf Optional, configuration object if necessary for reading from
* HDFS compatible filesystems
*/
def this(path: String, hadoopConf: Configuration) =
this(null, CheckpointReader.read(path, new SparkConf(), hadoopConf).orNull, null)
/**
* Recreate a StreamingContext from a checkpoint file.
* @param path Path to the directory that was specified as the checkpoint directory
*/
def this(path: String) = this(path, SparkHadoopUtil.get.conf)
/**
* Recreate a StreamingContext from a checkpoint file using an existing SparkContext.
* @param path Path to the directory that was specified as the checkpoint directory
* @param sparkContext Existing SparkContext
*/
def this(path: String, sparkContext: SparkContext) = {
this(
sparkContext,
CheckpointReader.read(path, sparkContext.conf, sparkContext.hadoopConfiguration).orNull,
null)
}
require(_sc != null || _cp != null,
"Spark Streaming cannot be initialized with both SparkContext and checkpoint as null")
private[streaming] val isCheckpointPresent: Boolean = _cp != null
private[streaming] val sc: SparkContext = {
if (_sc != null) {
_sc
} else if (isCheckpointPresent) {
SparkContext.getOrCreate(_cp.createSparkConf())
} else {
throw new SparkException("Cannot create StreamingContext without a SparkContext")
}
}
if (sc.conf.get("spark.master") == "local" || sc.conf.get("spark.master") == "local[1]") {
logWarning("spark.master should be set as local[n], n > 1 in local mode if you have receivers" +
" to get data, otherwise Spark jobs will not get resources to process the received data.")
}
private[streaming] val conf = sc.conf
private[streaming] val env = sc.env
private[streaming] val graph: DStreamGraph = {
if (isCheckpointPresent) {
_cp.graph.setContext(this)
_cp.graph.restoreCheckpointData()
_cp.graph
} else {
require(_batchDur != null, "Batch duration for StreamingContext cannot be null")
val newGraph = new DStreamGraph()
newGraph.setBatchDuration(_batchDur)
newGraph
}
}
private val nextInputStreamId = new AtomicInteger(0)
private[streaming] var checkpointDir: String = {
if (isCheckpointPresent) {
sc.setCheckpointDir(_cp.checkpointDir)
_cp.checkpointDir
} else {
null
}
}
private[streaming] val checkpointDuration: Duration = {
if (isCheckpointPresent) _cp.checkpointDuration else graph.batchDuration
}
private[streaming] val scheduler = new JobScheduler(this)
private[streaming] val waiter = new ContextWaiter
private[streaming] val progressListener = new StreamingJobProgressListener(this)
private[streaming] val uiTab: Option[StreamingTab] =
if (conf.getBoolean("spark.ui.enabled", true)) {
Some(new StreamingTab(this))
} else {
None
}
/* Initializing a streamingSource to register metrics */
private val streamingSource = new StreamingSource(this)
private var state: StreamingContextState = INITIALIZED
private val startSite = new AtomicReference[CallSite](null)
// Copy of thread-local properties from SparkContext. These properties will be set in all tasks
// submitted by this StreamingContext after start.
private[streaming] val savedProperties = new AtomicReference[Properties](new Properties)
private[streaming] def getStartSite(): CallSite = startSite.get()
private var shutdownHookRef: AnyRef = _
conf.getOption("spark.streaming.checkpoint.directory").foreach(checkpoint)
/**
* Return the associated Spark context
*/
def sparkContext: SparkContext = sc
/**
* Set each DStream in this context to remember RDDs it generated in the last given duration.
* DStreams remember RDDs only for a limited duration of time and release them for garbage
* collection. This method allows the developer to specify how long to remember the RDDs (
* if the developer wishes to query old data outside the DStream computation).
* @param duration Minimum duration that each DStream should remember its RDDs
*/
def remember(duration: Duration) {
graph.remember(duration)
}
/**
* Set the context to periodically checkpoint the DStream operations for driver
* fault-tolerance.
* @param directory HDFS-compatible directory where the checkpoint data will be reliably stored.
* Note that this must be a fault-tolerant file system like HDFS.
*/
def checkpoint(directory: String) {
if (directory != null) {
val path = new Path(directory)
val fs = path.getFileSystem(sparkContext.hadoopConfiguration)
fs.mkdirs(path)
val fullPath = fs.getFileStatus(path).getPath().toString
sc.setCheckpointDir(fullPath)
checkpointDir = fullPath
} else {
checkpointDir = null
}
}
private[streaming] def isCheckpointingEnabled: Boolean = {
checkpointDir != null
}
private[streaming] def initialCheckpoint: Checkpoint = {
if (isCheckpointPresent) _cp else null
}
private[streaming] def getNewInputStreamId() = nextInputStreamId.getAndIncrement()
/**
* Execute a block of code in a scope such that all new DStreams created in this body will
* be part of the same scope. For more detail, see the comments in `doCompute`.
*
* Note: Return statements are NOT allowed in the given body.
*/
private[streaming] def withScope[U](body: => U): U = sparkContext.withScope(body)
/**
* Execute a block of code in a scope such that all new DStreams created in this body will
* be part of the same scope. For more detail, see the comments in `doCompute`.
*
* Note: Return statements are NOT allowed in the given body.
*/
private[streaming] def withNamedScope[U](name: String)(body: => U): U = {
RDDOperationScope.withScope(sc, name, allowNesting = false, ignoreParent = false)(body)
}
/**
* Create an input stream with any arbitrary user implemented receiver.
* Find more details at http://spark.apache.org/docs/latest/streaming-custom-receivers.html
* @param receiver Custom implementation of Receiver
*/
def receiverStream[T: ClassTag](receiver: Receiver[T]): ReceiverInputDStream[T] = {
withNamedScope("receiver stream") {
new PluggableInputDStream[T](this, receiver)
}
}
/**
* Creates an input stream from TCP source hostname:port. Data is received using
   * a TCP socket and the received bytes are interpreted as UTF8 encoded `\n` delimited
* lines.
* @param hostname Hostname to connect to for receiving data
* @param port Port to connect to for receiving data
* @param storageLevel Storage level to use for storing the received objects
* (default: StorageLevel.MEMORY_AND_DISK_SER_2)
* @see [[socketStream]]
*/
def socketTextStream(
hostname: String,
port: Int,
storageLevel: StorageLevel = StorageLevel.MEMORY_AND_DISK_SER_2
): ReceiverInputDStream[String] = withNamedScope("socket text stream") {
socketStream[String](hostname, port, SocketReceiver.bytesToLines, storageLevel)
}
/**
* Creates an input stream from TCP source hostname:port. Data is received using
   * a TCP socket and the received bytes are interpreted as objects using the given
* converter.
* @param hostname Hostname to connect to for receiving data
* @param port Port to connect to for receiving data
* @param converter Function to convert the byte stream to objects
* @param storageLevel Storage level to use for storing the received objects
* @tparam T Type of the objects received (after converting bytes to objects)
*/
def socketStream[T: ClassTag](
hostname: String,
port: Int,
converter: (InputStream) => Iterator[T],
storageLevel: StorageLevel
): ReceiverInputDStream[T] = {
new SocketInputDStream[T](this, hostname, port, converter, storageLevel)
}
/**
* Create an input stream from network source hostname:port, where data is received
* as serialized blocks (serialized using the Spark's serializer) that can be directly
* pushed into the block manager without deserializing them. This is the most efficient
* way to receive data.
* @param hostname Hostname to connect to for receiving data
* @param port Port to connect to for receiving data
* @param storageLevel Storage level to use for storing the received objects
* (default: StorageLevel.MEMORY_AND_DISK_SER_2)
* @tparam T Type of the objects in the received blocks
*/
def rawSocketStream[T: ClassTag](
hostname: String,
port: Int,
storageLevel: StorageLevel = StorageLevel.MEMORY_AND_DISK_SER_2
): ReceiverInputDStream[T] = withNamedScope("raw socket stream") {
new RawInputDStream[T](this, hostname, port, storageLevel)
}
/**
* Create an input stream that monitors a Hadoop-compatible filesystem
* for new files and reads them using the given key-value types and input format.
* Files must be written to the monitored directory by "moving" them from another
* location within the same file system. File names starting with . are ignored.
* @param directory HDFS directory to monitor for new file
* @tparam K Key type for reading HDFS file
* @tparam V Value type for reading HDFS file
* @tparam F Input format for reading HDFS file
*/
def fileStream[
K: ClassTag,
V: ClassTag,
F <: NewInputFormat[K, V]: ClassTag
] (directory: String): InputDStream[(K, V)] = {
new FileInputDStream[K, V, F](this, directory)
}
/**
* Create an input stream that monitors a Hadoop-compatible filesystem
* for new files and reads them using the given key-value types and input format.
* Files must be written to the monitored directory by "moving" them from another
* location within the same file system.
* @param directory HDFS directory to monitor for new file
* @param filter Function to filter paths to process
* @param newFilesOnly Should process only new files and ignore existing files in the directory
* @tparam K Key type for reading HDFS file
* @tparam V Value type for reading HDFS file
* @tparam F Input format for reading HDFS file
*/
def fileStream[
K: ClassTag,
V: ClassTag,
F <: NewInputFormat[K, V]: ClassTag
] (directory: String, filter: Path => Boolean, newFilesOnly: Boolean): InputDStream[(K, V)] = {
new FileInputDStream[K, V, F](this, directory, filter, newFilesOnly)
}
/**
* Create an input stream that monitors a Hadoop-compatible filesystem
* for new files and reads them using the given key-value types and input format.
* Files must be written to the monitored directory by "moving" them from another
* location within the same file system. File names starting with . are ignored.
* @param directory HDFS directory to monitor for new file
* @param filter Function to filter paths to process
* @param newFilesOnly Should process only new files and ignore existing files in the directory
* @param conf Hadoop configuration
* @tparam K Key type for reading HDFS file
* @tparam V Value type for reading HDFS file
* @tparam F Input format for reading HDFS file
*/
def fileStream[
K: ClassTag,
V: ClassTag,
F <: NewInputFormat[K, V]: ClassTag
] (directory: String,
filter: Path => Boolean,
newFilesOnly: Boolean,
conf: Configuration): InputDStream[(K, V)] = {
new FileInputDStream[K, V, F](this, directory, filter, newFilesOnly, Option(conf))
}
/**
* Create an input stream that monitors a Hadoop-compatible filesystem
* for new files and reads them as text files (using key as LongWritable, value
* as Text and input format as TextInputFormat). Files must be written to the
* monitored directory by "moving" them from another location within the same
* file system. File names starting with . are ignored.
* @param directory HDFS directory to monitor for new file
*/
def textFileStream(directory: String): DStream[String] = withNamedScope("text file stream") {
fileStream[LongWritable, Text, TextInputFormat](directory).map(_._2.toString)
}
/**
* Create an input stream that monitors a Hadoop-compatible filesystem
* for new files and reads them as flat binary files, assuming a fixed length per record,
* generating one byte array per record. Files must be written to the monitored directory
* by "moving" them from another location within the same file system. File names
* starting with . are ignored.
*
* @param directory HDFS directory to monitor for new file
* @param recordLength length of each record in bytes
*
* @note We ensure that the byte array for each record in the
* resulting RDDs of the DStream has the provided record length.
*/
def binaryRecordsStream(
directory: String,
recordLength: Int): DStream[Array[Byte]] = withNamedScope("binary records stream") {
val conf = _sc.hadoopConfiguration
conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength)
val br = fileStream[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](
directory, FileInputDStream.defaultFilter: Path => Boolean, newFilesOnly = true, conf)
br.map { case (k, v) =>
val bytes = v.copyBytes()
require(bytes.length == recordLength, "Byte array does not have correct length. " +
s"${bytes.length} did not equal recordLength: $recordLength")
bytes
}
}
/**
* Create an input stream from a queue of RDDs. In each batch,
* it will process either one or all of the RDDs returned by the queue.
*
* @param queue Queue of RDDs. Modifications to this data structure must be synchronized.
* @param oneAtATime Whether only one RDD should be consumed from the queue in every interval
* @tparam T Type of objects in the RDD
*
* @note Arbitrary RDDs can be added to `queueStream`, there is no way to recover data of
* those RDDs, so `queueStream` doesn't support checkpointing.
*/
def queueStream[T: ClassTag](
queue: Queue[RDD[T]],
oneAtATime: Boolean = true
): InputDStream[T] = {
queueStream(queue, oneAtATime, sc.makeRDD(Seq.empty[T], 1))
}
/**
* Create an input stream from a queue of RDDs. In each batch,
* it will process either one or all of the RDDs returned by the queue.
*
* @param queue Queue of RDDs. Modifications to this data structure must be synchronized.
* @param oneAtATime Whether only one RDD should be consumed from the queue in every interval
* @param defaultRDD Default RDD is returned by the DStream when the queue is empty.
* Set as null if no RDD should be returned when empty
* @tparam T Type of objects in the RDD
*
* @note Arbitrary RDDs can be added to `queueStream`, there is no way to recover data of
* those RDDs, so `queueStream` doesn't support checkpointing.
*/
def queueStream[T: ClassTag](
queue: Queue[RDD[T]],
oneAtATime: Boolean,
defaultRDD: RDD[T]
): InputDStream[T] = {
new QueueInputDStream(this, queue, oneAtATime, defaultRDD)
}
/**
* Create a unified DStream from multiple DStreams of the same type and same slide duration.
*/
def union[T: ClassTag](streams: Seq[DStream[T]]): DStream[T] = withScope {
new UnionDStream[T](streams.toArray)
}
/**
* Create a new DStream in which each RDD is generated by applying a function on RDDs of
* the DStreams.
*/
def transform[T: ClassTag](
dstreams: Seq[DStream[_]],
transformFunc: (Seq[RDD[_]], Time) => RDD[T]
): DStream[T] = withScope {
new TransformedDStream[T](dstreams, sparkContext.clean(transformFunc))
}
/**
* Add a [[org.apache.spark.streaming.scheduler.StreamingListener]] object for
* receiving system events related to streaming.
*/
def addStreamingListener(streamingListener: StreamingListener) {
scheduler.listenerBus.addListener(streamingListener)
}
private def validate() {
assert(graph != null, "Graph is null")
graph.validate()
require(
!isCheckpointingEnabled || checkpointDuration != null,
"Checkpoint directory has been set, but the graph checkpointing interval has " +
"not been set. Please use StreamingContext.checkpoint() to set the interval."
)
// Verify whether the DStream checkpoint is serializable
if (isCheckpointingEnabled) {
val checkpoint = new Checkpoint(this, Time(0))
try {
Checkpoint.serialize(checkpoint, conf)
} catch {
case e: NotSerializableException =>
          throw new NotSerializableException(
            "DStream checkpointing has been enabled but the DStreams with their functions " +
              "are not serializable\n" +
SerializationDebugger.improveException(checkpoint, e).getMessage()
)
}
}
if (Utils.isDynamicAllocationEnabled(sc.conf) ||
ExecutorAllocationManager.isDynamicAllocationEnabled(conf)) {
logWarning("Dynamic Allocation is enabled for this application. " +
"Enabling Dynamic allocation for Spark Streaming applications can cause data loss if " +
"Write Ahead Log is not enabled for non-replayable sources like Flume. " +
"See the programming guide for details on how to enable the Write Ahead Log.")
}
}
/**
* :: DeveloperApi ::
*
* Return the current state of the context. The context can be in three possible states -
*
* - StreamingContextState.INITIALIZED - The context has been created, but not started yet.
* Input DStreams, transformations and output operations can be created on the context.
* - StreamingContextState.ACTIVE - The context has been started, and not stopped.
* Input DStreams, transformations and output operations cannot be created on the context.
* - StreamingContextState.STOPPED - The context has been stopped and cannot be used any more.
*/
@DeveloperApi
def getState(): StreamingContextState = synchronized {
state
}
/**
* Start the execution of the streams.
*
* @throws IllegalStateException if the StreamingContext is already stopped.
*/
def start(): Unit = synchronized {
state match {
case INITIALIZED =>
startSite.set(DStream.getCreationSite())
StreamingContext.ACTIVATION_LOCK.synchronized {
StreamingContext.assertNoOtherContextIsActive()
try {
validate()
// Start the streaming scheduler in a new thread, so that thread local properties
// like call sites and job groups can be reset without affecting those of the
// current thread.
ThreadUtils.runInNewThread("streaming-start") {
sparkContext.setCallSite(startSite.get)
sparkContext.clearJobGroup()
sparkContext.setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, "false")
savedProperties.set(SerializationUtils.clone(sparkContext.localProperties.get()))
scheduler.start()
}
state = StreamingContextState.ACTIVE
scheduler.listenerBus.post(
StreamingListenerStreamingStarted(System.currentTimeMillis()))
} catch {
case NonFatal(e) =>
logError("Error starting the context, marking it as stopped", e)
scheduler.stop(false)
state = StreamingContextState.STOPPED
throw e
}
StreamingContext.setActiveContext(this)
}
logDebug("Adding shutdown hook") // force eager creation of logger
shutdownHookRef = ShutdownHookManager.addShutdownHook(
StreamingContext.SHUTDOWN_HOOK_PRIORITY)(() => stopOnShutdown())
// Registering Streaming Metrics at the start of the StreamingContext
assert(env.metricsSystem != null)
env.metricsSystem.registerSource(streamingSource)
uiTab.foreach(_.attach())
logInfo("StreamingContext started")
case ACTIVE =>
logWarning("StreamingContext has already been started")
case STOPPED =>
throw new IllegalStateException("StreamingContext has already been stopped")
}
}
/**
 * Wait for the execution to stop. Any exceptions that occur during the execution
* will be thrown in this thread.
*/
def awaitTermination() {
waiter.waitForStopOrError()
}
/**
 * Wait for the execution to stop. Any exceptions that occur during the execution
* will be thrown in this thread.
*
* @param timeout time to wait in milliseconds
* @return `true` if it's stopped; or throw the reported error during the execution; or `false`
* if the waiting time elapsed before returning from the method.
*/
def awaitTerminationOrTimeout(timeout: Long): Boolean = {
waiter.waitForStopOrError(timeout)
}
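  // Example (sketch of the usual lifecycle, assuming output operations are already defined on `ssc`):
  //   ssc.start()                              // returns immediately; jobs run on a scheduler thread
  //   if (!ssc.awaitTerminationOrTimeout(60000)) {
  //     ssc.stop(stopSparkContext = true, stopGracefully = true)
  //   }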
/**
* Stop the execution of the streams immediately (does not wait for all received data
* to be processed). By default, if `stopSparkContext` is not specified, the underlying
* SparkContext will also be stopped. This implicit behavior can be configured using the
* SparkConf configuration spark.streaming.stopSparkContextByDefault.
*
* @param stopSparkContext If true, stops the associated SparkContext. The underlying SparkContext
* will be stopped regardless of whether this StreamingContext has been
* started.
*/
def stop(
stopSparkContext: Boolean = conf.getBoolean("spark.streaming.stopSparkContextByDefault", true)
): Unit = synchronized {
stop(stopSparkContext, false)
}
/**
* Stop the execution of the streams, with option of ensuring all received data
* has been processed.
*
* @param stopSparkContext if true, stops the associated SparkContext. The underlying SparkContext
* will be stopped regardless of whether this StreamingContext has been
* started.
* @param stopGracefully if true, stops gracefully by waiting for the processing of all
* received data to be completed
*/
def stop(stopSparkContext: Boolean, stopGracefully: Boolean): Unit = {
var shutdownHookRefToRemove: AnyRef = null
if (LiveListenerBus.withinListenerThread.value) {
throw new SparkException(s"Cannot stop StreamingContext within listener bus thread.")
}
synchronized {
// The state should always be Stopped after calling `stop()`, even if we haven't started yet
state match {
case INITIALIZED =>
logWarning("StreamingContext has not been started yet")
state = STOPPED
case STOPPED =>
logWarning("StreamingContext has already been stopped")
state = STOPPED
case ACTIVE =>
// It's important that we don't set state = STOPPED until the very end of this case,
// since we need to ensure that we're still able to call `stop()` to recover from
// a partially-stopped StreamingContext which resulted from this `stop()` call being
// interrupted. See SPARK-12001 for more details. Because the body of this case can be
// executed twice in the case of a partial stop, all methods called here need to be
// idempotent.
Utils.tryLogNonFatalError {
scheduler.stop(stopGracefully)
}
// Removing the streamingSource to de-register the metrics on stop()
Utils.tryLogNonFatalError {
env.metricsSystem.removeSource(streamingSource)
}
Utils.tryLogNonFatalError {
uiTab.foreach(_.detach())
}
StreamingContext.setActiveContext(null)
Utils.tryLogNonFatalError {
waiter.notifyStop()
}
if (shutdownHookRef != null) {
shutdownHookRefToRemove = shutdownHookRef
shutdownHookRef = null
}
logInfo("StreamingContext stopped successfully")
state = STOPPED
}
}
if (shutdownHookRefToRemove != null) {
ShutdownHookManager.removeShutdownHook(shutdownHookRefToRemove)
}
// Even if we have already stopped, we still need to attempt to stop the SparkContext because
// a user might stop(stopSparkContext = false) and then call stop(stopSparkContext = true).
if (stopSparkContext) sc.stop()
}
private def stopOnShutdown(): Unit = {
val stopGracefully = conf.getBoolean("spark.streaming.stopGracefullyOnShutdown", false)
logInfo(s"Invoking stop(stopGracefully=$stopGracefully) from shutdown hook")
// Do not stop SparkContext, let its own shutdown hook stop it
stop(stopSparkContext = false, stopGracefully = stopGracefully)
}
}
/**
* StreamingContext object contains a number of utility functions related to the
* StreamingContext class.
*/
object StreamingContext extends Logging {
/**
* Lock that guards activation of a StreamingContext as well as access to the singleton active
* StreamingContext in getActiveOrCreate().
*/
private val ACTIVATION_LOCK = new Object()
private val SHUTDOWN_HOOK_PRIORITY = ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY + 1
private val activeContext = new AtomicReference[StreamingContext](null)
private def assertNoOtherContextIsActive(): Unit = {
ACTIVATION_LOCK.synchronized {
if (activeContext.get() != null) {
throw new IllegalStateException(
"Only one StreamingContext may be started in this JVM. " +
"Currently running StreamingContext was started at" +
activeContext.get.getStartSite().longForm)
}
}
}
private def setActiveContext(ssc: StreamingContext): Unit = {
ACTIVATION_LOCK.synchronized {
activeContext.set(ssc)
}
}
/**
* :: Experimental ::
*
* Get the currently active context, if there is one. Active means started but not stopped.
*/
@Experimental
def getActive(): Option[StreamingContext] = {
ACTIVATION_LOCK.synchronized {
Option(activeContext.get())
}
}
/**
* :: Experimental ::
*
* Either return the "active" StreamingContext (that is, started but not stopped), or create a
 * new StreamingContext by invoking the provided `creatingFunc`.
* @param creatingFunc Function to create a new StreamingContext
*/
@Experimental
def getActiveOrCreate(creatingFunc: () => StreamingContext): StreamingContext = {
ACTIVATION_LOCK.synchronized {
getActive().getOrElse { creatingFunc() }
}
}
/**
* :: Experimental ::
*
* Either get the currently active StreamingContext (that is, started but not stopped),
* OR recreate a StreamingContext from checkpoint data in the given path. If checkpoint data
 * does not exist in the provided `checkpointPath`, then create a new StreamingContext by calling the provided
* `creatingFunc`.
*
* @param checkpointPath Checkpoint directory used in an earlier StreamingContext program
* @param creatingFunc Function to create a new StreamingContext
* @param hadoopConf Optional Hadoop configuration if necessary for reading from the
* file system
* @param createOnError Optional, whether to create a new StreamingContext if there is an
* error in reading checkpoint data. By default, an exception will be
* thrown on error.
*/
@Experimental
def getActiveOrCreate(
checkpointPath: String,
creatingFunc: () => StreamingContext,
hadoopConf: Configuration = SparkHadoopUtil.get.conf,
createOnError: Boolean = false
): StreamingContext = {
ACTIVATION_LOCK.synchronized {
getActive().getOrElse { getOrCreate(checkpointPath, creatingFunc, hadoopConf, createOnError) }
}
}
/**
* Either recreate a StreamingContext from checkpoint data or create a new StreamingContext.
* If checkpoint data exists in the provided `checkpointPath`, then StreamingContext will be
* recreated from the checkpoint data. If the data does not exist, then the StreamingContext
 * will be created by calling the provided `creatingFunc`.
*
* @param checkpointPath Checkpoint directory used in an earlier StreamingContext program
* @param creatingFunc Function to create a new StreamingContext
* @param hadoopConf Optional Hadoop configuration if necessary for reading from the
* file system
* @param createOnError Optional, whether to create a new StreamingContext if there is an
* error in reading checkpoint data. By default, an exception will be
* thrown on error.
*/
def getOrCreate(
checkpointPath: String,
creatingFunc: () => StreamingContext,
hadoopConf: Configuration = SparkHadoopUtil.get.conf,
createOnError: Boolean = false
): StreamingContext = {
val checkpointOption = CheckpointReader.read(
checkpointPath, new SparkConf(), hadoopConf, createOnError)
checkpointOption.map(new StreamingContext(null, _, null)).getOrElse(creatingFunc())
}
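  // Example (sketch of the driver-recovery pattern; the checkpoint path and batch interval are
  // placeholders, and `conf` is assumed to be an existing SparkConf):
  //   def createContext(): StreamingContext = {
  //     val ssc = new StreamingContext(conf, Seconds(1))
  //     ssc.checkpoint("hdfs:///checkpoints/my-app")
  //     // ... define DStreams and output operations here ...
  //     ssc
  //   }
  //   val ssc = StreamingContext.getOrCreate("hdfs:///checkpoints/my-app", () => createContext())
  //   ssc.start()
  //   ssc.awaitTermination()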
/**
* Find the JAR from which a given class was loaded, to make it easy for users to pass
* their JARs to StreamingContext.
*/
def jarOfClass(cls: Class[_]): Option[String] = SparkContext.jarOfClass(cls)
private[streaming] def createNewSparkContext(conf: SparkConf): SparkContext = {
new SparkContext(conf)
}
private[streaming] def createNewSparkContext(
master: String,
appName: String,
sparkHome: String,
jars: Seq[String],
environment: Map[String, String]
): SparkContext = {
val conf = SparkContext.updatedConf(
new SparkConf(), master, appName, sparkHome, jars, environment)
new SparkContext(conf)
}
private[streaming] def rddToFileName[T](prefix: String, suffix: String, time: Time): String = {
var result = time.milliseconds.toString
if (prefix != null && prefix.length > 0) {
result = s"$prefix-$result"
}
if (suffix != null && suffix.length > 0) {
result = s"$result.$suffix"
}
result
}
}
private class StreamingContextPythonHelper {
/**
* This is a private method only for Python to implement `getOrCreate`.
*/
def tryRecoverFromCheckpoint(checkpointPath: String): Option[StreamingContext] = {
val checkpointOption = CheckpointReader.read(
checkpointPath, new SparkConf(), SparkHadoopUtil.get.conf, ignoreReadError = false)
checkpointOption.map(new StreamingContext(null, _, null))
}
}
| bravo-zhang/spark | streaming/src/main/scala/org/apache/spark/streaming/StreamingContext.scala | Scala | apache-2.0 | 35,621 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.ir
import scala.annotation.switch
// Unimport default print and println to avoid invoking them by mistake
import scala.Predef.{print => _, println => _, _}
import java.io.Writer
import Position._
import Trees._
import Types._
import Utils.printEscapeJS
object Printers {
/** Basically copied from scala.reflect.internal.Printers */
abstract class IndentationManager {
protected val out: Writer
private var indentMargin = 0
private val indentStep = 2
private var indentString = " " // 40
protected def indent(): Unit = indentMargin += indentStep
protected def undent(): Unit = indentMargin -= indentStep
protected def getIndentMargin(): Int = indentMargin
protected def println(): Unit = {
out.write('\n')
while (indentMargin > indentString.length())
indentString += indentString
if (indentMargin > 0)
out.write(indentString, 0, indentMargin)
}
}
class IRTreePrinter(protected val out: Writer) extends IndentationManager {
protected final def printColumn(ts: List[IRNode], start: String,
sep: String, end: String): Unit = {
print(start); indent()
var rest = ts
while (rest.nonEmpty) {
println()
printAnyNode(rest.head)
rest = rest.tail
if (rest.nonEmpty)
print(sep)
}
undent(); println(); print(end)
}
protected final def printRow(ts: List[IRNode], start: String, sep: String,
end: String): Unit = {
print(start)
var rest = ts
while (rest.nonEmpty) {
printAnyNode(rest.head)
rest = rest.tail
if (rest.nonEmpty)
print(sep)
}
print(end)
}
protected def printBlock(tree: Tree): Unit = {
tree match {
case Block(trees) =>
printColumn(trees, "{", ";", "}")
case _ =>
print('{'); indent(); println()
print(tree)
undent(); println(); print('}')
}
}
protected def printSig(args: List[ParamDef], resultType: Type): Unit = {
printRow(args, "(", ", ", ")")
if (resultType != NoType) {
print(": ")
print(resultType)
print(" = ")
} else {
print(' ')
}
}
protected def printArgs(args: List[TreeOrJSSpread]): Unit = {
printRow(args, "(", ", ", ")")
}
def printAnyNode(node: IRNode): Unit = {
node match {
case node: Ident => print(node)
case node: ComputedName => print(node)
case node: ParamDef => print(node)
case node: Tree => print(node)
case node: JSSpread => print(node)
case node: ClassDef => print(node)
case node: MemberDef => print(node)
case node: TopLevelExportDef => print(node)
}
}
def print(paramDef: ParamDef): Unit = {
val ParamDef(ident, ptpe, mutable, rest) = paramDef
if (mutable)
print("var ")
if (rest)
print("...")
print(ident)
print(": ")
print(ptpe)
}
def print(tree: Tree): Unit = {
tree match {
// Definitions
case VarDef(ident, vtpe, mutable, rhs) =>
if (mutable)
print("var ")
else
print("val ")
print(ident)
print(": ")
print(vtpe)
print(" = ")
print(rhs)
// Control flow constructs
case Skip() =>
print("/*<skip>*/")
case tree: Block =>
printBlock(tree)
case Labeled(label, tpe, body) =>
print(label)
if (tpe != NoType) {
print('[')
print(tpe)
print(']')
}
print(": ")
printBlock(body)
case Assign(lhs, rhs) =>
print(lhs)
print(" = ")
print(rhs)
case Return(expr, label) =>
print("return@")
print(label)
print(" ")
print(expr)
case If(cond, BooleanLiteral(true), elsep) =>
print(cond)
print(" || ")
print(elsep)
case If(cond, thenp, BooleanLiteral(false)) =>
print(cond)
print(" && ")
print(thenp)
case If(cond, thenp, elsep) =>
print("if (")
print(cond)
print(") ")
printBlock(thenp)
elsep match {
case Skip() => ()
case If(_, _, _) =>
print(" else ")
print(elsep)
case _ =>
print(" else ")
printBlock(elsep)
}
case While(cond, body) =>
print("while (")
print(cond)
print(") ")
printBlock(body)
case DoWhile(body, cond) =>
print("do ")
printBlock(body)
print(" while (")
print(cond)
print(')')
case ForIn(obj, keyVar, body) =>
print("for (val ")
print(keyVar)
print(" in ")
print(obj)
print(") ")
printBlock(body)
case TryFinally(TryCatch(block, errVar, handler), finalizer) =>
print("try ")
printBlock(block)
print(" catch (")
print(errVar)
print(") ")
printBlock(handler)
print(" finally ")
printBlock(finalizer)
case TryCatch(block, errVar, handler) =>
print("try ")
printBlock(block)
print(" catch (")
print(errVar)
print(") ")
printBlock(handler)
case TryFinally(block, finalizer) =>
print("try ")
printBlock(block)
print(" finally ")
printBlock(finalizer)
case Throw(expr) =>
print("throw ")
print(expr)
case Match(selector, cases, default) =>
print("match (")
print(selector)
print(") {"); indent()
for ((values, body) <- cases) {
println()
printRow(values, "case ", " | ", ":"); indent(); println()
print(body)
print(";")
undent()
}
println()
print("default:"); indent(); println()
print(default)
print(";")
undent()
undent(); println(); print('}')
case Debugger() =>
print("debugger")
// Scala expressions
case New(cls, ctor, args) =>
print("new ")
print(cls)
print("().")
print(ctor)
printArgs(args)
case LoadModule(cls) =>
print("mod:")
print(cls)
case StoreModule(cls, value) =>
print("mod:")
print(cls)
print("<-")
print(value)
case Select(qualifier, item) =>
print(qualifier)
print('.')
print(item)
case SelectStatic(cls, item) =>
print(cls)
print('.')
print(item)
case Apply(flags, receiver, method, args) =>
print(receiver)
print(".")
print(method)
printArgs(args)
case ApplyStatically(flags, receiver, cls, method, args) =>
print(receiver)
print(".")
print(cls)
print("::")
print(flags)
print(method)
printArgs(args)
case ApplyStatic(flags, cls, method, args) =>
print(cls)
print("::")
print(flags)
print(method)
printArgs(args)
case UnaryOp(op, lhs) =>
import UnaryOp._
print('(')
print((op: @switch) match {
case Boolean_! =>
"!"
case IntToChar =>
"(char)"
case IntToByte =>
"(byte)"
case IntToShort =>
"(short)"
case CharToInt | ByteToInt | ShortToInt | LongToInt | DoubleToInt =>
"(int)"
case IntToLong | DoubleToLong =>
"(long)"
case DoubleToFloat =>
"(float)"
case IntToDouble | LongToDouble | FloatToDouble =>
"(double)"
})
print(lhs)
print(')')
case BinaryOp(BinaryOp.Int_-, IntLiteral(0), rhs) =>
print("(-")
print(rhs)
print(')')
case BinaryOp(BinaryOp.Int_^, IntLiteral(-1), rhs) =>
print("(~")
print(rhs)
print(')')
case BinaryOp(BinaryOp.Long_-, LongLiteral(0L), rhs) =>
print("(-")
print(rhs)
print(')')
case BinaryOp(BinaryOp.Long_^, LongLiteral(-1L), rhs) =>
print("(~")
print(rhs)
print(')')
case BinaryOp(BinaryOp.Float_-, FloatLiteral(0.0f), rhs) =>
print("(-")
print(rhs)
print(')')
case BinaryOp(BinaryOp.Double_-,
IntLiteral(0) | FloatLiteral(0.0f) | DoubleLiteral(0.0), rhs) =>
print("(-")
print(rhs)
print(')')
case BinaryOp(op, lhs, rhs) =>
import BinaryOp._
print('(')
print(lhs)
print(' ')
print((op: @switch) match {
case === => "==="
case !== => "!=="
case String_+ => "+[string]"
case Boolean_== => "==[bool]"
case Boolean_!= => "!=[bool]"
case Boolean_| => "|[bool]"
case Boolean_& => "&[bool]"
case Int_+ => "+[int]"
case Int_- => "-[int]"
case Int_* => "*[int]"
case Int_/ => "/[int]"
case Int_% => "%[int]"
case Int_| => "|[int]"
case Int_& => "&[int]"
case Int_^ => "^[int]"
case Int_<< => "<<[int]"
case Int_>>> => ">>>[int]"
case Int_>> => ">>[int]"
case Int_== => "==[int]"
case Int_!= => "!=[int]"
case Int_< => "<[int]"
case Int_<= => "<=[int]"
case Int_> => ">[int]"
case Int_>= => ">=[int]"
case Long_+ => "+[long]"
case Long_- => "-[long]"
case Long_* => "*[long]"
case Long_/ => "/[long]"
case Long_% => "%[long]"
case Long_| => "|[long]"
case Long_& => "&[long]"
case Long_^ => "^[long]"
case Long_<< => "<<[long]"
case Long_>>> => ">>>[long]"
case Long_>> => ">>[long]"
case Long_== => "==[long]"
case Long_!= => "!=[long]"
case Long_< => "<[long]"
case Long_<= => "<=[long]"
case Long_> => ">[long]"
case Long_>= => ">=[long]"
case Float_+ => "+[float]"
case Float_- => "-[float]"
case Float_* => "*[float]"
case Float_/ => "/[float]"
case Float_% => "%[float]"
case Double_+ => "+[double]"
case Double_- => "-[double]"
case Double_* => "*[double]"
case Double_/ => "/[double]"
case Double_% => "%[double]"
case Double_== => "==[double]"
case Double_!= => "!=[double]"
case Double_< => "<[double]"
case Double_<= => "<=[double]"
case Double_> => ">[double]"
case Double_>= => ">=[double]"
})
print(' ')
print(rhs)
print(')')
case NewArray(typeRef, lengths) =>
print("new ")
print(typeRef.baseClassName)
for (length <- lengths) {
print('[')
print(length)
print(']')
}
for (dim <- lengths.size until typeRef.dimensions)
print("[]")
case ArrayValue(typeRef, elems) =>
print(typeRef)
printArgs(elems)
case ArrayLength(array) =>
print(array)
print(".length")
case ArraySelect(array, index) =>
print(array)
print('[')
print(index)
print(']')
case RecordValue(tpe, elems) =>
print('(')
var first = true
for ((field, value) <- tpe.fields zip elems) {
if (first) first = false
else print(", ")
print(field.name)
print(" = ")
print(value)
}
print(')')
case IsInstanceOf(expr, typeRef) =>
print(expr)
print(".isInstanceOf[")
print(typeRef)
print(']')
case AsInstanceOf(expr, typeRef) =>
print(expr)
print(".asInstanceOf[")
print(typeRef)
print(']')
case Unbox(expr, charCode) =>
print(expr)
print(".asInstanceOf[")
print(charCode)
print(']')
case GetClass(expr) =>
print(expr)
print(".getClass()")
// JavaScript expressions
case JSNew(ctor, args) =>
def containsOnlySelectsFromAtom(tree: Tree): Boolean = tree match {
case JSDotSelect(qual, _) => containsOnlySelectsFromAtom(qual)
case JSBracketSelect(qual, _) => containsOnlySelectsFromAtom(qual)
case VarRef(_) => true
case This() => true
case _ => false // in particular, Apply
}
if (containsOnlySelectsFromAtom(ctor)) {
print("new ")
print(ctor)
} else {
print("new (")
print(ctor)
print(')')
}
printArgs(args)
case JSDotSelect(qualifier, item) =>
print(qualifier)
print(".")
print(item)
case JSBracketSelect(qualifier, item) =>
print(qualifier)
print('[')
print(item)
print(']')
case JSFunctionApply(fun, args) =>
fun match {
case _:JSDotSelect | _:JSBracketSelect | _:Select =>
print("(0, ")
print(fun)
print(')')
case _ =>
print(fun)
}
printArgs(args)
case JSDotMethodApply(receiver, method, args) =>
print(receiver)
print(".")
print(method)
printArgs(args)
case JSBracketMethodApply(receiver, method, args) =>
print(receiver)
print('[')
print(method)
print(']')
printArgs(args)
case JSSuperBracketSelect(superClass, qualifier, item) =>
print("super(")
print(superClass)
print(")::")
print(qualifier)
print('[')
print(item)
print(']')
case JSSuperBracketCall(superClass, receiver, method, args) =>
print("super(")
print(superClass)
print(")::")
print(receiver)
print('[')
print(method)
print(']')
printArgs(args)
case JSSuperConstructorCall(args) =>
print("super")
printArgs(args)
case LoadJSConstructor(cls) =>
print("constructorOf[")
print(cls)
print(']')
case LoadJSModule(cls) =>
print("mod:")
print(cls)
case JSDelete(prop) =>
print("delete ")
print(prop)
case JSUnaryOp(op, lhs) =>
import JSUnaryOp._
print('(')
print((op: @switch) match {
case + => "+"
case - => "-"
case ~ => "~"
case ! => "!"
case `typeof` => "typeof "
})
print(lhs)
print(")")
case JSBinaryOp(op, lhs, rhs) =>
import JSBinaryOp._
print('(')
print(lhs)
print(" ")
print((op: @switch) match {
case === => "==="
case !== => "!=="
case + => "+"
case - => "-"
case * => "*"
case / => "/"
case % => "%"
case | => "|"
case & => "&"
case ^ => "^"
case << => "<<"
case >> => ">>"
case >>> => ">>>"
case < => "<"
case <= => "<="
case > => ">"
case >= => ">="
case && => "&&"
case || => "||"
case `in` => "in"
case `instanceof` => "instanceof"
})
print(" ")
print(rhs)
print(')')
case JSArrayConstr(items) =>
printRow(items, "[", ", ", "]")
case JSObjectConstr(Nil) =>
print("{}")
case JSObjectConstr(fields) =>
print('{'); indent(); println()
var rest = fields
while (rest.nonEmpty) {
print(rest.head._1)
print(": ")
print(rest.head._2)
rest = rest.tail
if (rest.nonEmpty) {
print(",")
println()
}
}
undent(); println(); print('}')
case JSGlobalRef(ident) =>
print("global:")
print(ident)
case JSLinkingInfo() =>
print("<linkinginfo>")
// Literals
case Undefined() =>
print("(void 0)")
case Null() =>
print("null")
case BooleanLiteral(value) =>
print(if (value) "true" else "false")
case CharLiteral(value) =>
print('\'')
printEscapeJS(value.toString(), out)
print('\'')
case ByteLiteral(value) =>
if (value >= 0) {
print(value.toString)
print("_b")
} else {
print('(')
print(value.toString)
print("_b)")
}
case ShortLiteral(value) =>
if (value >= 0) {
print(value.toString)
print("_s")
} else {
print('(')
print(value.toString)
print("_s)")
}
case IntLiteral(value) =>
if (value >= 0) {
print(value.toString)
} else {
print('(')
print(value.toString)
print(')')
}
case LongLiteral(value) =>
if (value < 0L)
print('(')
print(value.toString)
print('L')
if (value < 0L)
print(')')
case FloatLiteral(value) =>
if (value == 0.0f && 1.0f / value < 0.0f) {
print("(-0f)")
} else {
if (value < 0.0f)
print('(')
print(value.toString)
print('f')
if (value < 0.0f)
print(')')
}
case DoubleLiteral(value) =>
if (value == 0.0 && 1.0 / value < 0.0) {
print("(-0d)")
} else {
if (value < 0.0)
print('(')
print(value.toString)
print('d')
if (value < 0.0)
print(')')
}
case StringLiteral(value) =>
print('\"')
printEscapeJS(value, out)
print('\"')
case ClassOf(typeRef) =>
print("classOf[")
print(typeRef)
print(']')
// Atomic expressions
case VarRef(ident) =>
print(ident)
case This() =>
print("this")
case Closure(arrow, captureParams, params, body, captureValues) =>
if (arrow)
print("(arrow-lambda<")
else
print("(lambda<")
var first = true
for ((param, value) <- captureParams.zip(captureValues)) {
if (first)
first = false
else
print(", ")
print(param)
print(" = ")
print(value)
}
printRow(params, ">(", ", ", ") = ")
printBlock(body)
print(')')
case CreateJSClass(cls, captureValues) =>
print("createjsclass[")
print(cls)
printRow(captureValues, "](", ", ", ")")
// Transient
case Transient(value) =>
value.printIR(this)
}
}
def print(spread: JSSpread): Unit = {
print("...")
print(spread.items)
}
def print(classDef: ClassDef): Unit = {
import classDef._
for (jsClassCaptures <- classDef.jsClassCaptures) {
if (jsClassCaptures.isEmpty)
print("captures: none")
else
printRow(jsClassCaptures, "captures: ", ", ", "")
println()
}
print(classDef.optimizerHints)
kind match {
case ClassKind.Class => print("class ")
case ClassKind.ModuleClass => print("module class ")
case ClassKind.Interface => print("interface ")
case ClassKind.AbstractJSType => print("abstract js type ")
case ClassKind.HijackedClass => print("hijacked class ")
case ClassKind.JSClass => print("js class ")
case ClassKind.JSModuleClass => print("js module class ")
case ClassKind.NativeJSClass => print("native js class ")
case ClassKind.NativeJSModuleClass => print("native js module class ")
}
print(name)
superClass.foreach { cls =>
print(" extends ")
print(cls)
jsSuperClass.foreach { tree =>
print(" (via ")
print(tree)
print(")")
}
}
if (interfaces.nonEmpty) {
print(" implements ")
var rest = interfaces
while (rest.nonEmpty) {
print(rest.head)
rest = rest.tail
if (rest.nonEmpty)
print(", ")
}
}
jsNativeLoadSpec.foreach { spec =>
print(" loadfrom ")
print(spec)
}
print(" ")
printColumn(memberDefs ::: topLevelExportDefs, "{", "", "}")
}
def print(memberDef: MemberDef): Unit = {
memberDef match {
case FieldDef(flags, name, vtpe) =>
print(flags.namespace.prefixString)
if (flags.isMutable)
print("var ")
else
print("val ")
print(name)
print(": ")
print(vtpe)
case tree: MethodDef =>
val MethodDef(flags, name, args, resultType, body) = tree
print(tree.optimizerHints)
print(flags.namespace.prefixString)
print("def ")
print(name)
printSig(args, resultType)
body.fold {
print("<abstract>")
} { body =>
printBlock(body)
}
case PropertyDef(flags, name, getterBody, setterArgAndBody) =>
getterBody foreach { body =>
print(flags.namespace.prefixString)
print("get ")
print(name)
printSig(Nil, AnyType)
printBlock(body)
}
if (getterBody.isDefined && setterArgAndBody.isDefined) {
println()
}
setterArgAndBody foreach { case (arg, body) =>
print(flags.namespace.prefixString)
print("set ")
print(name)
printSig(arg :: Nil, NoType)
printBlock(body)
}
}
}
def print(topLevelExportDef: TopLevelExportDef): Unit = {
topLevelExportDef match {
case TopLevelJSClassExportDef(exportName) =>
print("export top class \"")
printEscapeJS(exportName, out)
print('\"')
case TopLevelModuleExportDef(exportName) =>
print("export top module \"")
printEscapeJS(exportName, out)
print('\"')
case TopLevelMethodExportDef(methodDef) =>
print("export top ")
print(methodDef)
case TopLevelFieldExportDef(exportName, field) =>
print("export top static field ")
print(field)
print(" as \"")
printEscapeJS(exportName, out)
print('\"')
}
}
def print(tpe: TypeRef): Unit = tpe match {
case ClassRef(className) =>
print(className)
case ArrayTypeRef(base, dims) =>
print(base)
for (i <- 1 to dims)
print("[]")
}
def print(tpe: Type): Unit = tpe match {
case AnyType => print("any")
case NothingType => print("nothing")
case UndefType => print("void")
case BooleanType => print("boolean")
case CharType => print("char")
case ByteType => print("byte")
case ShortType => print("short")
case IntType => print("int")
case LongType => print("long")
case FloatType => print("float")
case DoubleType => print("double")
case StringType => print("string")
case NullType => print("null")
case ClassType(className) => print(className)
case NoType => print("<notype>")
case ArrayType(arrayTypeRef) =>
print(arrayTypeRef)
case RecordType(fields) =>
print('(')
var first = true
for (RecordType.Field(name, _, tpe, mutable) <- fields) {
if (first)
first = false
else
print(", ")
if (mutable)
print("var ")
print(name)
print(": ")
print(tpe)
}
print(')')
}
def print(ident: Ident): Unit =
printEscapeJS(ident.name, out)
def print(propName: PropertyName): Unit = propName match {
case lit: StringLiteral => print(lit: Tree)
case ident: Ident => print(ident)
case ComputedName(tree, index) =>
print("[")
print(tree)
print("](")
print(index)
print(")")
}
def print(spec: JSNativeLoadSpec): Unit = {
def printPath(path: List[String]): Unit = {
for (propName <- path) {
print("[\"")
printEscapeJS(propName, out)
print("\"]")
}
}
spec match {
case JSNativeLoadSpec.Global(globalRef, path) =>
print("global:")
print(globalRef)
printPath(path)
case JSNativeLoadSpec.Import(module, path) =>
print("import(")
print(module)
print(')')
printPath(path)
case JSNativeLoadSpec.ImportWithGlobalFallback(importSpec, globalSpec) =>
print(importSpec)
print(" fallback ")
print(globalSpec)
}
}
def print(s: String): Unit =
out.write(s)
def print(c: Int): Unit =
out.write(c)
def print(optimizerHints: OptimizerHints)(
implicit dummy: DummyImplicit): Unit = {
if (optimizerHints != OptimizerHints.empty) {
print("@hints(")
print(OptimizerHints.toBits(optimizerHints).toString)
print(") ")
}
}
def print(flags: ApplyFlags)(
implicit dummy1: DummyImplicit, dummy2: DummyImplicit): Unit = {
if (flags.isPrivate)
print("private::")
}
// Make it public
override def println(): Unit = super.println()
def complete(): Unit = ()
}
}
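// Example (sketch): rendering an IR node to a String; `tree` is a placeholder for an
// already-constructed Trees.Tree.
//   val writer = new java.io.StringWriter
//   val printer = new Printers.IRTreePrinter(writer)
//   printer.print(tree)
//   printer.complete()
//   val rendered = writer.toString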
| nicolasstucki/scala-js | ir/src/main/scala/org/scalajs/ir/Printers.scala | Scala | apache-2.0 | 27,839 |
/*
,i::,
:;;;;;;;
;:,,::;.
1ft1;::;1tL
t1;::;1,
:;::; _____ __ ___ __
fCLff ;:: tfLLC / ___/ / |/ /____ _ _____ / /_
  CLft11 :,, i1tffLi \__ \ ____ / /|_/ // __ `// ___// __ \
1t1i .;; .1tf ___/ //___// / / // /_/ // /__ / / / /
 CLt1i :,: .1tfL. /____/ /_/ /_/ \__,_/ \___//_/ /_/
Lft1,:;: , 1tfL:
;it1i ,,,:::;;;::1tti s_mach.metadata
.t1i .,::;;; ;1tt Copyright (c) 2016 S-Mach, Inc.
Lft11ii;::;ii1tfL: Author: [email protected]
.L1 1tt1ttt,,Li
...1LLLL...
*/
package s_mach.metadata
/**
* Enumeration of array cardinalities
*/
sealed trait Cardinality
object Cardinality {
/** Indicates an array with zero or one members */
case object ZeroOrOne extends Cardinality
/** Indicates an array with zero or more members */
case object ZeroOrMore extends Cardinality
/** Indicates an array with one or more members */
case object OneOrMore extends Cardinality
/**
* Indicates an array that has at least min members
* and no more than max members
* @param min minimum number of members (inclusive)
* @param max maximum number of members (inclusive)
*/
case class MinMax(min: Int, max: Int) extends Cardinality
}
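// Example (sketch): rendering a Cardinality as a UML-style multiplicity string.
//   def multiplicity(c: Cardinality): String = c match {
//     case Cardinality.ZeroOrOne        => "0..1"
//     case Cardinality.ZeroOrMore       => "0..*"
//     case Cardinality.OneOrMore        => "1..*"
//     case Cardinality.MinMax(min, max) => s"$min..$max"
//   }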
| S-Mach/s_mach.explain | metadata/src/main/scala/s_mach/metadata/Cardinality.scala | Scala | mit | 1,431 |
package ucesoft.cbm.peripheral.keyboard
import java.io.{BufferedReader, FileInputStream, FileNotFoundException, IOException, InputStreamReader, PrintWriter}
import java.awt.event.KeyEvent
import ucesoft.cbm.Log
trait KeyboardMapper {
val map : Map[Int,CKey.Key]
val keypad_map : Map[Int,CKey.Key]
}
object KeyboardMapperStore {
private val KEY_EVENT_MAP = getKeyEventMap
private val KEY_EVENT_REV_MAP = getKeyEventMap map { kv => (kv._2,kv._1) }
def getKey(code:Int) : String = KEY_EVENT_MAP get code match {
case None =>
KeyEvent.getKeyText(code)
case Some(k) =>
k.substring(3)
}
def isExtendedKey(code:Int) : Boolean = !KEY_EVENT_MAP.contains(code)
def store(km:KeyboardMapper,out:PrintWriter) : Unit = {
out.println("[map]")
for(kv <- km.map) {
KEY_EVENT_MAP get kv._1 match {
case Some(k) =>
out.println("%20s = %s".format(s"$k",s"${kv._2}"))
case None =>
out.println("%20s = %s".format(s"#${kv._1}",s"${kv._2}"))
}
}
out.println("[keypad_map]")
for(kv <- km.keypad_map) {
out.println("%20s = %s".format(s"${KEY_EVENT_MAP(kv._1)}",s"${kv._2}"))
}
}
def loadFromResource(name:String) : Option[KeyboardMapper] = {
val in = getClass.getResourceAsStream(name)
if (in == null) None
else {
try {
val map = Some(load(new BufferedReader(new InputStreamReader(in))))
in.close
map
}
catch {
case t:Throwable =>
Log.info(s"Can't load keyboard mapping '$name': " + t)
None
}
}
}
def load(in:BufferedReader) : KeyboardMapper = {
val e_map = new collection.mutable.HashMap[Int,CKey.Key]
val e_keypad_map = new collection.mutable.HashMap[Int,CKey.Key]
var line = in.readLine
var section = 0
while (line != null) {
line = line.trim
section match {
case 0 =>
if (line == "[map]") section += 1
case 1 => // map
if (line == "[keypad_map]") section += 1
else {
val Array(n,v) = line.split("=")
val k = n.trim
val key = if (k.charAt(0) == '#') k.substring(1).toInt else KEY_EVENT_REV_MAP(k)
e_map += key -> CKey.withName(v.trim)
}
case 2 => // keypad_map
val Array(n,v) = line.split("=")
e_keypad_map += KEY_EVENT_REV_MAP(n.trim) -> CKey.withName(v.trim)
}
line = in.readLine
}
if (section == 0) throw new IllegalArgumentException
// add l-shift button
e_map += KeyEvent.VK_SHIFT -> CKey.L_SHIFT
new KeyboardMapper {
val map = e_map.toMap
val keypad_map = e_keypad_map.toMap
}
}
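  // Sketch of the text format `load` expects and `store` emits: key names are
  // java.awt.event.KeyEvent VK_ constants (or '#'-prefixed raw extended key codes), and values
  // must be valid CKey names; the concrete names below are placeholders.
  //   [map]
  //                 VK_A = A
  //                 #222 = RESTORE
  //   [keypad_map]
  //           VK_NUMPAD0 = KP_0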
private def getKeyEventMap : Map[Int,String] = {
val clazz = classOf[KeyEvent]
val fields = clazz.getDeclaredFields
fields filter { _.getName.startsWith("VK_") } map { f => (f.get(null).asInstanceOf[Int],f.getName) } toMap
}
private def findDefaultKeyboardLayoutForLocale(internalResource:String) : String = {
Option(java.awt.im.InputContext.getInstance().getLocale) match {
case None =>
Log.info("Cannot find any keyboard layout for current locale. Switching to IT")
s"${internalResource}_IT"
case Some(loc) =>
s"${internalResource}_${loc.getLanguage.toUpperCase}"
}
}
def loadMapper(externalFile:Option[String],_internalResource:String) : KeyboardMapper = {
externalFile match {
case None =>
val internalResource = findDefaultKeyboardLayoutForLocale(_internalResource)
loadFromResource(internalResource) match {
case None =>
// layout not found, switching to IT
loadFromResource(s"${_internalResource}_IT") match {
case None =>
throw new FileNotFoundException(s"Can't find default keyboard file: ${_internalResource}")
case Some(m) =>
Log.info(s"Loaded keyboard configuration file from $internalResource")
m
}
case Some(m) =>
Log.info(s"Loaded keyboard configuration file from $internalResource")
m
}
case Some(file) =>
try {
val in = new BufferedReader(new InputStreamReader(new FileInputStream(file)))
val m = load(in)
in.close
Log.info(s"Loaded keyboard configuration file from $file")
m
}
catch {
case t:Throwable =>
Log.info(s"Cannot load keyboard file $file: " + t)
println(s"Cannot load keyboard file $file: " + t)
loadMapper(None,_internalResource)
}
}
}
} | abbruzze/kernal64 | Kernal64/src/ucesoft/cbm/peripheral/keyboard/KeyboardMapper.scala | Scala | mit | 4,701 |
import org.junit.Assert.assertEquals
import org.junit.Test
class TestSuite {
@Test
def getRoot(): Unit = {
assertEquals(
Response(
200,
Map("Content-Type" -> "text/html;charset=utf-8"),
body = """|<html>
| <body>
| <h1>Hello, world!</h1>
| </body>
|</html>""".stripMargin
),
Request("GET", "http://localhost:8080/", Map.empty, None)
)
}
}
| earldouglas/xsbt-web-plugin | src/reqs/src/test/scala/TestSuite.scala | Scala | bsd-3-clause | 494 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import java.util.Locale
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.catalog.CatalogTable
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.errors.QueryCompilationErrors
import org.apache.spark.sql.execution.command.{DDLUtils, LeafRunnableCommand}
import org.apache.spark.sql.execution.command.ViewHelper.createTemporaryViewRelation
import org.apache.spark.sql.internal.StaticSQLConf
import org.apache.spark.sql.types._
/**
* Create a table and optionally insert some data into it. Note that this plan is unresolved and
* has to be replaced by the concrete implementations during analysis.
*
* @param tableDesc the metadata of the table to be created.
* @param mode the data writing mode
* @param query an optional logical plan representing data to write into the created table.
*/
case class CreateTable(
tableDesc: CatalogTable,
mode: SaveMode,
query: Option[LogicalPlan]) extends LogicalPlan {
assert(tableDesc.provider.isDefined, "The table to be created must have a provider.")
if (query.isEmpty) {
assert(
mode == SaveMode.ErrorIfExists || mode == SaveMode.Ignore,
"create table without data insertion can only use ErrorIfExists or Ignore as SaveMode.")
}
override def children: Seq[LogicalPlan] = query.toSeq
override def output: Seq[Attribute] = Seq.empty
override lazy val resolved: Boolean = false
override protected def withNewChildrenInternal(
newChildren: IndexedSeq[LogicalPlan]): LogicalPlan =
copy(query = if (query.isDefined) Some(newChildren.head) else None)
}
/**
 * Create or replace a local/global temporary view with the given data source.
*/
case class CreateTempViewUsing(
tableIdent: TableIdentifier,
userSpecifiedSchema: Option[StructType],
replace: Boolean,
global: Boolean,
provider: String,
options: Map[String, String]) extends LeafRunnableCommand {
if (tableIdent.database.isDefined) {
throw QueryCompilationErrors.cannotSpecifyDatabaseForTempViewError(tableIdent)
}
override def argString(maxFields: Int): String = {
s"[tableIdent:$tableIdent " +
userSpecifiedSchema.map(_ + " ").getOrElse("") +
s"replace:$replace " +
s"provider:$provider " +
conf.redactOptions(options)
}
override def run(sparkSession: SparkSession): Seq[Row] = {
if (provider.toLowerCase(Locale.ROOT) == DDLUtils.HIVE_PROVIDER) {
throw QueryCompilationErrors.cannotCreateTempViewUsingHiveDataSourceError()
}
val dataSource = DataSource(
sparkSession,
userSpecifiedSchema = userSpecifiedSchema,
className = provider,
options = options)
val catalog = sparkSession.sessionState.catalog
val analyzedPlan = Dataset.ofRows(
sparkSession, LogicalRelation(dataSource.resolveRelation())).logicalPlan
if (global) {
val db = sparkSession.sessionState.conf.getConf(StaticSQLConf.GLOBAL_TEMP_DATABASE)
val viewIdent = TableIdentifier(tableIdent.table, Option(db))
val viewDefinition = createTemporaryViewRelation(
viewIdent,
sparkSession,
replace,
catalog.getRawGlobalTempView,
originalText = None,
analyzedPlan,
aliasedPlan = analyzedPlan)
catalog.createGlobalTempView(tableIdent.table, viewDefinition, replace)
} else {
val viewDefinition = createTemporaryViewRelation(
tableIdent,
sparkSession,
replace,
catalog.getRawTempView,
originalText = None,
analyzedPlan,
aliasedPlan = analyzedPlan)
catalog.createTempView(tableIdent.table, viewDefinition, replace)
}
Seq.empty[Row]
}
}
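// Example (sketch): the kind of SQL statement this command backs; the path is a placeholder.
//   CREATE TEMPORARY VIEW people USING parquet OPTIONS (path '/data/people.parquet')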
case class RefreshResource(path: String)
extends LeafRunnableCommand {
override def run(sparkSession: SparkSession): Seq[Row] = {
sparkSession.catalog.refreshByPath(path)
Seq.empty[Row]
}
}
| jiangxb1987/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/ddl.scala | Scala | apache-2.0 | 4,862 |
package mageknight
case class GameMap() | fadeddata/mk | src/main/scala/GameMap.scala | Scala | mit | 40 |
package wom.types
abstract class WomPrimitiveType extends WomType {
lazy val coercionMap: Map[WomType, Seq[WomType]] = Map(
// From type -> To type
WomStringType -> Seq(WomStringType, WomIntegerType, WomFloatType, WomFileType, WomBooleanType),
WomFileType -> Seq(WomStringType, WomFileType),
WomIntegerType -> Seq(WomStringType, WomIntegerType, WomFloatType),
WomFloatType -> Seq(WomStringType, WomFloatType),
WomBooleanType -> Seq(WomStringType, WomBooleanType)
)
override def isCoerceableFrom(otherType: WomType): Boolean = {
coercionMap.get(otherType) match {
case Some(types) => types contains this
case None => false
}
}
}
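// Example (sketch): coercion checks as driven by `coercionMap`, assuming the usual primitive
// type singletons extend WomPrimitiveType.
//   WomIntegerType.isCoerceableFrom(WomStringType)  // true: a string may coerce to an integer
//   WomBooleanType.isCoerceableFrom(WomIntegerType) // false: integers never coerce to booleans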
| ohsu-comp-bio/cromwell | wom/src/main/scala/wom/types/WomPrimitiveType.scala | Scala | bsd-3-clause | 683 |
package com.aristocrat.mandrill.requests.Exports
import com.aristocrat.mandrill.requests.MandrillRequest
case class Whitelist(key: String, notifyEmail: String) extends MandrillRequest
| aristocratic/mandrill | src/main/scala/com/aristocrat/mandrill/requests/Exports/Whitelist.scala | Scala | mit | 186 |
package endpoints.scalaj.client
import endpoints.testsuite.client.{BasicAuthTestSuite, OptionalResponsesTestSuite, SimpleTestSuite}
import endpoints.testsuite.{BasicAuthTestApi, OptionalResponsesTestApi, SimpleTestApi}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
class TestClient(val address: String) extends SimpleTestApi
with BasicAuthTestApi
with OptionalResponsesTestApi
with Endpoints
with BasicAuthentication
with OptionalResponses
class EndpointsTest
extends SimpleTestSuite[TestClient]
with BasicAuthTestSuite[TestClient]
with OptionalResponsesTestSuite[TestClient] {
val client: TestClient = new TestClient(s"localhost:$wiremockPort")
def call[Req, Resp](endpoint: client.Endpoint[Req, Resp], args: Req): Future[Resp] =
endpoint.callAsync(args)
clientTestSuite()
basicAuthSuite()
optionalResponsesSuite()
}
| Krever/endpoints | scalaj/client/src/test/scala/endpoints/scalaj/client/EndpointsTest.scala | Scala | mit | 906 |
package geotrellis.util
object Colors {
 val black = "\u001b[0;30m"
 val red = "\u001b[0;31m"
 val green = "\u001b[0;32m"
 val yellow = "\u001b[0;33m"
 val blue = "\u001b[0;34m"
 val magenta = "\u001b[0;35m"
 val cyan = "\u001b[0;36m"
 val grey = "\u001b[0;37m"
 val default = "\u001b[m"
private def _colorLine(color: String, str: String): String = s"${color}${str}${default}"
def black(str: String): String = _colorLine(black, str)
def red(str: String): String = _colorLine(red, str)
def green(str: String): String = _colorLine(green, str)
def yellow(str: String): String = _colorLine(yellow, str)
def blue(str: String): String = _colorLine(blue, str)
def magenta(str: String): String = _colorLine(magenta, str)
def cyan(str: String): String = _colorLine(cyan, str)
def grey(str: String): String = _colorLine(grey, str)
}
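// Example (sketch):
//   println(Colors.red("failure"))    // prints "failure" in red, then resets to the default color
//   println(Colors.green("success"))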
| pomadchin/geotrellis-integration-tests | src/main/scala/geotrellis/util/Colors.scala | Scala | apache-2.0 | 889 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.voa.play.form
import play.api.data.{FormError, Mapping}
import play.api.data.validation.Constraint
case class ConditionalMapping[T](condition: Condition, wrapped: Mapping[T], defaultValue: T,
constraints: Seq[Constraint[T]] = Nil, keys: Set[String] = Set()) extends Mapping[T] {
override val format: Option[(String, Seq[Any])] = wrapped.format
val key = wrapped.key
def verifying(addConstraints: Constraint[T]*): Mapping[T] =
this.copy(constraints = constraints ++ addConstraints.toSeq)
def bind(data: Map[String, String]): Either[Seq[FormError], T] =
if (condition(data)) wrapped.bind(data) else Right(defaultValue)
def unbind(value: T): Map[String, String] = wrapped.unbind(value)
def unbindAndValidate(value: T): (Map[String, String], Seq[FormError]) = wrapped.unbindAndValidate(value)
def withPrefix(prefix: String): Mapping[T] = copy(wrapped = wrapped.withPrefix(prefix))
val mappings: Seq[Mapping[_]] = wrapped.mappings :+ this
}
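// Example (sketch), assuming `Condition` is a predicate over the raw form data
// (Map[String, String] => Boolean) and using Play's standard `text` mapping:
//   import play.api.data.Forms._
//   val otherDetail: Mapping[String] = ConditionalMapping(
//     condition    = data => data.get("hasOther").contains("true"),
//     wrapped      = text,
//     defaultValue = ""
//   )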
| hmrc/play-conditional-form-mapping | src/main/scala/uk/gov/voa/play/form/ConditionalMapping.scala | Scala | apache-2.0 | 1,593 |
package uk.gov.gds.ier.transaction.ordinary.openRegister
import com.google.inject.{Inject, Singleton}
import uk.gov.gds.ier.serialiser.JsonSerialiser
import uk.gov.gds.ier.validation._
import play.api.mvc.Call
import play.api.templates.Html
import uk.gov.gds.ier.config.Config
import uk.gov.gds.ier.security.EncryptionService
import uk.gov.gds.ier.step.{OrdinaryStep, Routes}
import uk.gov.gds.ier.transaction.ordinary.{OrdinaryControllers, InprogressOrdinary}
import uk.gov.gds.ier.assets.RemoteAssets
@Singleton
class OpenRegisterStep @Inject ()(
val serialiser: JsonSerialiser,
val config: Config,
val encryptionService : EncryptionService,
val remoteAssets: RemoteAssets,
val ordinary: OrdinaryControllers
) extends OrdinaryStep
with OpenRegisterForms
with OpenRegisterMustache {
val validation = openRegisterForm
val routing = Routes(
get = routes.OpenRegisterStep.get,
post = routes.OpenRegisterStep.post,
editGet = routes.OpenRegisterStep.editGet,
editPost = routes.OpenRegisterStep.editPost
)
def nextStep(currentState: InprogressOrdinary) = {
ordinary.PostalVoteStep
}
override def isStepComplete(currentState: InprogressOrdinary) = {
currentState.openRegisterOptin.isDefined
}
}
| alphagov/ier-frontend | app/uk/gov/gds/ier/transaction/ordinary/openRegister/OpenRegisterStep.scala | Scala | mit | 1,256 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.producer
import java.net.SocketTimeoutException
import junit.framework.Assert
import kafka.admin.CreateTopicCommand
import kafka.integration.KafkaServerTestHarness
import kafka.message._
import kafka.server.KafkaConfig
import kafka.utils._
import org.junit.Test
import org.scalatest.junit.JUnit3Suite
import kafka.api.ProducerResponseStatus
import kafka.common.{TopicAndPartition, ErrorMapping}
class SyncProducerTest extends JUnit3Suite with KafkaServerTestHarness {
private var messageBytes = new Array[Byte](2);
val configs = List(new KafkaConfig(TestUtils.createBrokerConfigs(1).head))
val zookeeperConnect = TestZKUtils.zookeeperConnect
@Test
def testReachableServer() {
val server = servers.head
val props = TestUtils.getSyncProducerConfig(server.socketServer.port)
val producer = new SyncProducer(new SyncProducerConfig(props))
val firstStart = SystemTime.milliseconds
try {
val response = producer.send(TestUtils.produceRequest("test", 0,
new ByteBufferMessageSet(compressionCodec = NoCompressionCodec, messages = new Message(messageBytes)), acks = 1))
Assert.assertNotNull(response)
} catch {
case e: Exception => Assert.fail("Unexpected failure sending message to broker. " + e.getMessage)
}
val firstEnd = SystemTime.milliseconds
Assert.assertTrue((firstEnd-firstStart) < 500)
val secondStart = SystemTime.milliseconds
try {
val response = producer.send(TestUtils.produceRequest("test", 0,
new ByteBufferMessageSet(compressionCodec = NoCompressionCodec, messages = new Message(messageBytes)), acks = 1))
Assert.assertNotNull(response)
} catch {
case e: Exception => Assert.fail("Unexpected failure sending message to broker. " + e.getMessage)
}
val secondEnd = SystemTime.milliseconds
Assert.assertTrue((secondEnd-secondStart) < 500)
try {
val response = producer.send(TestUtils.produceRequest("test", 0,
new ByteBufferMessageSet(compressionCodec = NoCompressionCodec, messages = new Message(messageBytes)), acks = 1))
Assert.assertNotNull(response)
} catch {
case e: Exception => Assert.fail("Unexpected failure sending message to broker. " + e.getMessage)
}
}
@Test
def testEmptyProduceRequest() {
val server = servers.head
val props = TestUtils.getSyncProducerConfig(server.socketServer.port)
val correlationId = 0
val clientId = SyncProducerConfig.DefaultClientId
val ackTimeoutMs = SyncProducerConfig.DefaultAckTimeoutMs
val ack: Short = 1
val emptyRequest = new kafka.api.ProducerRequest(correlationId, clientId, ack, ackTimeoutMs, collection.mutable.Map[TopicAndPartition, ByteBufferMessageSet]())
val producer = new SyncProducer(new SyncProducerConfig(props))
val response = producer.send(emptyRequest)
Assert.assertTrue(response != null)
Assert.assertTrue(!response.hasError && response.status.size == 0)
}
@Test
def testMessageSizeTooLarge() {
val server = servers.head
val props = TestUtils.getSyncProducerConfig(server.socketServer.port)
val producer = new SyncProducer(new SyncProducerConfig(props))
CreateTopicCommand.createTopic(zkClient, "test", 1, 1)
TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, "test", 0, 500)
val message1 = new Message(new Array[Byte](configs(0).messageMaxBytes + 1))
val messageSet1 = new ByteBufferMessageSet(compressionCodec = NoCompressionCodec, messages = message1)
val response1 = producer.send(TestUtils.produceRequest("test", 0, messageSet1, acks = 1))
Assert.assertEquals(1, response1.status.count(_._2.error != ErrorMapping.NoError))
Assert.assertEquals(ErrorMapping.MessageSizeTooLargeCode, response1.status(TopicAndPartition("test", 0)).error)
Assert.assertEquals(-1L, response1.status(TopicAndPartition("test", 0)).offset)
val safeSize = configs(0).messageMaxBytes - Message.MessageOverhead - MessageSet.LogOverhead - 1
val message2 = new Message(new Array[Byte](safeSize))
val messageSet2 = new ByteBufferMessageSet(compressionCodec = NoCompressionCodec, messages = message2)
val response2 = producer.send(TestUtils.produceRequest("test", 0, messageSet2, acks = 1))
Assert.assertEquals(1, response1.status.count(_._2.error != ErrorMapping.NoError))
Assert.assertEquals(ErrorMapping.NoError, response2.status(TopicAndPartition("test", 0)).error)
Assert.assertEquals(0, response2.status(TopicAndPartition("test", 0)).offset)
}
@Test
def testProduceCorrectlyReceivesResponse() {
val server = servers.head
val props = TestUtils.getSyncProducerConfig(server.socketServer.port)
val producer = new SyncProducer(new SyncProducerConfig(props))
val messages = new ByteBufferMessageSet(NoCompressionCodec, new Message(messageBytes))
// #1 - test that we get an error when partition does not belong to broker in response
val request = TestUtils.produceRequestWithAcks(Array("topic1", "topic2", "topic3"), Array(0), messages, 1)
val response = producer.send(request)
Assert.assertNotNull(response)
Assert.assertEquals(request.correlationId, response.correlationId)
Assert.assertEquals(3, response.status.size)
response.status.values.foreach {
case ProducerResponseStatus(error, nextOffset) =>
Assert.assertEquals(ErrorMapping.UnknownTopicOrPartitionCode.toShort, error)
Assert.assertEquals(-1L, nextOffset)
}
// #2 - test that we get correct offsets when partition is owned by broker
CreateTopicCommand.createTopic(zkClient, "topic1", 1, 1)
TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, "topic1", 0, 500)
CreateTopicCommand.createTopic(zkClient, "topic3", 1, 1)
TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, "topic3", 0, 500)
val response2 = producer.send(request)
Assert.assertNotNull(response2)
Assert.assertEquals(request.correlationId, response2.correlationId)
Assert.assertEquals(3, response2.status.size)
// the first and last message should have been accepted by broker
Assert.assertEquals(ErrorMapping.NoError, response2.status(TopicAndPartition("topic1", 0)).error)
Assert.assertEquals(ErrorMapping.NoError, response2.status(TopicAndPartition("topic3", 0)).error)
Assert.assertEquals(0, response2.status(TopicAndPartition("topic1", 0)).offset)
Assert.assertEquals(0, response2.status(TopicAndPartition("topic3", 0)).offset)
// the middle message should have been rejected because broker doesn't lead partition
Assert.assertEquals(ErrorMapping.UnknownTopicOrPartitionCode.toShort,
response2.status(TopicAndPartition("topic2", 0)).error)
Assert.assertEquals(-1, response2.status(TopicAndPartition("topic2", 0)).offset)
}
@Test
def testProducerCanTimeout() {
val timeoutMs = 500
val server = servers.head
val props = TestUtils.getSyncProducerConfig(server.socketServer.port)
val producer = new SyncProducer(new SyncProducerConfig(props))
val messages = new ByteBufferMessageSet(NoCompressionCodec, new Message(messageBytes))
val request = TestUtils.produceRequest("topic1", 0, messages, acks = 1)
// stop IO threads and request handling, but leave networking operational
// any requests should be accepted and queue up, but not handled
server.requestHandlerPool.shutdown()
val t1 = SystemTime.milliseconds
try {
producer.send(request)
Assert.fail("Should have received timeout exception since request handling is stopped.")
} catch {
case e: SocketTimeoutException => /* success */
case e => Assert.fail("Unexpected exception when expecting timeout: " + e)
}
val t2 = SystemTime.milliseconds
// make sure we don't wait fewer than timeoutMs for a response
Assert.assertTrue((t2-t1) >= timeoutMs)
}
@Test
def testProduceRequestWithNoResponse() {
val server = servers.head
val props = TestUtils.getSyncProducerConfig(server.socketServer.port)
val correlationId = 0
val clientId = SyncProducerConfig.DefaultClientId
val ackTimeoutMs = SyncProducerConfig.DefaultAckTimeoutMs
val ack: Short = 0
val emptyRequest = new kafka.api.ProducerRequest(correlationId, clientId, ack, ackTimeoutMs, collection.mutable.Map[TopicAndPartition, ByteBufferMessageSet]())
val producer = new SyncProducer(new SyncProducerConfig(props))
val response = producer.send(emptyRequest)
Assert.assertTrue(response == null)
}
}
| kavink92/kafka-0.8.0-beta1-src | core/src/test/scala/unit/kafka/producer/SyncProducerTest.scala | Scala | apache-2.0 | 9,318 |
package gh2011.models
import net.liftweb.json.JsonAST.JValue
case class MemberEventPayload(repo: String, member: String, actor: String, actor_gravatar: String, action: String)
object MemberEventPayload
{
def apply(json: JValue): Option[MemberEventPayload] =
{
val n2s = gh3.node2String(json)(_)
val n2l = gh3.node2Long(json)(_)
val repo = n2s("repo")
val member = n2s("member")
val actor = n2s("actor")
val actor_gravatar = n2s("actor_gravatar")
val action = n2s("action")
val params = Seq(repo, member, actor, actor, actor_gravatar, action)
if(params.forall(_.isDefined))
Some(MemberEventPayload(repo.get, member.get, actor.get, actor_gravatar.get, action.get))
else None
}
}
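// Example (sketch), assuming lift-json's `parse` is available and the payload uses the flat
// field layout expected by the gh3 helpers:
//   import net.liftweb.json._
//   val json = parse(
//     """{"repo":"octo/repo","member":"alice","actor":"bob",
//        "actor_gravatar":"d41d8cd9","action":"added"}""")
//   val payload: Option[MemberEventPayload] = MemberEventPayload(json)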
| mgoeminne/github_etl | src/main/scala/gh2011/models/MemberEventPayload.scala | Scala | mit | 762 |
/*
* Copyright 2020 Precog Data
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package pathy.scalacheck
import slamdata.Predef._
import pathy.Path.{DirName, FileName}
import org.scalacheck.{Gen, Arbitrary}
import scalaz.Show
import scalaz.syntax.show._
/** Represents a `FileName` indexed by another type `A` which is used to
* generate the segment.
*
* i.e. For some type `A` such that `Arbitrary[A]` and `Show[A]`,
* `Arbitrary[FileNameOf[A]]` will generate an arbitrary `FileName` where
* the path segment is formed by the string representation of an arbitrary `A`.
*/
final case class FileNameOf[A](filename: FileName)
/** Represents a `DirName` indexed by another type `A` which is used to
* generate the segment.
*
* i.e. For some type `A` such that `Arbitrary[A]` and `Show[A]`,
* `Arbitrary[DirNameOf[A]]` will generate an arbitrary `DirName` where
* the path segment is formed by the string representation of an arbitrary `A`.
*/
final case class DirNameOf[A](dirname: DirName)
object PathNameOf {
implicit def fileNameOf[A: Arbitrary: Show]: Arbitrary[FileNameOf[A]] =
Arbitrary(genFileName[A] map (FileNameOf(_)))
implicit def dirNameOf[A: Arbitrary: Show]: Arbitrary[DirNameOf[A]] =
Arbitrary(genDirName[A] map (DirNameOf(_)))
////
private[scalacheck] def genFileName[A: Arbitrary: Show]: Gen[FileName] =
genSegment[A].map(FileName(_))
private[scalacheck] def genDirName[A: Arbitrary: Show]: Gen[DirName] =
genSegment[A].map(DirName(_))
private def genSegment[A: Arbitrary: Show]: Gen[String] =
Arbitrary.arbitrary[A] map (_.shows)
}
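
// A minimal usage sketch (illustrative only): with the implicits above in scope, ScalaCheck
// properties can range over path segments derived from any `A` having `Arbitrary` and `Show`
// instances. It assumes `FileName` exposes its segment as `value`.
//
//   import org.scalacheck.Prop.forAll
//   import PathNameOf._
//
//   val nonEmptySegments = forAll { (f: FileNameOf[Int]) =>
//     f.filename.value.nonEmpty
//   }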
| mossprescott/slamengine-pathy | scalacheck/src/main/scala/pathy/scalacheck/PathNameOf.scala | Scala | agpl-3.0 | 2,130 |
package io.cumulus.stages
import java.time.format.DateTimeFormatter
import java.time.{LocalDateTime, ZoneId}
import akka.stream.Materializer
import akka.stream.scaladsl.StreamConverters
import com.sksamuel.scrimage
import io.cumulus.stream.storage.StorageReferenceReader
import io.cumulus.validation.AppError
import io.cumulus.models.fs._
import io.cumulus.models.user.session.Session
import io.cumulus.persistence.storage.StorageEngines
import org.apache.pdfbox.pdmodel.PDDocument
import scala.collection.JavaConverters._
import scala.concurrent.ExecutionContext
import scala.util.Try
/**
* Abstract metadata extractor, used to read metadata from an uploaded file.
*/
trait MetadataExtractor {
def extract(
file: File
)(implicit
ec: ExecutionContext,
materializer: Materializer,
ciphers: Ciphers,
compressions: Compressions,
storageEngines: StorageEngines,
session: Session
): Either[AppError, FileMetadata]
  def maxSize: Long = 1048576 // 1 Mo (1,048,576 bytes)
def applyOn: Seq[String]
}
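
/**
 * A minimal sketch of a concrete extractor (illustrative only, not wired into the application):
 * it shows the two members a new MIME-type handler has to provide. A real implementation would
 * stream and parse the file instead of returning empty metadata.
 */
object PlainTextMetadataExtractor extends MetadataExtractor {

  def extract(
    file: File
  )(implicit
    ec: ExecutionContext,
    materializer: Materializer,
    ciphers: Ciphers,
    compressions: Compressions,
    storageEngines: StorageEngines,
    session: Session
  ): Either[AppError, DefaultMetadata] =
    Right(DefaultMetadata.empty) // placeholder: a real extractor would parse the content stream here

  def applyOn: Seq[String] = Seq("text/plain")
}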
/**
* Default implementation, returning no metadata.
*/
object DefaultMetadataExtractor extends MetadataExtractor {
def extract(
file: File
)(implicit
ec: ExecutionContext,
materializer: Materializer,
ciphers: Ciphers,
compressions: Compressions,
storageEngines: StorageEngines,
session: Session
): Either[AppError, DefaultMetadata] = {
Right(DefaultMetadata.empty)
}
def applyOn: Seq[String] = Seq()
}
/**
* Implementation for images, using scrimage to read metadata from an image. Exif data are extracted.
*/
object ImageMetadataExtractor extends MetadataExtractor {
def extract(
file: File
)(implicit
ec: ExecutionContext,
materializer: Materializer,
ciphers: Ciphers,
compressions: Compressions,
storageEngines: StorageEngines,
session: Session
): Either[AppError, ImageMetadata] = {
StorageReferenceReader.reader(
file
).map { source =>
val fileInputStream = source.runWith(StreamConverters.asInputStream())
val metadata = scrimage.ImageMetadata.fromStream(fileInputStream)
val make = metadata.tags.find(_.name == "Make").map(_.value)
val model = metadata.tags.find(_.name == "Model").map(_.value)
val latitudeRef = metadata.tags.find(_.name == "GPS Latitude Ref").map(_.value)
val latitude = metadata.tags.find(_.name == "GPS Latitude").map(_.value)
val longitudeRef = metadata.tags.find(_.name == "GPS Longitude Ref").map(_.value)
val longitude = metadata.tags.find(_.name == "GPS Longitude").map(_.value)
val altitude = metadata.tags.find(_.name == "GPS Altitude").map(_.value)
val dateTimeFormatter = DateTimeFormatter.ofPattern("yyyy:MM:dd HH:mm:ss")
val datetime = metadata.tags.find(_.name == "Date/Time Original").map(t => LocalDateTime.parse(t.value, dateTimeFormatter))
val pattern = "(\\\\d+).*".r
val height = metadata.tags.find(_.name == "Image Height").flatMap { t =>
t.value match {
case pattern(heightValue) =>
Try(heightValue.toLong).toOption
case _ =>
None
}
}
val width = metadata.tags.find(_.name == "Image Width").flatMap { t =>
t.value match {
case pattern(widthValue) =>
Try(widthValue.toLong).toOption
case _ =>
None
}
}
ImageMetadata.apply(
maker = make,
model = model,
latitudeRef = latitudeRef,
latitude = latitude,
longitudeRef = longitudeRef,
longitude = longitude,
altitude = altitude,
datetime = datetime,
height = height,
width = width,
values = Map(
metadata.tags
.filter(_.value != null) // Filter out null values
.map(t => t.name -> t.value.replaceAll("""\\u0000""", "")):_*
),
tags = Seq.empty
)
}
}
def applyOn: Seq[String] = Seq(
"image/bmp",
"image/gif",
"image/x-icon",
"image/jpeg",
"image/jpg",
"image/png",
"image/tiff"
)
}
/**
* Implementation for PDFs, using PDFBox to read the document and extract metadata.
*/
object PDFDocumentMetadataExtractor extends MetadataExtractor {
def extract(
file: File
)(implicit
ec: ExecutionContext,
materializer: Materializer,
ciphers: Ciphers,
compressions: Compressions,
storageEngines: StorageEngines,
session: Session
): Either[AppError, PDFDocumentMetadata] = {
StorageReferenceReader.reader(
file
).map { source =>
var document: PDDocument = null
try {
val fileInputStream = source.runWith(StreamConverters.asInputStream())
document = PDDocument.load(fileInputStream)
val info = document.getDocumentInformation
val pageCount: Long = Option(document.getNumberOfPages).map(_.toLong).getOrElse(0)
val title = Option(info.getTitle)
val author = Option(info.getAuthor)
val subject = Option(info.getSubject)
val tags = Option(info.getKeywords).map(_.split(";").toSeq.filterNot(_.trim.isEmpty)).getOrElse(Seq[String]())
val creator = Option(info.getCreator)
val producer = Option(info.getProducer)
val creation = Option(info.getCreationDate).map(t => t.toInstant.atZone(ZoneId.systemDefault()).toLocalDateTime)
val modification = Option(info.getModificationDate).map(t => t.toInstant.atZone(ZoneId.systemDefault()).toLocalDateTime)
val keys = info.getMetadataKeys.asScala
val values = Map(keys.map(key => Option(info.getCustomMetadataValue(key)).map(v => key -> v)).toSeq.flatten: _*)
PDFDocumentMetadata(
pageCount = pageCount,
title = title,
author = author,
subject = subject,
tags = tags,
creator = creator,
producer = producer,
creationDate = creation,
modificationDate = modification,
values = values
)
} finally {
if(document != null)
document.close()
}
}
}
def applyOn: Seq[String] = Seq(
"application/pdf"
)
}
case class MetadataExtractors(extractors: MetadataExtractor*) {
def get(name: String): MetadataExtractor =
extractors
.find(_.applyOn.contains(name))
.getOrElse(DefaultMetadataExtractor)
def get(name: Option[String]): MetadataExtractor =
name match {
case Some(n) => get(n)
case _ => DefaultMetadataExtractor
}
}
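
// Usage sketch (illustrative wiring, not the application's actual configuration):
//
//   val extractors = MetadataExtractors(ImageMetadataExtractor, PDFDocumentMetadataExtractor)
//   extractors.get(Some("image/png"))       // ImageMetadataExtractor
//   extractors.get(Some("application/pdf")) // PDFDocumentMetadataExtractor
//   extractors.get(None)                    // falls back to DefaultMetadataExtractor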
| Cumulus-Cloud/cumulus | server/cumulus-core/src/main/scala/io/cumulus/stages/MetadataStage.scala | Scala | mit | 6,546 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package test
import play.api.test._
import models.UserId
object RouterSpec extends PlaySpecification {
"reverse routes containing boolean parameters" in {
"in the query string" in {
controllers.routes.Application.takeBool(true).url must equalTo("/take-bool?b=true")
controllers.routes.Application.takeBool(false).url must equalTo("/take-bool?b=false")
}
"in the path" in {
controllers.routes.Application.takeBool2(true).url must equalTo("/take-bool-2/true")
controllers.routes.Application.takeBool2(false).url must equalTo("/take-bool-2/false")
}
}
"reverse routes containing custom parameters" in {
"the query string" in {
controllers.routes.Application.queryUser(UserId("foo")).url must equalTo("/query-user?userId=foo")
controllers.routes.Application.queryUser(UserId("foo/bar")).url must equalTo("/query-user?userId=foo%2Fbar")
controllers.routes.Application.queryUser(UserId("foo?bar")).url must equalTo("/query-user?userId=foo%3Fbar")
controllers.routes.Application.queryUser(UserId("foo%bar")).url must equalTo("/query-user?userId=foo%25bar")
controllers.routes.Application.queryUser(UserId("foo&bar")).url must equalTo("/query-user?userId=foo%26bar")
}
"the path" in {
controllers.routes.Application.user(UserId("foo")).url must equalTo("/users/foo")
controllers.routes.Application.user(UserId("foo/bar")).url must equalTo("/users/foo%2Fbar")
controllers.routes.Application.user(UserId("foo?bar")).url must equalTo("/users/foo%3Fbar")
controllers.routes.Application.user(UserId("foo%bar")).url must equalTo("/users/foo%25bar")
// & is not special for path segments
controllers.routes.Application.user(UserId("foo&bar")).url must equalTo("/users/foo&bar")
}
}
"bind boolean parameters" in {
"from the query string" in new WithApplication() {
val result = route(implicitApp, FakeRequest(GET, "/take-bool?b=true")).get
contentAsString(result) must equalTo("true")
val result2 = route(implicitApp, FakeRequest(GET, "/take-bool?b=false")).get
contentAsString(result2) must equalTo("false")
// Bind boolean values from 1 and 0 integers too
contentAsString(route(implicitApp, FakeRequest(GET, "/take-bool?b=1")).get) must equalTo("true")
contentAsString(route(implicitApp, FakeRequest(GET, "/take-bool?b=0")).get) must equalTo("false")
}
"from the path" in new WithApplication() {
val result = route(implicitApp, FakeRequest(GET, "/take-bool-2/true")).get
contentAsString(result) must equalTo("true")
val result2 = route(implicitApp, FakeRequest(GET, "/take-bool-2/false")).get
contentAsString(result2) must equalTo("false")
// Bind boolean values from 1 and 0 integers too
contentAsString(route(implicitApp, FakeRequest(GET, "/take-bool-2/1")).get) must equalTo("true")
contentAsString(route(implicitApp, FakeRequest(GET, "/take-bool-2/0")).get) must equalTo("false")
}
}
"bind int parameters from the query string as a list" in {
"from a list of numbers" in new WithApplication() {
val result =
route(implicitApp, FakeRequest(GET, controllers.routes.Application.takeList(List(1, 2, 3)).url)).get
contentAsString(result) must equalTo("1,2,3")
}
"from a list of numbers and letters" in new WithApplication() {
val result = route(implicitApp, FakeRequest(GET, "/take-list?x=1&x=a&x=2")).get
status(result) must equalTo(BAD_REQUEST)
}
"when there is no parameter at all" in new WithApplication() {
val result = route(implicitApp, FakeRequest(GET, "/take-list")).get
contentAsString(result) must equalTo("")
}
"using the Java API" in new WithApplication() {
val result = route(implicitApp, FakeRequest(GET, "/take-java-list?x=1&x=2&x=3")).get
contentAsString(result) must equalTo("1,2,3")
}
"using backticked names on route params" in new WithApplication() {
val result = route(implicitApp, FakeRequest(GET, "/take-list-tick-param?b[]=4&b[]=5&b[]=6")).get
contentAsString(result) must equalTo("4,5,6")
}
"using backticked names urlencoded on route params" in new WithApplication() {
val result = route(implicitApp, FakeRequest(GET, "/take-list-tick-param?b%5B%5D=4&b%5B%5D=5&b%5B%5D=6")).get
contentAsString(result) must equalTo("4,5,6")
}
}
"use a new instance for each instantiated controller" in new WithApplication() {
route(implicitApp, FakeRequest(GET, "/instance")) must beSome.like {
case result => contentAsString(result) must_== "1"
}
route(implicitApp, FakeRequest(GET, "/instance")) must beSome.like {
case result => contentAsString(result) must_== "1"
}
}
"URL encoding and decoding works correctly" in new WithApplication() {
def checkDecoding(
dynamicEncoded: String,
staticEncoded: String,
queryEncoded: String,
dynamicDecoded: String,
staticDecoded: String,
queryDecoded: String
) = {
val path = s"/urlcoding/$dynamicEncoded/$staticEncoded?q=$queryEncoded"
val expected = s"dynamic=$dynamicDecoded static=$staticDecoded query=$queryDecoded"
val result = route(implicitApp, FakeRequest(GET, path)).get
val actual = contentAsString(result)
actual must equalTo(expected)
}
def checkEncoding(
dynamicDecoded: String,
staticDecoded: String,
queryDecoded: String,
dynamicEncoded: String,
staticEncoded: String,
queryEncoded: String
) = {
val expected = s"/urlcoding/$dynamicEncoded/$staticEncoded?q=$queryEncoded"
val call = controllers.routes.Application.urlcoding(dynamicDecoded, staticDecoded, queryDecoded)
call.url must equalTo(expected)
}
checkDecoding("a", "a", "a", "a", "a", "a")
checkDecoding("%2B", "%2B", "%2B", "+", "%2B", "+")
checkDecoding("+", "+", "+", "+", "+", " ")
checkDecoding("%20", "%20", "%20", " ", "%20", " ")
checkDecoding("&", "&", "-", "&", "&", "-")
checkDecoding("=", "=", "-", "=", "=", "-")
checkEncoding("+", "+", "+", "+", "+", "%2B")
checkEncoding(" ", " ", " ", "%20", " ", "+")
checkEncoding("&", "&", "&", "&", "&", "%26")
checkEncoding("=", "=", "=", "=", "=", "%3D")
// We use java.net.URLEncoder for query string encoding, which is not
// RFC compliant, e.g. it percent-encodes "/" which is not a delimiter
// for query strings, and it percent-encodes "~" which is an "unreserved" character
    // that should never be percent-encoded. The following tests, therefore,
    // don't really capture our ideal desired behaviour for query string
// encoding. However, the behaviour for dynamic and static paths is correct.
checkEncoding("/", "/", "/", "%2F", "/", "%2F")
checkEncoding("~", "~", "~", "~", "~", "%7E")
checkDecoding("123", "456", "789", "123", "456", "789")
checkEncoding("123", "456", "789", "123", "456", "789")
}
"allow reverse routing of routes includes" in new WithApplication() {
// Force the router to bootstrap the prefix
implicitApp.injector.instanceOf[play.api.routing.Router]
controllers.module.routes.ModuleController.index.url must_== "/module/index"
}
"document the router" in new WithApplication() {
// The purpose of this test is to alert anyone that changes the format of the router documentation that
// it is being used by Swagger. So if you do change it, please let Tony Tam know at tony at wordnik dot com.
val someRoute = implicitApp.injector
.instanceOf[play.api.routing.Router]
.documentation
.find(r => r._1 == "GET" && r._2.startsWith("/with/"))
someRoute must beSome[(String, String, String)]
val route = someRoute.get
route._2 must_== "/with/$param<[^/]+>"
route._3 must startWith("controllers.Application.withParam")
}
"reverse routes complex query params " in new WithApplication() {
controllers.routes.Application
.takeListTickedParam(List(1, 2, 3))
.url must_== "/take-list-tick-param?b%5B%5D=1&b%5B%5D=2&b%5B%5D=3" // ?b[]=1&b[]=2&b[]=3
}
"choose the first matching route for a call in reverse routes" in new WithApplication() {
controllers.routes.Application.hello.url must_== "/hello"
}
"The assets reverse route support" should {
"fingerprint assets" in new WithApplication() {
controllers.routes.Assets.versioned("css/main.css").url must_== "/public/css/abcd1234-main.css"
}
"selected the minified version" in new WithApplication() {
controllers.routes.Assets.versioned("css/minmain.css").url must_== "/public/css/abcd1234-minmain-min.css"
}
"work for non fingerprinted assets" in new WithApplication() {
controllers.routes.Assets.versioned("css/nonfingerprinted.css").url must_== "/public/css/nonfingerprinted.css"
}
"selected the minified non fingerprinted version" in new WithApplication() {
controllers.routes.Assets
.versioned("css/nonfingerprinted-minmain.css")
.url must_== "/public/css/nonfingerprinted-minmain-min.css"
}
}
}
| mkurz/playframework | dev-mode/sbt-plugin/src/sbt-test/play-sbt-plugin/routes-compiler-injected-routes-compilation/tests/RouterSpec.scala | Scala | apache-2.0 | 9,259 |
object Test extends App {
trait MyPF[@specialized(Int) -A] extends (A => Unit) {
def isDefinedAt(x: A): Boolean
def applyOrElse[A1 <: A](x: A1, default: A1 => Unit): Unit = {
println("MyPF.applyOrElse entered...")
if (isDefinedAt(x)) apply(x) else default(x)
}
}
trait MySmartPF[@specialized(Int) -A] extends MyPF[A] {
def apply(x: A): Unit = {
println("MySmartPF.apply entered...")
applyOrElse(x, { default: Any => throw new MatchError(default) })
}
}
type T = Int
//type T = Any
def newPF(test: T): MyPF[T] = new MySmartPF[T] {
def isDefinedAt(x: T): Boolean = x != test
override def applyOrElse[A1 <: T](x: A1, default: A1 => Unit): Unit = {
println("newPF.applyOrElse entered...")
if (x != test) { println("ok"); () } else { println("default"); default(x) }
}
}
val pf = newPF(1)
println("=== pf(1):")
try { pf(1) } catch { case x: Throwable => println(x) }
println("=== pf(42):")
pf(42)
println("=== done")
}
| scala/scala | test/files/run/t5629b.scala | Scala | apache-2.0 | 1,016 |
package termfiltering
import org.apache.log4j.{Level, Logger}
import org.apache.spark.{SparkConf, SparkContext}
/**
* Created by chanjinpark on 2016. 9. 15..
*/
object TermFrequency extends App {
  /* TODO: 1. Distribution of frequently occurring terms 2. Distribution of important terms via TF-IDF
    Remove terms that are neither important nor frequently occurring
    (see the TF-IDF sketch at the end of this object)
   */
val conf = new SparkConf(true).setMaster("local").setAppName("NSFLDA")
val sc = new SparkContext(conf)
Logger.getLogger("org").setLevel(Level.ERROR)
import org.apache.spark.sql.SparkSession
import org.apache.spark.ml.feature.{CountVectorizer, CountVectorizerModel}
import org.apache.spark.mllib.evaluation.{BinaryClassificationMetrics, MulticlassMetrics}
val spark = SparkSession
.builder()
.appName("Spark SQL Example")
.config("spark.some.config.option", "some-value")
.getOrCreate()
val sqlContext = new org.apache.spark.sql.SQLContext(sc)
import org.apache.spark.ml.linalg.{Vectors, VectorUDT, SparseVector, Vector}
import org.apache.spark.ml.feature.LabeledPoint
import org.apache.spark.rdd.RDD
val dir = "/Users/chanjinpark/GitHub/NRFAnalysis/"
val docs = sc.textFile(dir + "data/docs")
val corpus = sc.textFile(dir + "data/corpus").map(_.split(","))
case class MetaData(title: String, mainArea: Array[String], nationArea: Array[String], sixTArea: Array[String]) {
override def toString: String = title + ":::" + mainArea.mkString(",") + ":::" +
nationArea.mkString(",") + ":::" + sixTArea.mkString(",")
}
def getMetadata(s: String) = {
val attr = s.split(":::")
new MetaData(attr(0), attr(1).split(","), attr(2).split(","), attr(3).split(","))
}
val metadata = {
scala.io.Source.fromFile(dir + "data/meta.txt").getLines().map(l => {
val id = l.substring(0, l.indexOf("-"))
val meta = getMetadata(l.substring(l.indexOf("-") + 1))
(id, meta)
}).toMap
}
val crb: Map[String, Int] = metadata.map(_._2.mainArea(1)).toList.distinct.sortWith(_.compare(_) < 0).zipWithIndex.toMap
val classes = crb.map(_.swap)
val vocab = corpus.flatMap(x => x).distinct.collect.zipWithIndex.toMap
val matrix: RDD[Vector] = corpus.map {
case tokens => {
//val counts = new scala.collection.mutable.HashMap[Int, Double]()
val vec = tokens.foldLeft(Map[Int, Double]())((res, t) => {
val vocabid = vocab(t)
res + (vocabid -> (res.getOrElse(vocabid, 0.0) + 1.0))
})
//val (indices, values) = vec.keys
new SparseVector(vocab.size, vec.keys.toArray, vec.values.toArray)
}
}
//val cvModel: CountVectorizerModel = new CountVectorizer().setInputCol("words").setOutputCol("features").setVocabSize(vocab.size).fit(sqlContext.createDataFrame(corpus.zipWithIndex()).toDF("words", "idx"))
  def getLabel(m: MetaData): Double = crb(m.mainArea(1)) // CRB classification
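
  // A sketch of step 2 from the TODO above (the name `tfidfMatrix` and `minDocFreq = 2` are
  // illustrative assumptions): weight the raw term counts in `matrix` with TF-IDF so that terms
  // that are neither frequent nor important can be filtered out later.
  lazy val tfidfMatrix = {
    import org.apache.spark.mllib.feature.IDF
    import org.apache.spark.mllib.linalg.{Vectors => OldVectors}
    // Convert the ml.linalg vectors produced above into mllib vectors expected by IDF.
    val termFrequencies = matrix.map(v => OldVectors.fromML(v))
    termFrequencies.cache()
    new IDF(minDocFreq = 2).fit(termFrequencies).transform(termFrequencies)
  }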
}
| chanjin/NRFAnalysis | src/main/scala/termfiltering/TermFrequency.scala | Scala | apache-2.0 | 2,876 |
/*
* Copyright (C) 2012 The Regents of The University California.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shark.execution
import java.util.{ArrayList, Arrays}
import scala.collection.JavaConversions._
import scala.reflect.BeanProperty
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_PARTITION_COLUMNS
import org.apache.hadoop.hive.ql.exec.{TableScanOperator => HiveTableScanOperator}
import org.apache.hadoop.hive.ql.exec.{MapSplitPruning, Utilities}
import org.apache.hadoop.hive.ql.io.HiveInputFormat
import org.apache.hadoop.hive.ql.io.orc.OrcSerde
import org.apache.hadoop.hive.ql.metadata.{Partition, Table}
import org.apache.hadoop.hive.ql.plan.{TableDesc, TableScanDesc}
import org.apache.hadoop.hive.ql.plan.PartitionDesc
import org.apache.hadoop.hive.ql.plan.PlanUtils
import org.apache.hadoop.hive.ql.metadata.{Partition, Table}
import org.apache.hadoop.hive.serde.serdeConstants
import org.apache.hadoop.hive.serde2.Serializer
import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
import org.apache.hadoop.hive.serde2.`lazy`.LazyStruct
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory
import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory
import org.apache.spark.rdd.{PartitionPruningRDD, RDD}
import shark.{LogHelper, SharkConfVars, SharkEnv}
import shark.execution.optimization.ColumnPruner
import shark.memstore2.CacheType
import shark.memstore2.CacheType._
import shark.memstore2.{ColumnarSerDe, MemoryMetadataManager}
import shark.memstore2.{TablePartition, TablePartitionStats}
import shark.util.HiveUtils
/**
* The TableScanOperator is used for scanning any type of Shark or Hive table.
*/
class TableScanOperator extends TopOperator[TableScanDesc] {
// TODO(harvey): Try to use 'TableDesc' for execution and save 'Table' for analysis/planning.
// Decouple `Table` from TableReader and ColumnPruner.
@transient var table: Table = _
@transient var hiveOp: HiveTableScanOperator = _
  // Metadata for Hive partitions (i.e., if the table was created with PARTITIONED BY). NULL if this
// table isn't Hive-partitioned. Set in SparkTask::initializeTableScanTableDesc().
@transient var parts: Array[Partition] = _
// For convenience, a local copy of the HiveConf for this task.
@transient var localHConf: HiveConf = _
// PartitionDescs are used during planning in Hive. This reference to a single PartitionDesc
// is used to initialize partition ObjectInspectors.
// If the table is not Hive-partitioned, then 'firstConfPartDesc' won't be used. The value is not
// NULL, but rather a reference to a "dummy" PartitionDesc, in which only the PartitionDesc's
// 'table' is not NULL.
// Set in SparkTask::initializeTableScanTableDesc().
@BeanProperty var firstConfPartDesc: PartitionDesc = _
@BeanProperty var tableDesc: TableDesc = _
  // True if the table data is stored in the Spark heap.
@BeanProperty var isInMemoryTableScan: Boolean = _
@BeanProperty var cacheMode: CacheType.CacheType = _
override def initializeOnMaster() {
// Create a local copy of the HiveConf that will be assigned job properties and, for disk reads,
    // broadcast to slaves.
localHConf = new HiveConf(super.hconf)
cacheMode = CacheType.fromString(
tableDesc.getProperties().get("shark.cache").asInstanceOf[String])
isInMemoryTableScan = SharkEnv.memoryMetadataManager.containsTable(
table.getDbName, table.getTableName)
}
override def outputObjectInspector() = {
if (parts == null) {
val tableSerDe = if (isInMemoryTableScan || cacheMode == CacheType.TACHYON) {
new ColumnarSerDe
} else {
tableDesc.getDeserializerClass().newInstance()
}
tableSerDe.initialize(hconf, tableDesc.getProperties)
tableSerDe.getObjectInspector()
} else {
val partProps = firstConfPartDesc.getProperties()
val tableSerDe = if (isInMemoryTableScan || cacheMode == CacheType.TACHYON) {
new ColumnarSerDe
} else {
tableDesc.getDeserializerClass().newInstance()
}
tableSerDe.initialize(hconf, tableDesc.getProperties)
HiveUtils.makeUnionOIForPartitionedTable(partProps, tableSerDe)
}
}
override def execute(): RDD[_] = {
assert(parentOperators.size == 0)
val tableNameSplit = tableDesc.getTableName.split('.') // Split from 'databaseName.tableName'
val databaseName = tableNameSplit(0)
val tableName = tableNameSplit(1)
// There are three places we can load the table from.
// 1. Spark heap (block manager), accessed through the Shark MemoryMetadataManager
// 2. Tachyon table
// 3. Hive table on HDFS (or other Hadoop storage)
// TODO(harvey): Pruning Hive-partitioned, cached tables isn't supported yet.
if (isInMemoryTableScan || cacheMode == CacheType.TACHYON) {
if (isInMemoryTableScan) {
assert(cacheMode == CacheType.MEMORY || cacheMode == CacheType.MEMORY_ONLY,
"Table %s.%s is in Shark metastore, but its cacheMode (%s) indicates otherwise".
format(databaseName, tableName, cacheMode))
}
val tableReader = if (cacheMode == CacheType.TACHYON) {
new TachyonTableReader(tableDesc)
} else {
new HeapTableReader(tableDesc)
}
if (table.isPartitioned) {
tableReader.makeRDDForPartitionedTable(parts, Some(createPrunedRdd _))
} else {
tableReader.makeRDDForTable(table, Some(createPrunedRdd _))
}
} else {
// Table is a Hive table on HDFS (or other Hadoop storage).
makeRDDFromHadoop()
}
}
private def createPrunedRdd(
rdd: RDD[_],
indexToStats: collection.Map[Int, TablePartitionStats]): RDD[_] = {
    // Run map pruning if the flag is set, there is a filter predicate on
    // the input table, and we have statistics on the table.
val columnsUsed = new ColumnPruner(this, table).columnsUsed
if (!table.isPartitioned && cacheMode == CacheType.TACHYON) {
SharkEnv.tachyonUtil.pushDownColumnPruning(rdd, columnsUsed)
}
val shouldPrune = SharkConfVars.getBoolVar(localHConf, SharkConfVars.MAP_PRUNING) &&
childOperators(0).isInstanceOf[FilterOperator] &&
indexToStats.size == rdd.partitions.size
val prunedRdd: RDD[_] = if (shouldPrune) {
val startTime = System.currentTimeMillis
val printPruneDebug = SharkConfVars.getBoolVar(
localHConf, SharkConfVars.MAP_PRUNING_PRINT_DEBUG)
// Must initialize the condition evaluator in FilterOperator to get the
// udfs and object inspectors set.
val filterOp = childOperators(0).asInstanceOf[FilterOperator]
filterOp.initializeOnSlave()
def prunePartitionFunc(index: Int): Boolean = {
if (printPruneDebug) {
logInfo("\\nPartition " + index + "\\n" + indexToStats(index))
}
// Only test for pruning if we have stats on the column.
val partitionStats = indexToStats(index)
if (partitionStats != null && partitionStats.stats != null) {
MapSplitPruning.test(partitionStats, filterOp.conditionEvaluator)
} else {
true
}
}
// Do the pruning.
val prunedRdd = PartitionPruningRDD.create(rdd, prunePartitionFunc)
val timeTaken = System.currentTimeMillis - startTime
logInfo("Map pruning %d partitions into %s partitions took %d ms".format(
rdd.partitions.size, prunedRdd.partitions.size, timeTaken))
prunedRdd
} else {
rdd
}
prunedRdd.mapPartitions { iter =>
if (iter.hasNext) {
val tablePartition1 = iter.next()
val tablePartition = tablePartition1.asInstanceOf[TablePartition]
tablePartition.prunedIterator(columnsUsed)
} else {
Iterator.empty
}
}
}
/**
* Create an RDD for a table stored in Hadoop.
*/
def makeRDDFromHadoop(): RDD[_] = {
// Try to have the InputFormats filter predicates.
TableScanOperator.addFilterExprToConf(localHConf, hiveOp)
val hadoopReader = new HadoopTableReader(tableDesc, localHConf)
if (table.isPartitioned) {
logDebug("Making %d Hive partitions".format(parts.size))
// The returned RDD contains arrays of size two with the elements as
// (deserialized row, column partition value).
return hadoopReader.makeRDDForPartitionedTable(parts)
} else {
// The returned RDD contains deserialized row Objects.
return hadoopReader.makeRDDForTable(table)
}
}
// All RDD processing is done in execute().
override def processPartition(split: Int, iter: Iterator[_]): Iterator[_] =
throw new UnsupportedOperationException("TableScanOperator.processPartition()")
}
object TableScanOperator extends LogHelper {
/**
* Add filter expressions and column metadata to the HiveConf. This is meant to be called on the
* master - it's impractical to add filters during slave-local JobConf creation in HadoopRDD,
* since we would have to serialize the HiveTableScanOperator.
*/
private def addFilterExprToConf(hiveConf: HiveConf, hiveTableScanOp: HiveTableScanOperator) {
// Push down projections for this TableScanOperator to Hadoop JobConf
if (hiveTableScanOp.getNeededColumnIDs() != null) {
ColumnProjectionUtils.appendReadColumnIDs(hiveConf, hiveTableScanOp.getNeededColumnIDs())
} else {
ColumnProjectionUtils.setFullyReadColumns(hiveConf)
}
ColumnProjectionUtils.appendReadColumnNames(hiveConf, hiveTableScanOp.getNeededColumns())
val tableScanDesc = hiveTableScanOp.getConf()
if (tableScanDesc == null) return
val rowSchema = hiveTableScanOp.getSchema
if (rowSchema != null) {
// Add column names to the HiveConf.
val columnNames = new StringBuilder
for (columnInfo <- rowSchema.getSignature()) {
if (columnNames.length > 0) {
columnNames.append(",")
}
columnNames.append(columnInfo.getInternalName())
}
val columnNamesString = columnNames.toString()
hiveConf.set(serdeConstants.LIST_COLUMNS, columnNamesString)
// Add column types to the HiveConf.
val columnTypes = new StringBuilder
for (columnInfo <- rowSchema.getSignature()) {
if (columnTypes.length > 0) {
columnTypes.append(",")
}
columnTypes.append(columnInfo.getType().getTypeName())
}
val columnTypesString = columnTypes.toString()
hiveConf.set(serdeConstants.LIST_COLUMN_TYPES, columnTypesString)
}
// Push down predicate filters.
val filterExprNode = tableScanDesc.getFilterExpr()
if (filterExprNode != null) {
val filterText = filterExprNode.getExprString()
hiveConf.set(TableScanDesc.FILTER_TEXT_CONF_STR, filterText)
logDebug("Filter text: " + filterText)
val filterExprNodeSerialized = Utilities.serializeExpression(filterExprNode)
hiveConf.set(TableScanDesc.FILTER_EXPR_CONF_STR, filterExprNodeSerialized)
logDebug("Filter expression: " + filterExprNodeSerialized)
}
}
}
| stefanvanwouw/puppet-shark | files/shark-0.9.0/src/main/scala/shark/execution/TableScanOperator.scala | Scala | mit | 12,008 |
package org.decaf.distributed
import akka.actor.ActorSystem
package object server {
lazy val ServerActorSystem = ActorSystem("server")
}
| adamdecaf/distributed | server/src/main/scala/package.scala | Scala | apache-2.0 | 140 |
package com.lookout.borderpatrol.session
import com.lookout.borderpatrol.session.secret.InMemorySecretStore
import org.scalatest.{FlatSpec, Matchers}
import com.lookout.borderpatrol.session.id.{Generator => IdGenerator}
class CryptoSpec extends FlatSpec with Matchers {
behavior of "Generator"
it should "create entropy the size of input" in {
Generator(1) should have size 1
}
it should "create new values on apply" in {
val g1 = Generator(2)
val g2 = Generator(2)
g1 should not equal g2
}
behavior of "CryptKey"
implicit val secretStore = new InMemorySecretStore(Secrets.mockSecrets)
val idGenerator = new IdGenerator
val id = idGenerator.next
val bytes = Generator(16)
it should "encrypt a sequence" in {
val cryptKey = CryptKey.apply(id, secretStore.current)
cryptKey.encrypt(bytes) should not equal (bytes)
cryptKey.encrypt(bytes).size should be > bytes.size
}
it should "have the same output as input when decrypting what it encrypted" in {
val cryptKey = CryptKey.apply(id, secretStore.current)
cryptKey.decrypt(cryptKey.encrypt(bytes)) shouldEqual (bytes)
}
}
| rtyler/borderpatrol | borderpatrol-core/src/test/scala/com/lookout/borderpatrol/session/CryptoSpec.scala | Scala | mit | 1,141 |
object Test {
def main(args: Array[String]): Unit = {
val a = 1 + 2 // [break] [step: a * 9]
val b = a * 9 // [step: plus] [step: x * x]
def plus(x: Int, y: Int) = {
val a = x * x // [step: y * y]
val b = y * y // [step: a + b]
a + b // [step: plus]
}
val c = plus(a, b) // [step: print] [cont]
print(c)
}
} | som-snytt/dotty | tests/debug/nested-method.scala | Scala | apache-2.0 | 434 |
package org.jetbrains.plugins.scala
package lang
package completion3
import com.intellij.codeInsight.completion.CompletionType.SMART
import com.intellij.testFramework.EditorTestUtil
/**
* @author Alexander Podkhalyuzin
*/
class ScalaSmartAnonymousFunctionCompletionTest extends ScalaCodeInsightTestBase {
import EditorTestUtil.{CARET_TAG => CARET}
def testAbstractTypeInfoFromFirstClause(): Unit = doCompletionTest(
fileText =
s"""
|def foo[T](x: T)(y: T => Int) = 1
|foo(2)($CARET)
""".stripMargin,
resultText =
s"""
|def foo[T](x: T)(y: T => Int) = 1
|foo(2)((i: Int) => $CARET)
""".stripMargin
)
def testSimpleCaseTest(): Unit = doCompletionTest(
fileText =
s"""
|def foo(x: String => String) = 1
|foo {$CARET}
""".stripMargin,
resultText =
s"""
|def foo(x: String => String) = 1
|foo {case str: String => $CARET}
""".stripMargin
)
def testSimple(): Unit = doCompletionTest(
fileText =
s"""
|def foo(x: String => String) = 1
|foo($CARET)
""".stripMargin,
resultText =
s"""
|def foo(x: String => String) = 1
|foo((str: String) => $CARET)
""".stripMargin
)
def testJustTuple(): Unit = doCompletionTest(
fileText =
s"""
|def foo(x: Tuple2[Int, Int] => Int) = 1
|foo($CARET)
""".stripMargin,
resultText =
s"""
|def foo(x: Tuple2[Int, Int] => Int) = 1
|foo((tuple: (Int, Int)) => $CARET)
""".stripMargin
)
def testCaseTuple(): Unit = doCompletionTest(
fileText =
s"""
|def foo(x: Tuple2[Int, Int] => Int) = 1
|foo{$CARET}
""".stripMargin,
resultText =
s"""
|def foo(x: Tuple2[Int, Int] => Int) = 1
|foo{case (i: Int, i0: Int) => $CARET}
""".stripMargin
)
def testAbstractTypeInfoWithUpper(): Unit = doCompletionTest(
fileText =
s"""
|def foo[T <: Runnable](x: (T, String) => String) = 1
|foo($CARET)
""".stripMargin,
resultText =
s"""
|def foo[T <: Runnable](x: (T, String) => String) = 1
|foo((value: Runnable, str: String) => $CARET)
""".stripMargin
)
def testAbstractTypeInfoWithLower(): Unit = doCompletionTest(
fileText =
s"""
|def foo[T >: Int](x: (T, String) => String) = 1
|foo($CARET)
""".stripMargin,
resultText =
s"""
|def foo[T >: Int](x: (T, String) => String) = 1
|foo((value: Int, str: String) => $CARET)
""".stripMargin
)
def testAbstractTypeInfoTypeParameters(): Unit = doCompletionTest(
fileText =
s"""
|def foo[T <: Runnable](x: T => String) = 1
|class X extends Runnable
|foo[X]($CARET)
""".stripMargin,
resultText =
s"""
|def foo[T <: Runnable](x: T => String) = 1
|class X extends Runnable
|foo[X]((x: X) => $CARET)
""".stripMargin
)
def testFewParams(): Unit = doCompletionTest(
fileText =
s"""
|def foo(c: (Int, Int, Int, Int) => Int) = 1
|foo($CARET)
""".stripMargin,
resultText =
s"""
|def foo(c: (Int, Int, Int, Int) => Int) = 1
|foo((i: Int, i0: Int, i1: Int, i2: Int) => $CARET)
""".stripMargin
)
def testFewParamsDifferent(): Unit = doCompletionTest(
fileText =
s"""
|def foo(x: (Int, String, Int, String) => Int) = 1
|foo($CARET)
""".stripMargin,
resultText =
s"""
|def foo(x: (Int, String, Int, String) => Int) = 1
|foo((i: Int, str: String, i0: Int, str0: String) => $CARET)
""".stripMargin
)
def testAbstractTypeInfo(): Unit = doCompletionTest(
fileText =
s"""
|def foo[T](x: (T, String) => String) = 1
|foo($CARET)
""".stripMargin,
resultText =
s"""
|def foo[T](x: (T, String) => String) = 1
|foo((value: T, str: String) => $CARET)
""".stripMargin
)
def testAliasType(): Unit = doCompletionTest(
fileText =
s"""
|type T = Int => String
|def zoo(p: T) {}
|zoo($CARET)
""".stripMargin,
resultText =
s"""
|type T = Int => String
|def zoo(p: T) {}
|zoo((i: Int) => $CARET)
""".stripMargin
)
private def doCompletionTest(fileText: String, resultText: String): Unit =
doCompletionTest(fileText, resultText, item = "", time = 2, completionType = SMART)
} | loskutov/intellij-scala | test/org/jetbrains/plugins/scala/lang/completion3/ScalaSmartAnonymousFunctionCompletionTest.scala | Scala | apache-2.0 | 4,628 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.command
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}
import org.apache.spark.sql.types.StringType
/**
* A command for users to list the databases/schemas.
* If a databasePattern is supplied then the databases that only match the
* pattern would be listed.
* The syntax of using this command in SQL is:
* {{{
* SHOW (DATABASES|SCHEMAS) [LIKE 'identifier_with_wildcards'];
* }}}
*/
case class ShowDatabasesCommand(databasePattern: Option[String]) extends RunnableCommand {
// The result of SHOW DATABASES has one column called 'databaseName'
override val output: Seq[Attribute] = {
AttributeReference("databaseName", StringType, nullable = false)() :: Nil
}
override def run(sparkSession: SparkSession): Seq[Row] = {
val catalog = sparkSession.sessionState.catalog
val databases =
databasePattern.map(catalog.listDatabases).getOrElse(catalog.listDatabases())
databases.map { d => Row(d) }
}
}
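
// Usage sketch (illustrative only; `spark` is an assumed SparkSession): these commands are
// normally reached through SQL, e.g.
//
//   spark.sql("SHOW DATABASES LIKE 'prod*'").show()   // ShowDatabasesCommand with a pattern
//   spark.sql("USE prod_warehouse")                   // SetDatabaseCommand (defined below)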
/**
* Command for setting the current database.
* {{{
* USE database_name;
* }}}
*/
case class SetDatabaseCommand(databaseName: String) extends RunnableCommand {
override def run(sparkSession: SparkSession): Seq[Row] = {
sparkSession.sessionState.catalog.setCurrentDatabase(databaseName)
Seq.empty[Row]
}
}
| wangyixiaohuihui/spark2-annotation | sql/core/src/main/scala/org/apache/spark/sql/execution/command/databases.scala | Scala | apache-2.0 | 2,254 |
package com.tomogle.fpinscala
import org.scalatest.{FlatSpec, Matchers}
class GettingStartedTest extends FlatSpec with Matchers {
behavior of "fib"
import GettingStarted.fib
it should "give 0 for 1st element" in {
fib(1) shouldBe 0
}
it should "give 1 for 2nd element" in {
fib(2) shouldBe 1
}
it should "give 1 for 3rd element" in {
fib(3) shouldBe 1
}
it should "give 2 for 4th element" in {
fib(4) shouldBe 2
}
it should "give 3 for 5th element" in {
fib(5) shouldBe 3
}
it should "give 5 for 6th element" in {
fib(6) shouldBe 5
}
behavior of "isSorted"
import GettingStarted.isSorted
it should "give true for a sorted Int array" in {
isSorted(Array(1,2,3), (a: Int, b: Int) => a < b) shouldBe true
}
it should "give false for an Int array with out of order element in middle" in {
isSorted(Array(1,2,1,3), (a: Int, b: Int) => a < b) shouldBe false
}
it should "give false for an Int array with out of order element at start" in {
isSorted(Array(2,1,3), (a: Int, b: Int) => a < b) shouldBe false
}
it should "give false for an Int array with out of order element at end" in {
isSorted(Array(1,2,3,4,5,6,7,2), (a: Int, b: Int) => a < b) shouldBe false
}
behavior of "curry"
import GettingStarted.curry
it should "give the same behavior when curried as uncurried" in {
val f = (a: Int, b: Int) => a * b
val curriedF: (Int) => (Int) => Int = curry(f)
val fResult = f(1,3)
val curriedFResult = curriedF(1)(3)
fResult should equal(curriedFResult)
}
behavior of "unCurry"
import GettingStarted.unCurry
it should "give the same behavior when curried as uncurried" in {
val f = (a: Int) => (b: Int) => a * b
val unCurriedF = unCurry(f)
val fResult = f(1)(8)
val unCurriedFResult = unCurriedF(1, 8)
fResult should equal(unCurriedFResult)
}
behavior of "compose"
import GettingStarted.compose
it should "give the same behavior when composed as when called as f(g())" in {
val f = (b: Int) => b.toString
val g = (a: Int) => a * 3
val composeResult = compose(f, g)(2)
val fgResult = f(g(2))
composeResult should equal(fgResult)
}
}
| tom-ogle/scala-scratch-code | src/test/scala/com/tomogle/fpinscala/GettingStartedTest.scala | Scala | mit | 2,218 |
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.stats.message
import io.gatling.BaseSpec
import io.gatling.commons.stats.{ KO, OK, Status }
import org.scalacheck.Gen.alphaStr
class StatusSpec extends BaseSpec {
"Status.apply" should "return OK when passing 'OK'" in {
Status("OK") shouldBe OK
}
  it should "return KO when passing 'KO'" in {
Status("KO") shouldBe KO
}
it should "throw an IllegalArgumentException on any other string" in {
forAll(alphaStr.suchThat(s => s != "OK" && s != "KO")) { string =>
an[IllegalArgumentException] should be thrownBy Status(string)
}
}
}
| gatling/gatling | gatling-core/src/test/scala/io/gatling/core/stats/message/StatusSpec.scala | Scala | apache-2.0 | 1,209 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package scalaguide.akka.typed.oo
package multi
import javax.inject.Inject
import javax.inject.Provider
import akka.actor.ActorSystem
import akka.actor.typed.scaladsl.adapter._
import akka.actor.typed.ActorRef
import com.google.inject.AbstractModule
import com.google.inject.TypeLiteral
import com.google.inject.name.Names
import play.api.Configuration
import play.api.libs.concurrent.AkkaGuiceSupport
object AppModule extends AbstractModule with AkkaGuiceSupport {
override def configure() = {
def bindHelloActor(name: String) = {
bind(new TypeLiteral[ActorRef[HelloActor.SayHello]]() {})
.annotatedWith(Names.named(name))
.toProvider(new Provider[ActorRef[HelloActor.SayHello]] {
@Inject var actorSystem: ActorSystem = _
def get() = actorSystem.spawn(HelloActor.create(), name)
})
.asEagerSingleton()
}
def bindConfiguredActor(name: String) = {
bind(new TypeLiteral[ActorRef[ConfiguredActor.GetConfig]]() {})
.annotatedWith(Names.named(name))
.toProvider(new Provider[ActorRef[ConfiguredActor.GetConfig]] {
@Inject var actorSystem: ActorSystem = _
@Inject var configuration: Configuration = _
def get() = actorSystem.spawn(ConfiguredActor.create(configuration), name)
})
.asEagerSingleton()
}
bindHelloActor("hello-actor1")
bindHelloActor("hello-actor2")
bindConfiguredActor("configured-actor1")
bindConfiguredActor("configured-actor2")
}
}
| benmccann/playframework | documentation/manual/working/commonGuide/akka/code/scalaguide/akka/typed/oo/multi/AppModule.scala | Scala | apache-2.0 | 1,580 |
/* __ *\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2003-2010, LAMP/EPFL **
** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\___/_/ |_/____/_/ | | **
** |/ **
\* */
package scala.collection
package mutable
/** This class can be used as an adaptor to create mutable sets from
* immutable set implementations. Only method `empty` has
* to be redefined if the immutable set on which this mutable set is
* originally based is not empty. `empty` is supposed to
* return the representation of an empty set.
*
* @author Matthias Zenger
* @version 1.0, 21/07/2003
* @since 1
*/
@serializable
class ImmutableSetAdaptor[A](protected var set: immutable.Set[A]) extends Set[A] {
override def size: Int = set.size
override def isEmpty: Boolean = set.isEmpty
def contains(elem: A): Boolean = set.contains(elem)
override def foreach[U](f: A => U): Unit = set.foreach(f)
override def exists(p: A => Boolean): Boolean = set.exists(p)
override def toList: List[A] = set.toList
override def toString = set.toString
def iterator: Iterator[A] = set.iterator
@deprecated("use `iterator' instead") override def elements: Iterator[A] = iterator
def +=(elem: A): this.type = { set = set + elem; this }
def -=(elem: A): this.type = { set = set - elem; this }
override def clear(): Unit = { set = set.empty }
}
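
// Usage sketch (illustrative only): the adaptor is mutated in place while the underlying
// representation stays an immutable set.
//
//   val adaptor = new ImmutableSetAdaptor(immutable.Set(1, 2, 3))
//   adaptor += 4          // backing set is now immutable.Set(1, 2, 3, 4)
//   adaptor -= 1          // backing set is now immutable.Set(2, 3, 4)
//   adaptor.contains(4)   // true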
| cran/rkafkajars | java/scala/collection/mutable/ImmutableSetAdaptor.scala | Scala | apache-2.0 | 1,707 |
/*
* Copyright 2014 Frugal Mechanic (http://frugalmechanic.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fm.netty
import java.util.concurrent.TimeoutException
import scala.concurrent.{CanAwait, ExecutionContext, Future}
import scala.concurrent.duration.Duration
import scala.util.{Failure, Success, Try}
import io.netty.util.concurrent.{Future => NettyFuture, GenericFutureListener}
object NettyFutureWrapper {
private class Listener[T, U](func: Try[T] => U) extends GenericFutureListener[NettyFuture[T]] {
def operationComplete(f: NettyFuture[T]): Unit = {
if (f.isSuccess()) func(Success(f.get)) else func(Failure(f.cause))
}
}
}
/**
* Wraps Netty's Future as a Scala Future while ensuring that the ExecutionContext
* is Netty's EventLoopGroup
*/
final case class NettyFutureWrapper[T](f: NettyFuture[T]) extends Future[T] {
def isCompleted: Boolean = f.isDone
def onComplete[U](func: Try[T] => U)(implicit executor: ExecutionContext): Unit = {
// Note: calling flatMap will trigger this exception since it uses some Internal Executor in the Future class. Comment out if you need to use flatMap
//require(executor.isInstanceOf[NettyExecutionContext], "Expected the ExecutionContext to be a NettyExecutionContext but got: "+executor)
f.addListener(new NettyFutureWrapper.Listener(func))
}
def ready(atMost: Duration)(implicit permit: CanAwait): this.type = atMost match {
case Duration.Undefined => throw new IllegalArgumentException("Cannot use Duration.Undefined")
case Duration.Inf => f.await(); this
case _ => if (f.await(atMost.length, atMost.unit)) this else throw new TimeoutException()
}
def result(atMost: Duration)(implicit permit: CanAwait): T = {
ready(atMost)
assert(f.isDone)
if (f.isSuccess) f.get else throw f.cause
}
def value: Option[Try[T]] = if (f.isDone) Some(if (f.isSuccess) Success(f.get) else Failure(f.cause)) else None
// TODO: implement (new for Scala 2.12)
def transform[S](f: Try[T] => Try[S])(implicit executor: ExecutionContext): Future[S] = ???
// TODO: implement (new for Scala 2.12)
def transformWith[S](f: Try[T] => Future[S])(implicit executor: ExecutionContext): Future[S] = ???
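
  // Usage sketch (names such as `eventLoop` are illustrative assumptions): adapting a Netty
  // promise, which is also a Netty Future, into a Scala Future.
  //
  //   val promise = eventLoop.newPromise[String]()                  // eventLoop: io.netty.util.concurrent.EventExecutor
  //   val scalaFuture: Future[String] = NettyFutureWrapper(promise)
  //   promise.setSuccess("done")                                    // completes scalaFuture with Success("done")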
} | er1c/fm-http | src/main/scala/fm/netty/NettyFutureWrapper.scala | Scala | apache-2.0 | 2,752 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.oap.utils
import org.apache.hadoop.conf.Configuration
import org.apache.parquet.hadoop.ParquetInputFormat
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources._
import org.apache.spark.sql.types._
class FilterHelperSuite extends SparkFunSuite {
val conf = SQLConf.get
test("Pushed And Set") {
val requiredSchema = new StructType()
.add(StructField("a", IntegerType))
.add(StructField("b", StringType))
val filters = Seq(GreaterThan("a", 1), EqualTo("b", "2"))
val expected = s"""and(gt(a, 1), eq(b, Binary{"2"}))"""
conf.setConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED, true)
val pushed = FilterHelper.tryToPushFilters(conf, requiredSchema, filters)
assert(pushed.isDefined)
assert(pushed.get.toString.equals(expected))
val config = new Configuration()
FilterHelper.setFilterIfExist(config, pushed)
val humanReadable = config.get(ParquetInputFormat.FILTER_PREDICATE + ".human.readable")
assert(humanReadable.nonEmpty)
assert(humanReadable.equals(expected))
}
test("Not Pushed") {
val requiredSchema = new StructType()
.add(StructField("a", IntegerType))
.add(StructField("b", StringType))
val filters = Seq(GreaterThan("a", 1), EqualTo("b", "2"))
conf.setConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED, false)
val pushed = FilterHelper.tryToPushFilters(conf, requiredSchema, filters)
assert(pushed.isEmpty)
val config = new Configuration()
FilterHelper.setFilterIfExist(config, pushed)
assert(config.get(ParquetInputFormat.FILTER_PREDICATE) == null)
assert(config.get(ParquetInputFormat.FILTER_PREDICATE + ".human.readable") == null)
}
}
| Intel-bigdata/OAP | oap-cache/oap/src/test/scala/org/apache/spark/sql/execution/datasources/oap/utils/FilterHelperSuite.scala | Scala | apache-2.0 | 2,578 |
import org.scalatest._
import Matchers._
class SimpleTest extends FunSuite {
def sum(a: Int, b: Int): Int = a + b
test("sum works for 1 + 2 = 3") {
sum(1, 2) shouldEqual 3
}
}
| malancas/CUNY-CoLAG-Sims-2016-17 | Yang-weighted-model/src/test/scala/SimpleTest.scala | Scala | gpl-3.0 | 186 |
/*
* Copyright 2013 Stephane Godbillon (@sgodbillon)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package reactivemongo.bson.buffer
import reactivemongo.bson._
import scala.util.{ Failure, Success, Try }
trait BufferHandler {
def serialize(bson: BSONValue, buffer: WritableBuffer): WritableBuffer
def deserialize(buffer: ReadableBuffer): Try[(String, BSONValue)]
def write(buffer: WritableBuffer, document: BSONDocument) = {
serialize(document, buffer)
}
def write(buffer: WritableBuffer, arr: BSONArray) = {
serialize(arr, buffer)
}
def readDocument(buffer: ReadableBuffer): Try[BSONDocument]
def writeDocument(document: BSONDocument, buffer: WritableBuffer): WritableBuffer
def stream(buffer: ReadableBuffer): Stream[(String, BSONValue)] = {
val elem = deserialize(buffer)
if (elem.isSuccess)
elem.get #:: stream(buffer)
else Stream.empty
}
}
object DefaultBufferHandler extends BufferHandler {
sealed trait BufferWriter[B <: BSONValue] {
def write(value: B, buffer: WritableBuffer): WritableBuffer
}
sealed trait BufferReader[B <: BSONValue] {
def read(buffer: ReadableBuffer): B
}
sealed trait BufferRW[B <: BSONValue] extends BufferWriter[B] with BufferReader[B]
val handlersByCode: Map[Byte, BufferRW[_ <: BSONValue]] = Map(
0x01.toByte -> BSONDoubleBufferHandler,
0x02.toByte -> BSONStringBufferHandler,
0x03.toByte -> BSONDocumentBufferHandler,
0x04.toByte -> BSONArrayBufferHandler, // array
0x05.toByte -> BSONBinaryBufferHandler, // binary TODO
0x06.toByte -> BSONUndefinedBufferHandler, // undefined,
0x07.toByte -> BSONObjectIDBufferHandler, // objectid,
0x08.toByte -> BSONBooleanBufferHandler, // boolean
0x09.toByte -> BSONDateTimeBufferHandler, // datetime
0x0A.toByte -> BSONNullBufferHandler, // null
0x0B.toByte -> BSONRegexBufferHandler, // regex
0x0C.toByte -> BSONDBPointerBufferHandler, // dbpointer
0x0D.toByte -> BSONJavaScriptBufferHandler, // JS
0x0E.toByte -> BSONSymbolBufferHandler, // symbol
0x0F.toByte -> BSONJavaScriptWSBufferHandler, // JS with scope
0x10.toByte -> BSONIntegerBufferHandler,
0x11.toByte -> BSONTimestampBufferHandler, // timestamp,
0x12.toByte -> BSONLongBufferHandler, // long,
0xFF.toByte -> BSONMinKeyBufferHandler, // min
0x7F.toByte -> BSONMaxKeyBufferHandler) // max
object BSONDoubleBufferHandler extends BufferRW[BSONDouble] {
def write(value: BSONDouble, buffer: WritableBuffer): WritableBuffer = buffer.writeDouble(value.value)
def read(buffer: ReadableBuffer): BSONDouble = BSONDouble(buffer.readDouble)
}
object BSONStringBufferHandler extends BufferRW[BSONString] {
def write(value: BSONString, buffer: WritableBuffer): WritableBuffer = buffer.writeString(value.value)
def read(buffer: ReadableBuffer): BSONString = BSONString(buffer.readString)
}
object BSONDocumentBufferHandler extends BufferRW[BSONDocument] {
def write(doc: BSONDocument, buffer: WritableBuffer) = {
val now = buffer.index
buffer.writeInt(0)
doc.elements.foreach { e =>
buffer.writeByte(e._2.code.toByte)
buffer.writeCString(e._1)
serialize(e._2, buffer)
}
buffer.setInt(now, (buffer.index - now + 1))
buffer.writeByte(0)
buffer
}
def read(b: ReadableBuffer) = {
val startIndex = b.index
val length = b.readInt
val buffer = b.slice(length - 4)
b.discard(length - 4)
def makeStream(): Stream[Try[(String, BSONValue)]] = {
if (buffer.readable > 1) { // last is 0
val code = buffer.readByte
val name = buffer.readCString
val elem = Try(name -> DefaultBufferHandler.handlersByCode.get(code).map(_.read(buffer)).get)
elem #:: makeStream
} else Stream.empty
}
val stream = makeStream
stream.force // TODO remove
new BSONDocument(stream)
}
}
object BSONArrayBufferHandler extends BufferRW[BSONArray] {
def write(array: BSONArray, buffer: WritableBuffer) = {
val now = buffer.index
buffer.writeInt(0)
array.values.zipWithIndex.foreach { e =>
buffer.writeByte(e._1.code.toByte)
buffer.writeCString(e._2.toString)
serialize(e._1, buffer)
}
buffer.setInt(now, (buffer.index - now + 1))
buffer.writeByte(0)
buffer
}
def read(b: ReadableBuffer) = {
val startIndex = b.index
val length = b.readInt
val buffer = b.slice(length - 4)
b.discard(length - 4)
def makeStream(): Stream[Try[BSONValue]] = {
if (buffer.readable > 1) { // last is 0
val code = buffer.readByte
val name = buffer.readCString
val elem = Try(DefaultBufferHandler.handlersByCode.get(code).map(_.read(buffer)).get)
elem #:: makeStream
} else Stream.empty
}
val stream = makeStream
stream.force // TODO remove
new BSONArray(stream)
}
}
object BSONBinaryBufferHandler extends BufferRW[BSONBinary] {
def write(binary: BSONBinary, buffer: WritableBuffer) = {
buffer.writeInt(binary.value.readable)
buffer.writeByte(binary.subtype.value.toByte)
val bin = binary.value.slice(binary.value.readable)
buffer.writeBytes(bin.readArray(bin.readable)) // TODO
buffer
}
def read(buffer: ReadableBuffer) = {
val length = buffer.readInt
val subtype = Subtype.apply(buffer.readByte)
val bin = buffer.slice(length)
buffer.discard(length)
BSONBinary(bin, subtype)
}
}
object BSONUndefinedBufferHandler extends BufferRW[BSONUndefined.type] {
def write(undefined: BSONUndefined.type, buffer: WritableBuffer) = buffer
def read(buffer: ReadableBuffer) = BSONUndefined
}
object BSONObjectIDBufferHandler extends BufferRW[BSONObjectID] {
def write(objectId: BSONObjectID, buffer: WritableBuffer) = buffer writeBytes objectId.value
def read(buffer: ReadableBuffer) = BSONObjectID(buffer.readArray(12))
}
object BSONBooleanBufferHandler extends BufferRW[BSONBoolean] {
def write(boolean: BSONBoolean, buffer: WritableBuffer) = buffer writeByte (if (boolean.value) 1 else 0)
def read(buffer: ReadableBuffer) = BSONBoolean(buffer.readByte == 0x01)
}
object BSONDateTimeBufferHandler extends BufferRW[BSONDateTime] {
def write(dateTime: BSONDateTime, buffer: WritableBuffer) = buffer writeLong dateTime.value
def read(buffer: ReadableBuffer) = BSONDateTime(buffer.readLong)
}
object BSONNullBufferHandler extends BufferRW[BSONNull.type] {
def write(`null`: BSONNull.type, buffer: WritableBuffer) = buffer
def read(buffer: ReadableBuffer) = BSONNull
}
object BSONRegexBufferHandler extends BufferRW[BSONRegex] {
def write(regex: BSONRegex, buffer: WritableBuffer) = { buffer writeCString regex.value; buffer writeCString regex.flags }
def read(buffer: ReadableBuffer) = BSONRegex(buffer.readCString, buffer.readCString)
}
object BSONDBPointerBufferHandler extends BufferRW[BSONDBPointer] {
def write(pointer: BSONDBPointer, buffer: WritableBuffer) = buffer // TODO
def read(buffer: ReadableBuffer) = BSONDBPointer(buffer.readCString, buffer.readArray(12))
}
object BSONJavaScriptBufferHandler extends BufferRW[BSONJavaScript] {
def write(js: BSONJavaScript, buffer: WritableBuffer) = buffer writeString js.value
def read(buffer: ReadableBuffer) = BSONJavaScript(buffer.readString)
}
object BSONSymbolBufferHandler extends BufferRW[BSONSymbol] {
def write(symbol: BSONSymbol, buffer: WritableBuffer) = buffer writeString symbol.value
def read(buffer: ReadableBuffer) = BSONSymbol(buffer.readString)
}
object BSONJavaScriptWSBufferHandler extends BufferRW[BSONJavaScriptWS] {
def write(jsws: BSONJavaScriptWS, buffer: WritableBuffer) = buffer writeString jsws.value
def read(buffer: ReadableBuffer) = BSONJavaScriptWS(buffer.readString)
}
object BSONIntegerBufferHandler extends BufferRW[BSONInteger] {
def write(value: BSONInteger, buffer: WritableBuffer) = buffer writeInt value.value
def read(buffer: ReadableBuffer): BSONInteger = BSONInteger(buffer.readInt)
}
object BSONTimestampBufferHandler extends BufferRW[BSONTimestamp] {
def write(ts: BSONTimestamp, buffer: WritableBuffer) = buffer writeLong ts.value
def read(buffer: ReadableBuffer) = BSONTimestamp(buffer.readLong)
}
object BSONLongBufferHandler extends BufferRW[BSONLong] {
def write(long: BSONLong, buffer: WritableBuffer) = buffer writeLong long.value
def read(buffer: ReadableBuffer) = BSONLong(buffer.readLong)
}
object BSONMinKeyBufferHandler extends BufferRW[BSONMinKey.type] {
def write(b: BSONMinKey.type, buffer: WritableBuffer) = buffer
def read(buffer: ReadableBuffer) = BSONMinKey
}
object BSONMaxKeyBufferHandler extends BufferRW[BSONMaxKey.type] {
def write(b: BSONMaxKey.type, buffer: WritableBuffer) = buffer
def read(buffer: ReadableBuffer) = BSONMaxKey
}
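// Dispatch for (de)serialization: handlersByCode maps each BSON type code to the
// corresponding BufferRW instance above, so serialize/deserialize stay symmetric with
// the per-type read and write implementations.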
def serialize(bson: BSONValue, buffer: WritableBuffer): WritableBuffer = {
handlersByCode.get(bson.code).get.asInstanceOf[BufferRW[BSONValue]].write(bson, buffer)
}
def deserialize(buffer: ReadableBuffer): Try[(String, BSONValue)] = Try {
if (buffer.readable > 0) {
val code = buffer.readByte
buffer.readString -> handlersByCode.get(code).map(_.read(buffer)).get
} else throw new NoSuchElementException("buffer can not be read, end of buffer reached")
}
def readDocument(buffer: ReadableBuffer): Try[BSONDocument] = Try {
BSONDocumentBufferHandler.read(buffer)
}
def writeDocument(document: BSONDocument, buffer: WritableBuffer): WritableBuffer =
serialize(document, buffer)
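// Minimal usage sketch (illustrative only; `writable`/`readable` stand for whatever
// WritableBuffer/ReadableBuffer implementations are used with this handler and are not
// defined here):
//
//   DefaultBufferHandler.writeDocument(doc, writable)
//   val parsed: Try[BSONDocument] = DefaultBufferHandler.readDocument(readable)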
}
sealed trait BSONIterator extends Iterator[BSONElement] {
val buffer: ReadableBuffer
val startIndex = buffer.index
val documentSize = buffer.readInt
def next: BSONElement = {
val code = buffer.readByte
buffer.readString -> DefaultBufferHandler.handlersByCode.get(code).map(_.read(buffer)).get
}
def hasNext = buffer.index - startIndex + 1 < documentSize
def mapped: Map[String, BSONElement] = {
for (el <- this) yield (el._1, el)
}.toMap
}
object BSONIterator {
private[bson] def pretty(i: Int, it: Iterator[Try[BSONElement]]): String = {
val prefix = (0 to i).map { i => " " }.mkString("")
(for (tryElem <- it) yield {
tryElem match {
case Success(elem) => elem._2 match {
case doc: BSONDocument => prefix + elem._1 + ": {\n" + pretty(i + 1, doc.stream.iterator) + "\n" + prefix + "}"
case array: BSONArray => prefix + elem._1 + ": [\n" + pretty(i + 1, array.iterator) + "\n" + prefix + "]"
case _ => prefix + elem._1 + ": " + elem._2.toString
}
case Failure(e) => prefix + "ERROR[" + e.getMessage() + "]"
}
}).mkString(",\n")
}
/** Makes a pretty String representation of the given iterator of BSON elements. */
def pretty(it: Iterator[Try[BSONElement]]): String = "{\n" + pretty(0, it) + "\n}"
}
| sgodbillon/reactive-bson-2.9 | src/main/scala/bufferhandlers.scala | Scala | apache-2.0 | 11,593 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import kafka.log.LogConfig
import kafka.utils.TestUtils
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.apache.kafka.common.{TopicPartition, Uuid}
import org.apache.kafka.common.requests.FetchRequest.PartitionData
import org.apache.kafka.common.requests.{FetchRequest, FetchResponse}
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo}
import java.util.{Optional, Properties}
import scala.jdk.CollectionConverters._
/**
* This test verifies that the KIP-541 broker-level FetchMaxBytes configuration is honored.
*/
class FetchRequestMaxBytesTest extends BaseRequestTest {
override def brokerCount: Int = 1
private var producer: KafkaProducer[Array[Byte], Array[Byte]] = null
private val testTopic = "testTopic"
private val testTopicPartition = new TopicPartition(testTopic, 0)
private val messages = IndexedSeq(
multiByteArray(1),
multiByteArray(500),
multiByteArray(1040),
multiByteArray(500),
multiByteArray(50))
private def multiByteArray(length: Int): Array[Byte] = {
val array = new Array[Byte](length)
array.indices.foreach(i => array(i) = (i % 5).toByte)
array
}
private def oneByteArray(value: Byte): Array[Byte] = {
val array = new Array[Byte](1)
array(0) = value
array
}
@BeforeEach
override def setUp(testInfo: TestInfo): Unit = {
super.setUp(testInfo)
producer = TestUtils.createProducer(bootstrapServers())
}
@AfterEach
override def tearDown(): Unit = {
if (producer != null)
producer.close()
super.tearDown()
}
override protected def brokerPropertyOverrides(properties: Properties): Unit = {
super.brokerPropertyOverrides(properties)
properties.put(KafkaConfig.FetchMaxBytes, "1024")
}
private def createTopics(): Unit = {
val topicConfig = new Properties
topicConfig.setProperty(LogConfig.MinInSyncReplicasProp, 1.toString)
createTopic(testTopic,
numPartitions = 1,
replicationFactor = 1,
topicConfig = topicConfig)
// Produce several messages as single batches.
messages.indices.foreach(i => {
val record = new ProducerRecord(testTopic, 0, oneByteArray(i.toByte), messages(i))
val future = producer.send(record)
producer.flush()
future.get()
})
}
private def sendFetchRequest(leaderId: Int, request: FetchRequest): FetchResponse = {
connectAndReceive[FetchResponse](request, destination = brokerSocketServer(leaderId))
}
/**
* Tests that each of our fetch requests respects FetchMaxBytes.
*
* Note that when a single batch is larger than FetchMaxBytes, it is still returned
* in full, so a response may exceed FetchMaxBytes. See KIP-74.
*/
@Test
def testConsumeMultipleRecords(): Unit = {
createTopics()
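// Informal reading of the expectations below, given FetchMaxBytes = 1024 (see
// brokerPropertyOverrides) and batch payloads of 1, 500, 1040, 500 and 50 bytes:
// the first two batches fit into one response, the 1040-byte batch exceeds the limit
// but is still returned whole (KIP-74), and the last two batches fit together again.
// Exact packing also depends on per-batch record overhead, so this is a sketch of the
// assertions rather than a byte-exact calculation.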
expectNextRecords(IndexedSeq(messages(0), messages(1)), 0)
expectNextRecords(IndexedSeq(messages(2)), 2)
expectNextRecords(IndexedSeq(messages(3), messages(4)), 3)
}
private def expectNextRecords(expected: IndexedSeq[Array[Byte]],
fetchOffset: Long): Unit = {
val response = sendFetchRequest(0,
FetchRequest.Builder.forConsumer(3, Int.MaxValue, 0,
Map(testTopicPartition ->
new PartitionData(Uuid.ZERO_UUID, fetchOffset, 0, Integer.MAX_VALUE, Optional.empty())).asJava).build(3))
val records = FetchResponse.recordsOrFail(response.responseData(getTopicNames().asJava, 3).get(testTopicPartition)).records()
assertNotNull(records)
val recordsList = records.asScala.toList
assertEquals(expected.size, recordsList.size)
recordsList.zipWithIndex.foreach {
case (record, i) => {
val buffer = record.value().duplicate()
val array = new Array[Byte](buffer.remaining())
buffer.get(array)
assertArrayEquals(expected(i),
array, s"expectNextRecords unexpected element ${i}")
}
}
}
}
| TiVo/kafka | core/src/test/scala/unit/kafka/server/FetchRequestMaxBytesTest.scala | Scala | apache-2.0 | 4,822 |
/*
* Copyright 2017 Alexey Kuzin <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package choiceroulette.gui
import choiceroulette.configuration.ConfigurationManager
import choiceroulette.gui.utils.FxUtils
import scaldi.Injectable._
import scaldi.Injector
import scalafx.Includes.handle
import scalafx.scene.paint.Color
import scalafx.stage.StageStyle
/** Full application stage showing FullMainPane.
*
* @author Alexey Kuzin <[email protected]>
*/
class FullStage(splash: Option[Splash], configManager: ConfigurationManager)
(override implicit val injector: Injector)
extends ApplicationStage(splash, configManager, inject [MainPane]('FullMainPane)) {
initStyle(StageStyle.Decorated)
minWidth = 840
minHeight = 700
mainPane.background = FxUtils.backgroundColor(Color.web("#212323"))
width = configManager.getDouble(GuiConfigs.windowWidthConfigKey, minWidth)
height = configManager.getDouble(GuiConfigs.windowHeightConfigKey, minHeight)
width.onChange(configManager.setDouble(GuiConfigs.windowWidthConfigKey, width.value))
height.onChange(configManager.setDouble(GuiConfigs.windowHeightConfigKey, height.value))
onCloseRequest = handle(configManager.onExit())
}
| leviathan941/choiceroulette | guiapp/src/main/scala/choiceroulette/gui/FullStage.scala | Scala | apache-2.0 | 1,757 |
package chess
package format.pgn
class TagTest extends ChessTest {
"Tags" should {
// http://www.saremba.de/chessgml/standards/pgn/pgn-complete.htm#c8.1.1
"be sorted" in {
Tags(
List(
Tag(Tag.Site, "https://lichess.org/QuCzSfxw"),
Tag(Tag.Round, "-"),
Tag(Tag.Date, "2018.05.04"),
Tag(Tag.Black, "penguingim1"),
Tag(Tag.White, "DrDrunkenstein"),
Tag(Tag.Result, "1-0"),
Tag(Tag.UTCDate, "2018.05.04"),
Tag(Tag.UTCTime, "20:59:23"),
Tag(Tag.WhiteElo, "2870"),
Tag(Tag.BlackElo, "2862"),
Tag(Tag.WhiteRatingDiff, "+12"),
Tag(Tag.BlackRatingDiff, "-7"),
Tag(Tag.Event, "Titled Arena 5")
)
).sorted.value.map(_.name) must_== List(
Tag.Event,
Tag.Site,
Tag.Date,
Tag.Round,
Tag.White,
Tag.Black,
Tag.Result,
Tag.UTCDate,
Tag.UTCTime,
Tag.WhiteElo,
Tag.BlackElo,
Tag.WhiteRatingDiff,
Tag.BlackRatingDiff
)
}
}
}
| ornicar/scalachess | src/test/scala/format/pgn/TagTest.scala | Scala | mit | 1,092 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600j.v3
import org.joda.time.LocalDate
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.ct600j.v3.retriever.CT600JBoxRetriever
case class J30A(value: Option[LocalDate]) extends SchemeDateBox{
override def validate(boxRetriever: CT600JBoxRetriever): Set[CtValidation] =
validateSchemeDate(boxRetriever.j25(), boxRetriever.j25A(), boxRetriever.j30())
}
| pncampbell/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600j/v3/J30A.scala | Scala | apache-2.0 | 992 |
package services
import java.io._
import java.util.Properties
import javax.inject.{Inject, Singleton}
import javax.mail.Session
import javax.mail.internet.{InternetAddress, MimeMessage}
import conf.AppConf
import common._
import com.google.api.client.util.Base64
import com.google.api.services.gmail.model._
import com.google.api.services.gmail.{Gmail, GmailRequest}
import scala.collection.JavaConverters._
import scala.concurrent._
import scala.language.postfixOps
@Singleton
class GmailClient @Inject()(implicit exec: ExecutionContext, appConf: AppConf, googleAuthorization: GoogleAuthorization, gmailThrottlerClient: GmailThrottlerClient) {
private def seq[R <: GmailRequest[T], T](requests: List[R], mapper: R => Future[T]): Future[List[T]] = {
val results = requests.map(mapper(_))
Future.sequence(results)
}
def watch(userId: String, labelIds: List[String]): Future[WatchResponse] = {
for {
gmail <- googleAuthorization.getService(userId)
watchResponse <- callWatch(userId, gmail, labelIds)
} yield watchResponse
}
private def callWatch(userId: String, gmail: Gmail, labelIds: List[String]): Future[WatchResponse] = {
val request = gmail.users.watch(userId, new WatchRequest().setTopicName(appConf.getGmailTopic).setLabelIds(labelIds.asJava))
gmailThrottlerClient.scheduleWatch(userId, request)
}
def listLabels(userId: String): Future[List[Label]] =
for {
gmail <- googleAuthorization.getService(userId)
label <- listLabels(userId, gmail)
} yield label
private def listLabels(userId: String, gmail: Gmail): Future[List[Label]] = {
val request = gmail.users().labels().list(userId)
gmailThrottlerClient.scheduleListLabels(userId, request).map { response =>
val labels = response.getLabels
Option(labels).fold[List[Label]](List())(_.asScala.toList)
}
}
def createLabel(userId: String, labelName: String): Future[Label] =
for {
gmail <- googleAuthorization.getService(userId)
label <- createLabel(userId, gmail, labelName)
} yield label
private def createLabel(userId: String, gmail: Gmail, labelName: String): Future[Label] = {
val label = new Label().setName(labelName).setLabelListVisibility("labelShow").setMessageListVisibility("show")
val request = gmail.users().labels().create(userId, label)
gmailThrottlerClient.scheduleCreateLabel(userId, request)
}
def addLabels(userId: String, query: String, labelIds: List[String]): Future[List[Thread]] =
for {
gmail <- googleAuthorization.getService(userId)
threads <- listThreads(userId, query)
messages <- addLabels(userId, gmail, threads, labelIds)
} yield messages
private def addLabels(userId: String, gmail: Gmail, threads: List[Thread], labelIds: List[String]): Future[List[Thread]] = {
val modifyThreadRequest = new ModifyThreadRequest().setAddLabelIds(labelIds.asJava)
val requests = threads.map(pm => gmail.users().threads().modify(userId, pm.getId, modifyThreadRequest))
seq[Gmail#Users#Threads#Modify, Thread](requests, gmailThrottlerClient.scheduleModifyThread(userId, _))
}
def removeLabels(userId: String, query: String, labelIds: List[String]): Future[List[Thread]] =
for {
gmail <- googleAuthorization.getService(userId)
threads <- listThreads(userId, query)
messages <- removeLabels(userId, gmail, threads, labelIds)
} yield messages
private def removeLabels(userId: String, gmail: Gmail, threads: List[Thread], labelIds: List[String]): Future[List[Thread]] = {
val modifyThreadRequest = new ModifyThreadRequest().setRemoveLabelIds(labelIds.asJava)
val requests = threads.map(pm => gmail.users().threads().modify(userId, pm.getId, modifyThreadRequest))
seq[Gmail#Users#Threads#Modify, Thread](requests, gmailThrottlerClient.scheduleModifyThread(userId, _))
}
def deleteLabel(userId: String, labelId: String): Future[Unit] =
for {
gmail <- googleAuthorization.getService(userId)
result <- deleteLabel(userId, gmail, labelId)
} yield result
private def deleteLabel(userId: String, gmail: Gmail, labelId: String): Future[Unit] = {
val request = gmail.users().labels().delete(userId, labelId)
gmailThrottlerClient.scheduleDeleteLabel(userId, request)
}
def listThreads(userId: String, query: String): Future[List[Thread]] =
for {
gmail <- googleAuthorization.getService(userId)
threads <- listThreads(userId, gmail, query)
} yield threads
private def listThreads(userId: String, gmail: Gmail, query: String): Future[List[Thread]] = {
val request = gmail.users.threads.list(userId).setQ(query)
foldListThreads(userId, request)
}
def foldListThreads(userId: String, request: Gmail#Users#Threads#List): Future[List[Thread]] =
foldPages[ListThreadsResponse, Thread](response => Option(response.getNextPageToken), _.getThreads, nextPageTokenOpt => {
nextPageTokenOpt.fold()(request.setPageToken(_))
gmailThrottlerClient.scheduleListThreads(userId, request)
})
def listMessagesOfThread(userId: String, threadId: String): Future[List[Message]] =
for {
gmail <- googleAuthorization.getService(userId)
partialMessages <- listMessagesOfThread(userId, gmail, threadId)
messages <- getMessages(userId, gmail, partialMessages)
} yield messages
private def listMessagesOfThread(userId: String, gmail: Gmail, threadId: String): Future[List[Message]] = {
val request = gmail.users.threads.get(userId, threadId)
gmailThrottlerClient.scheduleGetThread(userId, request).map(_.getMessages.asScala.toList)
}
def listMessages(userId: String, query: String): Future[List[Message]] =
for {
gmail <- googleAuthorization.getService(userId)
partialMessages <- listMessages(userId, gmail, query)
messages <- getMessages(userId, gmail, partialMessages)
} yield messages
private def getMessages(userId: String, gmail: Gmail, partialMessages: List[Message]): Future[List[Message]] = {
val messagesRequests = partialMessages.map(pm => gmail.users.messages.get(userId, pm.getId))
seq[Gmail#Users#Messages#Get, Message](messagesRequests, gmailThrottlerClient.scheduleGetMessage(userId, _))
}
def getMessage(userId: String, messageId: String): Future[Message] =
for {
gmail <- googleAuthorization.getService(userId)
partialMessage = new Message().setId(messageId)
messages <- getMessage(userId, gmail, partialMessage)
} yield messages
private def getMessage(userId: String, gmail: Gmail, partialMessage: Message): Future[Message] = {
val request = gmail.users.messages.get(userId, partialMessage.getId)
gmailThrottlerClient.scheduleGetMessage(userId, request)
}
private def listMessages(userId: String, gmail: Gmail, query: String): Future[List[Message]] = {
val request = gmail.users.messages.list(userId).setQ(query)
foldListMessages(userId, request)
}
def foldListMessages(userId: String, request: Gmail#Users#Messages#List): Future[List[Message]] =
foldPages[ListMessagesResponse, Message](response => Option(response.getNextPageToken), _.getMessages, nextPageTokenOpt => {
nextPageTokenOpt.fold()(request.setPageToken(_))
gmailThrottlerClient.scheduleListMessages(userId, request)
})
def reply(userId: String, threadId: String, toAddress: String, personal: String, subject: String, content: String): Future[Message] =
for {
gmail <- googleAuthorization.getService(userId)
partialMessage <- reply(userId, gmail, threadId, toAddress, personal, subject, content)
message <- getMessage(userId, gmail, partialMessage)
} yield message
private def reply(userId: String, gmail: Gmail, threadId: String, toAddress: String, personal: String, subject: String, content: String): Future[Message] = {
val message = buildMessage(userId, threadId, toAddress, personal, subject, content)
val request = gmail.users.messages.send(userId, message)
gmailThrottlerClient.scheduleSendMessage(userId, request)
}
private def buildMessage(userId: String, threadId: String, toAddress: String, personal: String, subject: String, content: String) = {
val props = new Properties
val session = Session.getDefaultInstance(props, null)
val email = new MimeMessage(session)
email.addHeader("Subject", subject)
email.setSubject("Re: " + subject)
email.setFrom(new InternetAddress(userId, personal))
email.addRecipient(javax.mail.Message.RecipientType.TO, new InternetAddress(toAddress))
email.setText(content)
toMessage(email).setThreadId(threadId)
}
private def toMessage(message: MimeMessage): Message = {
val bytes: ByteArrayOutputStream = new ByteArrayOutputStream
message.writeTo(bytes)
val encodedEmail: String = Base64.encodeBase64URLSafeString(bytes.toByteArray)
new Message().setRaw(encodedEmail)
}
def getLastHistory(userId: String, startHistoryId: BigInt): Future[ListHistoryResponse] =
for {
gmail <- googleAuthorization.getService(userId)
result <- lastHistory(userId, gmail, startHistoryId)
} yield result
private def lastHistory(userId: String, gmail: Gmail, startHistoryId: BigInt): Future[ListHistoryResponse] = {
val request = gmail.users.history().list(userId)
request.setStartHistoryId(startHistoryId.bigInteger)
gmailThrottlerClient.scheduleListHistory(userId, request)
}
def listHistory(userId: String, startHistoryId: BigInt): Future[List[History]] =
for {
gmail <- googleAuthorization.getService(userId)
result <- listHistory(userId, gmail, startHistoryId)
} yield result
private def listHistory(userId: String, gmail: Gmail, startHistoryId: BigInt): Future[List[History]] = {
val request = gmail.users.history().list(userId)
request.setStartHistoryId(startHistoryId.bigInteger)
foldListHistory(userId, request)
}
def foldListHistory(userId: String, request: Gmail#Users#History#List): Future[List[History]] =
foldPages[ListHistoryResponse, History](response => Option(response.getNextPageToken), _.getHistory, nextPageTokenOpt => {
nextPageTokenOpt.fold()(request.setPageToken(_))
gmailThrottlerClient.scheduleListHistory(userId, request)
})
def getAttachment(userId: String, messageId: String, attachmentId: String): Future[MessagePartBody] =
for {
gmail <- googleAuthorization.getService(userId)
result <- getAttachment(userId, gmail, messageId, attachmentId)
} yield result
private def getAttachment(userId: String, gmail: Gmail, messageId: String, attachmentId: String): Future[MessagePartBody] = {
val request = gmail.users.messages().attachments().get(userId, messageId, attachmentId)
gmailThrottlerClient.scheduleGetMessageAttachment(userId, request)
}
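// Generic pagination fold backing foldListThreads/foldListMessages/foldListHistory:
// issue the request without a page token, append that page's payload, then recurse
// with the nextPageToken until the response no longer carries one. Illustrative trace:
//   page 1            -> payload P1, nextPageToken Some(t1)
//   page 2 (token t1) -> payload P2, nextPageToken None
//   result: P1 ++ P2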
private def foldPages[R, T](nextPageTokenGetter: R => Option[String], payloadGetter: R => java.util.List[T], requestBuilder: Option[String] => Future[R]): Future[List[T]] = {
def fold(request: => Future[R], acc: List[T] = List()): Future[List[T]] = {
request.flatMap(response => {
// payloadGetter(response) can return null if the request gets nothing
val newPayload = Option(payloadGetter(response)).map(_.asScala.toList).fold(List[T]())(identity)
val newAcc = acc ++ newPayload
nextPageTokenGetter(response).fold(fs(newAcc)) { nextPageToken => fold(requestBuilder(Some(nextPageToken)), newAcc) }
})
}
fold(requestBuilder(None))
}
}
| phdezann/connectus | connectus-backend/app/services/GmailClient.scala | Scala | mit | 11,589 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import _root_.io.netty.util.internal.logging.{InternalLoggerFactory, Slf4JLoggerFactory}
import org.scalatest.BeforeAndAfterAll
import org.scalatest.BeforeAndAfterEach
import org.scalatest.Suite
/** Manages a local `spark` {@link SparkSession} variable, correctly stopping it after each test. */
trait LocalSparkSession extends BeforeAndAfterEach with BeforeAndAfterAll { self: Suite =>
@transient var spark: SparkSession = _
override def beforeAll() {
super.beforeAll()
InternalLoggerFactory.setDefaultFactory(Slf4JLoggerFactory.INSTANCE)
SparkSession.clearActiveSession()
SparkSession.clearDefaultSession()
}
override def afterEach() {
try {
resetSparkContext()
SparkSession.clearActiveSession()
SparkSession.clearDefaultSession()
} finally {
super.afterEach()
}
}
def resetSparkContext(): Unit = {
LocalSparkSession.stop(spark)
spark = null
}
}
object LocalSparkSession {
def stop(spark: SparkSession) {
if (spark != null) {
spark.stop()
}
// To avoid RPC rebinding to the same port, since it doesn't unbind immediately on shutdown
System.clearProperty("spark.driver.port")
}
/** Runs `f` by passing in `sc` and ensures that `sc` is stopped. */
def withSparkSession[T](sc: SparkSession)(f: SparkSession => T): T = {
try {
f(sc)
} finally {
stop(sc)
}
}
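// Usage sketch (illustrative only; the builder calls are standard SparkSession API,
// but this snippet is not part of the original file):
//
//   LocalSparkSession.withSparkSession(
//     SparkSession.builder().master("local[1]").appName("test").getOrCreate()) { spark =>
//     spark.range(10).count()
//   }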
}
| bravo-zhang/spark | sql/core/src/test/scala/org/apache/spark/sql/LocalSparkSession.scala | Scala | apache-2.0 | 2,229 |
package com.github.agourlay.cornichon.steps.wrapped
import cats.data.{ NonEmptyList, StateT }
import cats.effect.IO
import com.github.agourlay.cornichon.core.Done.rightDone
import com.github.agourlay.cornichon.core._
case class RepeatWithStep(nested: List[Step], elements: List[String], elementName: String) extends WrapperStep {
require(elements.nonEmpty, "repeatWith block must contain a non empty sequence of elements")
val printElements = s"[${elements.mkString(", ")}]"
val title = s"RepeatWith block with elements $printElements"
override val stateUpdate: StepState = StateT { runState =>
def repeatSuccessSteps(remainingElements: List[String], runState: RunState): IO[(RunState, Either[(String, FailedStep), Done])] =
remainingElements match {
case Nil =>
IO.pure((runState, rightDone))
case element :: tail =>
// reset logs at each iteration so that, on failure, we don't aggregate the logs of every run
val rs = runState.resetLogStack
val runStateWithIndex = rs.addToSession(elementName, element)
ScenarioRunner.runStepsShortCircuiting(nested, runStateWithIndex).flatMap {
case (onceMoreRunState, stepResult) =>
stepResult.fold(
failed => {
// In case of failure only the logs of the last run are shown to avoid giant traces.
IO.pure((onceMoreRunState, Left((element, failed))))
},
_ => {
val successState = runState.mergeNested(onceMoreRunState)
repeatSuccessSteps(tail, successState)
}
)
}
}
repeatSuccessSteps(elements, runState.nestedContext).timed
.map {
case (executionTime, (repeatedState, report)) =>
val depth = runState.depth
val (logStack, res) = report match {
case Right(_) =>
val wrappedLogStack = SuccessLogInstruction(s"RepeatWith block with elements $printElements succeeded", depth, Some(executionTime)) +: repeatedState.logStack :+ successTitleLog(depth)
(wrappedLogStack, rightDone)
case Left((failedElement, failedStep)) =>
val wrappedLogStack = FailureLogInstruction(s"RepeatWith block with elements $printElements failed at element '$failedElement'", depth, Some(executionTime)) +: repeatedState.logStack :+ failedTitleLog(depth)
val artificialFailedStep = FailedStep.fromSingle(failedStep.step, RepeatWithBlockContainFailedSteps(failedElement, failedStep.errors))
(wrappedLogStack, Left(artificialFailedStep))
}
(runState.mergeNested(repeatedState, logStack), res)
}
}
}
case class RepeatWithBlockContainFailedSteps(element: String, errors: NonEmptyList[CornichonError]) extends CornichonError {
lazy val baseErrorMessage = s"RepeatWith block failed for element '$element'"
override val causedBy = errors.toList
}
| agourlay/cornichon | cornichon-core/src/main/scala/com/github/agourlay/cornichon/steps/wrapped/RepeatWithStep.scala | Scala | apache-2.0 | 2,985 |
/**
* Copyright (C) 2009-2010 the original author or authors.
* See the notice.md file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.fusesource.scalate.support.TemplatePackage
import org.fusesource.scalate.{Binding, TemplateSource}
/**
* Defines the template package of reusable imports, attributes and methods across templates
*/
class ScalatePackage extends TemplatePackage {
def header(source: TemplateSource, bindings: List[Binding]) = """
// common imports go here
"""
}
| arashi01/scalate-samples | scalate-sample-sitegen/ext/ScalatePackage.scala | Scala | apache-2.0 | 1,108 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.runtime.stream.sql
import org.apache.flink.api.common.time.Time
import org.apache.flink.api.java.typeutils.RowTypeInfo
import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.scala.DataStream
import org.apache.flink.table.api.Types
import org.apache.flink.table.api.scala._
import org.apache.flink.table.planner.functions.aggfunctions.{ListAggWithRetractAggFunction, ListAggWsWithRetractAggFunction}
import org.apache.flink.table.planner.plan.utils.JavaUserDefinedAggFunctions.VarSumAggFunction
import org.apache.flink.table.planner.runtime.batch.sql.agg.{MyPojoAggFunction, VarArgsAggFunction}
import org.apache.flink.table.planner.runtime.utils.StreamingWithAggTestBase.AggMode
import org.apache.flink.table.planner.runtime.utils.StreamingWithMiniBatchTestBase.MiniBatchMode
import org.apache.flink.table.planner.runtime.utils.StreamingWithStateTestBase.StateBackendMode
import org.apache.flink.table.planner.runtime.utils.TimeTestUtil.TimestampAndWatermarkWithOffset
import org.apache.flink.table.planner.runtime.utils.UserDefinedFunctionTestUtils._
import org.apache.flink.table.planner.runtime.utils.{GenericAggregateFunction, StreamingWithAggTestBase, TestData, TestingRetractSink}
import org.apache.flink.table.planner.utils.DateTimeTestUtil.{localDate, localDateTime, localTime => mLocalTime}
import org.apache.flink.table.runtime.typeutils.BigDecimalTypeInfo
import org.apache.flink.types.Row
import org.junit.Assert.assertEquals
import org.junit._
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import java.lang.{Integer => JInt, Long => JLong}
import java.math.{BigDecimal => JBigDecimal}
import scala.collection.{Seq, mutable}
import scala.util.Random
@RunWith(classOf[Parameterized])
class AggregateITCase(
aggMode: AggMode,
miniBatch: MiniBatchMode,
backend: StateBackendMode)
extends StreamingWithAggTestBase(aggMode, miniBatch, backend) {
val data = List(
(1000L, 1, "Hello"),
(2000L, 2, "Hello"),
(3000L, 3, "Hello"),
(4000L, 4, "Hello"),
(5000L, 5, "Hello"),
(6000L, 6, "Hello"),
(7000L, 7, "Hello World"),
(8000L, 8, "Hello World"),
(20000L, 20, "Hello World"))
@Test
def testEmptyInputAggregation(): Unit = {
val data = new mutable.MutableList[(Int, Int)]
data .+= ((1, 1))
data .+= ((2, 2))
data .+= ((3, 3))
val t = failingDataSource(data).toTable(tEnv, 'a, 'b)
tEnv.registerTable("T", t)
val t1 = tEnv.sqlQuery(
"select sum(a), avg(a), min(a), count(a), count(1) from T where a > 9999 group by b")
val sink = new TestingRetractSink
t1.toRetractStream[Row].addSink(sink)
env.execute()
val expected = List()
assertEquals(expected, sink.getRetractResults)
}
@Test
def testShufflePojo(): Unit = {
val data = new mutable.MutableList[(Int, Int)]
data .+= ((1, 1))
data .+= ((2, 2))
data .+= ((3, 3))
val t = failingDataSource(data).toTable(tEnv, 'a, 'b)
tEnv.registerTable("T", t)
tEnv.registerFunction("pojoFunc", MyToPojoFunc)
val t1 = tEnv.sqlQuery(
"select sum(a), avg(a), min(a), count(a), count(1) from T group by pojoFunc(b)")
val sink = new TestingRetractSink
t1.toRetractStream[Row].addSink(sink)
env.execute()
val expected = List(
"1,1,1,1,1",
"2,2,2,1,1",
"3,3,3,1,1")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Ignore("[FLINK-12215] Fix this when introduce SqlProcessFunction.")
@Test
def testEmptyInputAggregationWithoutGroupBy(): Unit = {
val data = new mutable.MutableList[(Int, Int)]
data .+= ((1, 1))
data .+= ((2, 2))
data .+= ((3, 3))
val t = failingDataSource(data).toTable(tEnv, 'a, 'b)
tEnv.registerTable("T", t)
val t1 = tEnv.sqlQuery(
"select sum(a), avg(a), min(a), count(a), count(1) from T where a > 9999")
val sink = new TestingRetractSink
t1.toRetractStream[Row].addSink(sink).setParallelism(1)
env.execute()
val expected = List("null,null,null,0,0")
assertEquals(expected, sink.getRetractResults)
}
@Test
def testAggregationWithoutWatermark(): Unit = {
// NOTE: Unlike the other aggregate tests, we do not set the stream time characteristic
// of the environment to event time, so emitWatermark() actually does nothing.
env.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime)
val data = new mutable.MutableList[(Int, Int)]
data .+= ((1, 1))
data .+= ((2, 2))
data .+= ((3, 3))
val t = failingDataSource(data).toTable(tEnv, 'a, 'b)
tEnv.registerTable("T", t)
val t1 = tEnv.sqlQuery(
"select sum(a), avg(a), min(a), count(a), count(1) from T")
val sink = new TestingRetractSink
t1.toRetractStream[Row].addSink(sink).setParallelism(1)
env.execute()
val expected = List("6,2,1,3,3")
assertEquals(expected, sink.getRetractResults)
}
@Test
def testDistinctGroupBy(): Unit = {
val sqlQuery =
"SELECT b, " +
" SUM(DISTINCT (a * 3)), " +
" COUNT(DISTINCT SUBSTRING(c FROM 1 FOR 2))," +
" COUNT(DISTINCT c) " +
"FROM MyTable " +
"GROUP BY b"
val t = failingDataSource(TestData.tupleData3).toTable(tEnv, 'a, 'b, 'c)
tEnv.registerTable("MyTable", t)
val result = tEnv.sqlQuery(sqlQuery).toRetractStream[Row]
val sink = new TestingRetractSink
result.addSink(sink)
env.execute()
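// Reading of one expected row, assuming the standard tupleData3 fixture in which b = 2
// covers the rows (2, "Hello") and (3, "Hello world"): SUM(DISTINCT a * 3) = 6 + 9 = 15,
// both c values start with "He" so the substring count is 1, and there are 2 distinct
// full values of c, matching the "2,15,1,2" row below.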
val expected = List(
"1,3,1,1",
"2,15,1,2",
"3,45,3,3",
"4,102,1,4",
"5,195,1,5",
"6,333,1,6")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testCountDistinct(): Unit = {
val ids = List(
1,
2, 2,
3, 3, 3,
4, 4, 4, 4,
5, 5, 5, 5, 5)
val dateTimes = List(
"1970-01-01 00:00:01",
"1970-01-01 00:00:02", null,
"1970-01-01 00:00:04", "1970-01-01 00:00:05", "1970-01-01 00:00:06",
"1970-01-01 00:00:07", null, null, "1970-01-01 00:00:10",
"1970-01-01 00:00:11", "1970-01-01 00:00:11", "1970-01-01 00:00:13",
"1970-01-01 00:00:14", "1970-01-01 00:00:15")
val dates = List(
"1970-01-01",
"1970-01-02", null,
"1970-01-04", "1970-01-05", "1970-01-06",
"1970-01-07", null, null, "1970-01-10",
"1970-01-11", "1970-01-11", "1970-01-13", "1970-01-14", "1970-01-15")
val times = List(
"00:00:01",
"00:00:02", null,
"00:00:04", "00:00:05", "00:00:06",
"00:00:07", null, null, "00:00:10",
"00:00:11", "00:00:11", "00:00:13", "00:00:14", "00:00:15")
val integers = List(
"1",
"2", null,
"4", "5", "6",
"7", null, null, "10",
"11", "11", "13", "14", "15")
val chars = List(
"A",
"B", null,
"D", "E", "F",
"H", null, null, "K",
"L", "L", "N", "O", "P")
val data = new mutable.MutableList[Row]
for (i <- ids.indices) {
val v = integers(i)
val decimal = if (v == null) null else new JBigDecimal(v)
val int = if (v == null) null else JInt.valueOf(v)
val long = if (v == null) null else JLong.valueOf(v)
data.+=(Row.of(
Int.box(ids(i)), localDateTime(dateTimes(i)), localDate(dates(i)),
mLocalTime(times(i)), decimal, int, long, chars(i)))
}
val inputs = util.Random.shuffle(data)
val rowType = new RowTypeInfo(
Types.INT, Types.LOCAL_DATE_TIME, Types.LOCAL_DATE, Types.LOCAL_TIME,
Types.DECIMAL, Types.INT, Types.LONG, Types.STRING)
val t = failingDataSource(inputs)(rowType).toTable(tEnv, 'id, 'a, 'b, 'c, 'd, 'e, 'f, 'g)
tEnv.createTemporaryView("T", t)
val t1 = tEnv.sqlQuery(
s"""
|SELECT
| id,
| count(distinct a),
| count(distinct b),
| count(distinct c),
| count(distinct d),
| count(distinct e),
| count(distinct f),
| count(distinct g)
|FROM T GROUP BY id
""".stripMargin)
val sink = new TestingRetractSink
t1.toRetractStream[Row].addSink(sink)
env.execute()
val expected = List(
"1,1,1,1,1,1,1,1",
"2,1,1,1,1,1,1,1",
"3,3,3,3,3,3,3,3",
"4,2,2,2,2,2,2,2",
"5,4,4,4,4,4,4,4")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testDistinctWithRetract(): Unit = {
// this case covers LongArrayValueWithRetractionGenerator and LongValueWithRetractionGenerator
val data = new mutable.MutableList[(Int, Long, String)]
data.+=((1, 1L, "A"))
data.+=((1, 1L, "A"))
data.+=((1, 1L, "A"))
data.+=((2, 2L, "B"))
data.+=((3, 2L, "B"))
data.+=((4, 3L, "C"))
data.+=((5, 3L, "C"))
data.+=((6, 3L, "C"))
data.+=((7, 4L, "B"))
data.+=((8, 4L, "A"))
data.+=((9, 4L, "D"))
data.+=((10, 4L, "E"))
data.+=((11, 5L, "A"))
data.+=((12, 5L, "B"))
// b, count(a) as cnt
// 1, 3
// 2, 2
// 3, 3
// 4, 4
// 5, 2
val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c)
tEnv.registerTable("T", t)
val sql =
"""
|SELECT
| count(distinct cnt),
| sum(distinct cnt),
| max(distinct cnt),
| min(distinct cnt),
| avg(distinct cnt),
| count(distinct max_a)
|FROM (
| SELECT b, count(a) as cnt, max(a) as max_a
| FROM T
| GROUP BY b)
""".stripMargin
val t1 = tEnv.sqlQuery(sql)
val sink = new TestingRetractSink
t1.toRetractStream[Row].addSink(sink).setParallelism(1)
env.execute()
val expected = List("3,9,4,2,3,5")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testDistinctAggregateMoreThan64(): Unit = {
// this case is used to cover DistinctAggCodeGen#LongArrayValueWithoutRetractionGenerator
val data = new mutable.MutableList[(Int, Int)]
for (i <- 0 until 100) {
for (j <- 0 until 100 - i) {
data.+=((j, i))
}
}
val t = failingDataSource(Random.shuffle(data)).toTable(tEnv, 'a, 'b)
tEnv.registerTable("T", t)
val distincts = for (i <- 0 until 100) yield {
s"count(distinct a) filter (where b = $i)"
}
val sql =
s"""
|SELECT
| ${distincts.mkString(", ")}
|FROM T
""".stripMargin
val t1 = tEnv.sqlQuery(sql)
val sink = new TestingRetractSink
t1.toRetractStream[Row].addSink(sink).setParallelism(1)
env.execute()
val expected = List((1 to 100).reverse.mkString(","))
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testDistinctAggWithNullValues(): Unit = {
val data = new mutable.MutableList[(Int, Long, String)]
data.+=((1, 1L, "A"))
data.+=((2, 2L, "B"))
data.+=((3, 2L, "B"))
data.+=((4, 3L, "C"))
data.+=((5, 3L, "C"))
data.+=((6, 3L, null))
data.+=((7, 3L, "C"))
data.+=((8, 4L, "B"))
data.+=((9, 4L, null))
data.+=((10, 4L, null))
data.+=((11, 4L, "A"))
data.+=((12, 4L, "D"))
data.+=((13, 4L, null))
data.+=((14, 4L, "E"))
data.+=((15, 5L, "A"))
data.+=((16, 5L, null))
data.+=((17, 5L, "B"))
val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c)
tEnv.registerTable("T", t)
tEnv.registerFunction("CntNullNonNull", new CountNullNonNull)
val t1 = tEnv.sqlQuery(
"SELECT b, count(*), CntNullNonNull(DISTINCT c) FROM T GROUP BY b")
val sink = new TestingRetractSink
t1.toRetractStream[Row].addSink(sink)
env.execute()
val expected = List("1,1,1|0", "2,2,1|0", "3,4,1|1", "4,7,4|1", "5,3,2|1")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testGroupByAgg(): Unit = {
val data = new mutable.MutableList[(Int, Long, String)]
data.+=((1, 1L, "A"))
data.+=((2, 2L, "B"))
data.+=((3, 2L, "B"))
data.+=((4, 3L, "C"))
data.+=((5, 3L, "C"))
data.+=((6, 3L, "C"))
data.+=((7, 4L, "B"))
data.+=((8, 4L, "A"))
data.+=((9, 4L, "D"))
data.+=((10, 4L, "E"))
data.+=((11, 5L, "A"))
data.+=((12, 5L, "B"))
val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c)
tEnv.registerTable("T", t)
val t1 = tEnv.sqlQuery("SELECT b, count(c), sum(a) FROM T GROUP BY b")
val sink = new TestingRetractSink
t1.toRetractStream[Row].addSink(sink)
env.execute()
val expected = List("1,1,1", "2,2,5", "3,3,15", "4,4,34", "5,2,23")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testCountWithNullableIfCall(): Unit = {
val data = new mutable.MutableList[(Int, Long, String)]
data.+=((1, 1L, "A"))
data.+=((2, 2L, "B"))
data.+=((3, 2L, "B"))
data.+=((4, 3L, "C"))
data.+=((5, 3L, "C"))
data.+=((6, 3L, "C"))
data.+=((7, 4L, "B"))
data.+=((8, 4L, "A"))
data.+=((9, 4L, "D"))
data.+=((10, 4L, "E"))
data.+=((11, 5L, "A"))
data.+=((12, 5L, "B"))
val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c)
tEnv.registerTable("T", t)
val sql =
s"""
|select
| b
| ,count(1)
| ,count(if(c in ('A', 'B'), cast(null as integer), 1)) as cnt
| ,count(if(c not in ('A', 'B'), 1, cast(null as integer))) as cnt1
|from T
|group by b
""".stripMargin
val t1 = tEnv.sqlQuery(sql)
val sink = new TestingRetractSink
t1.toRetractStream[Row].addSink(sink)
env.execute()
val expected = List("1,1,0,0", "2,2,0,0", "3,3,3,3", "4,4,2,2", "5,2,0,0")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testNestedGroupByAgg(): Unit = {
val data = new mutable.MutableList[(Int, Long, String)]
data.+=((1, 1L, "A"))
data.+=((2, 2L, "B"))
data.+=((3, 2L, "B"))
data.+=((4, 3L, "C"))
data.+=((5, 3L, "C"))
data.+=((6, 3L, "C"))
data.+=((7, 4L, "B"))
data.+=((8, 4L, "A"))
data.+=((9, 4L, "D"))
data.+=((10, 4L, "E"))
data.+=((11, 5L, "A"))
data.+=((12, 5L, "B"))
val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c)
tEnv.registerTable("T", t)
val sql =
"""
|SELECT sum(b), count(a), max(a), min(a), c
|FROM (
| SELECT b, count(c) as c, sum(a) as a
| FROM T
| GROUP BY b)
|GROUP BY c
""".stripMargin
val t1 = tEnv.sqlQuery(sql)
val sink = new TestingRetractSink
t1.toRetractStream[Row].addSink(sink)
env.execute()
val expected = List("1,1,1,1,1", "3,1,15,15,3", "4,1,34,34,4", "7,2,23,5,2")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
/** test unbounded groupBy (without window) **/
@Test
def testUnboundedGroupBy(): Unit = {
val t = failingDataSource(TestData.tupleData3).toTable(tEnv, 'a, 'b, 'c)
tEnv.registerTable("MyTable", t)
val sqlQuery = "SELECT b, COUNT(a) FROM MyTable GROUP BY b"
val sink = new TestingRetractSink
tEnv.sqlQuery(sqlQuery).toRetractStream[Row].addSink(sink)
env.execute()
val expected = List("1,1", "2,2", "3,3", "4,4", "5,5", "6,6")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testWindowWithUnboundedAgg(): Unit = {
val t = failingDataSource(TestData.tupleData5.map {
case (a, b, c, d, e) => (b, a, c, d, e)
}).assignTimestampsAndWatermarks(
new TimestampAndWatermarkWithOffset[(Long, Int, Int, String, Long)](0L))
.toTable(tEnv, 'rowtime.rowtime, 'a, 'c, 'd, 'e)
tEnv.registerTable("MyTable", t)
val innerSql =
"""
|SELECT a,
| SUM(DISTINCT e) b,
| MIN(DISTINCT e) c,
| COUNT(DISTINCT e) d
|FROM MyTable
|GROUP BY a, TUMBLE(rowtime, INTERVAL '0.005' SECOND)
""".stripMargin
val sqlQuery = "SELECT c, MAX(a), COUNT(DISTINCT d) FROM (" + innerSql + ") GROUP BY c"
val results = tEnv.sqlQuery(sqlQuery).toRetractStream[Row]
val sink = new TestingRetractSink
results.addSink(sink)
env.execute()
val expected = List(
"1,5,3",
"2,5,2")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testListAggWithNullData(): Unit = {
val dataWithNull = List(
(1, 1, null),
(2, 1, null),
(3, 1, null))
val t: DataStream[(Int, Int, String)] = failingDataSource(dataWithNull)
val streamTable = t.toTable(tEnv, 'id, 'len, 'content)
tEnv.registerTable("T", streamTable)
val sqlQuery =
s"""
|SELECT len, listagg(content, '#') FROM T GROUP BY len
""".stripMargin
val sink = new TestingRetractSink
tEnv.sqlQuery(sqlQuery).toRetractStream[Row].addSink(sink)
env.execute()
val expected = List("1,null")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testListAggWithoutDelimiterTreatNull(): Unit = {
val dataWithNull = List(
(1, 1, null),
(2, 1, null),
(3, 1, null))
val t: DataStream[(Int, Int, String)] = failingDataSource(dataWithNull)
val streamTable = t.toTable(tEnv, 'id, 'len, 'content)
tEnv.registerTable("T", streamTable)
val sqlQuery =
s"""
|SELECT len, listagg(content) FROM T GROUP BY len
""".stripMargin
val sink = new TestingRetractSink
tEnv.sqlQuery(sqlQuery).toRetractStream[Row].addSink(sink)
env.execute()
val expected = List("1,null")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testListAggWithDistinct(): Unit = {
val data = new mutable.MutableList[(Int, Long, String)]
data.+=((1, 1L, "A"))
data.+=((2, 2L, "B"))
data.+=((3, 2L, "B"))
data.+=((4, 3L, "C"))
data.+=((5, 3L, "C"))
data.+=((6, 3L, "A"))
data.+=((7, 4L, "EF"))
data.+=((1, 1L, "A"))
data.+=((8, 4L, "EF"))
data.+=((8, 4L, null))
val sqlQuery = "SELECT b, LISTAGG(DISTINCT c, '#') FROM MyTable GROUP BY b"
tEnv.registerTable("MyTable",
failingDataSource(data).toTable(tEnv).as('a, 'b, 'c))
val sink = new TestingRetractSink
tEnv.sqlQuery(sqlQuery).toRetractStream[Row].addSink(sink).setParallelism(1)
env.execute()
val expected = List("1,A", "2,B", "3,C#A", "4,EF")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testUnboundedGroupByCollect(): Unit = {
val sqlQuery = "SELECT b, COLLECT(a) FROM MyTable GROUP BY b"
val t = failingDataSource(TestData.tupleData3).toTable(tEnv, 'a, 'b, 'c)
tEnv.registerTable("MyTable", t)
val sink = new TestingRetractSink
tEnv.sqlQuery(sqlQuery).toRetractStream[Row].addSink(sink)
env.execute()
// TODO: [BLINK-16716210] the string result of collect is not deterministic
// TODO: sort the map result in the future
val expected = List(
"1,{1=1}",
"2,{2=1, 3=1}",
"3,{4=1, 5=1, 6=1}",
"4,{7=1, 8=1, 9=1, 10=1}",
"5,{11=1, 12=1, 13=1, 14=1, 15=1}",
"6,{16=1, 17=1, 18=1, 19=1, 20=1, 21=1}")
assertMapStrEquals(expected.sorted.toString, sink.getRetractResults.sorted.toString)
}
@Test
def testUnboundedGroupByCollectWithObject(): Unit = {
val sqlQuery = "SELECT b, COLLECT(c) FROM MyTable GROUP BY b"
val data = List(
(1, 1, List(12, "45.6")),
(2, 2, List(12, "45.612")),
(3, 2, List(13, "41.6")),
(4, 3, List(14, "45.2136")),
(5, 3, List(18, "42.6"))
)
tEnv.registerTable("MyTable",
failingDataSource(data).toTable(tEnv, 'a, 'b, 'c))
val sink = new TestingRetractSink
tEnv.sqlQuery(sqlQuery).toRetractStream[Row].addSink(sink)
env.execute()
val expected = List(
"1,{List(12, 45.6)=1}",
"2,{List(13, 41.6)=1, List(12, 45.612)=1}",
"3,{List(18, 42.6)=1, List(14, 45.2136)=1}")
assertMapStrEquals(expected.sorted.toString, sink.getRetractResults.sorted.toString)
}
@Test
def testGroupBySingleValue(): Unit = {
val data = new mutable.MutableList[(Int, Long, String)]
data.+=((1, 1L, "A"))
data.+=((2, 2L, "B"))
data.+=((3, 2L, "B"))
data.+=((4, 3L, "C"))
data.+=((5, 3L, "C"))
data.+=((6, 3L, "C"))
data.+=((6, 3L, "C"))
data.+=((6, 3L, "C"))
data.+=((6, 3L, "C"))
data.+=((6, 3L, "C"))
data.+=((6, 3L, "C"))
data.+=((6, 3L, "C"))
data.+=((6, 3L, "C"))
data.+=((7, 4L, "B"))
data.+=((8, 4L, "A"))
data.+=((9, 4L, "D"))
data.+=((10, 4L, "E"))
data.+=((11, 5L, "A"))
data.+=((12, 5L, "B"))
val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c)
tEnv.registerTable("T1", t)
tEnv.registerTable("T2", t)
val t1 = tEnv.sqlQuery("SELECT * FROM T2 WHERE T2.a < (SELECT count(*) * 0.3 FROM T1)")
val sink = new TestingRetractSink
t1.toRetractStream[Row].addSink(sink).setParallelism(1)
env.execute()
val expected = List("1,1,A", "2,2,B", "3,2,B", "4,3,C", "5,3,C")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testPojoField(): Unit = {
val data = Seq(
(1, new MyPojo(5, 105)),
(1, new MyPojo(6, 11)),
(1, new MyPojo(7, 12)))
val t = failingDataSource(data).toTable(tEnv, 'a, 'b)
tEnv.registerTable("MyTable", t)
tEnv.registerFunction("pojoFunc", new MyPojoAggFunction)
tEnv.registerFunction("pojoToInt", MyPojoFunc)
val sql = "SELECT pojoToInt(pojoFunc(b)) FROM MyTable group by a"
val sink = new TestingRetractSink
tEnv.sqlQuery(sql).toRetractStream[Row].addSink(sink)
env.execute()
val expected = List("128")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testDecimalSum(): Unit = {
val data = new mutable.MutableList[Row]
data.+=(Row.of(BigDecimal(1).bigDecimal))
data.+=(Row.of(BigDecimal(2).bigDecimal))
data.+=(Row.of(BigDecimal(2).bigDecimal))
data.+=(Row.of(BigDecimal(3).bigDecimal))
val rowType = new RowTypeInfo(BigDecimalTypeInfo.of(7, 2))
val t = failingDataSource(data)(rowType).toTable(tEnv, 'd)
tEnv.registerTable("T", t)
val sql =
"""
|select c, sum(d) from (
| select d, count(d) c from T group by d
|) group by c
""".stripMargin
val sink = new TestingRetractSink
tEnv.sqlQuery(sql).toRetractStream[Row].addSink(sink)
env.execute()
val expected = List("1,4.00", "2,2.00")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testDifferentTypesSumWithRetract(): Unit = {
val data = List(
(1.toByte, 1.toShort, 1, 1L, 1.0F, 1.0, "a"),
(2.toByte, 2.toShort, 2, 2L, 2.0F, 2.0, "a"),
(3.toByte, 3.toShort, 3, 3L, 3.0F, 3.0, "a"),
(3.toByte, 3.toShort, 3, 3L, 3.0F, 3.0, "a"),
(1.toByte, 1.toShort, 1, 1L, 1.0F, 1.0, "b"),
(2.toByte, 2.toShort, 2, 2L, 2.0F, 2.0, "b"),
(3.toByte, 3.toShort, 3, 3L, 3.0F, 3.0, "c"),
(3.toByte, 3.toShort, 3, 3L, 3.0F, 3.0, "c")
)
val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c, 'd, 'e, 'f, 'g)
tEnv.registerTable("T", t)
// We use sub-query + limit here to ensure retraction
val sql =
"""
|SELECT sum(a), sum(b), sum(c), sum(d), sum(e), sum(f), sum(h) FROM (
| SELECT *, CAST(c AS DECIMAL(3, 2)) AS h FROM T LIMIT 8
|) GROUP BY g
""".stripMargin
val sink = new TestingRetractSink
tEnv.sqlQuery(sql).toRetractStream[Row].addSink(sink)
env.execute()
val expected = List("9,9,9,9,9.0,9.0,9.00", "3,3,3,3,3.0,3.0,3.00", "6,6,6,6,6.0,6.0,6.00")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testAggAfterUnion(): Unit = {
val data = List(
(1L, 1, "Hello"),
(2L, 2, "Hello"),
(2L, 3, "Hello"),
(3L, 4, "Hello"),
(3L, 5, "Hello"),
(7L, 6, "Hello"),
(7L, 7, "Hello World"),
(7L, 8, "Hello World"),
(10L, 20, "Hello World"))
val t1 = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c)
tEnv.registerTable("T1", t1)
val t2 = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c)
tEnv.registerTable("T2", t2)
val sql =
"""
|SELECT a, sum(b), count(distinct c)
|FROM (
| SELECT * FROM T1
| UNION ALL
| SELECT * FROM T2
|) GROUP BY a
""".stripMargin
val sink = new TestingRetractSink
tEnv.sqlQuery(sql).toRetractStream[Row].addSink(sink)
env.execute()
val expected = List("1,2,1", "2,10,1", "3,18,1", "7,42,2", "10,40,1")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testVarArgsNoGroupBy(): Unit = {
val data = List(
(1, 1L, "5", "3"),
(1, 22L, "15", "13"),
(3, 33L, "25", "23"))
val t = failingDataSource(data).toTable(tEnv, 'id, 's, 's1, 's2)
tEnv.registerTable("MyTable", t)
tEnv.registerFunction("func", new VarArgsAggFunction)
val sql = "SELECT func(s, s1, s2) FROM MyTable"
val sink = new TestingRetractSink
tEnv.sqlQuery(sql).toRetractStream[Row].addSink(sink).setParallelism(1)
env.execute()
val expected = List("140")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testVarArgsWithGroupBy(): Unit = {
val data = List(
(1, 1L, "5", "3"),
(1, 22L, "15", "13"),
(3, 33L, "25", "23"))
val t = failingDataSource(data).toTable(tEnv, 'id, 's, 's1, 's2)
tEnv.registerTable("MyTable", t)
tEnv.registerFunction("func", new VarArgsAggFunction)
val sink = new TestingRetractSink
tEnv
.sqlQuery("SELECT id, func(s, s1, s2) FROM MyTable group by id")
.toRetractStream[Row]
.addSink(sink)
env.execute()
val expected = List("1,59", "3,81")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testMinMaxWithBinaryString(): Unit = {
val data = new mutable.MutableList[(Int, Long, String)]
data.+=((1, 1L, "A"))
data.+=((2, 2L, "B"))
data.+=((3, 2L, "BC"))
data.+=((4, 3L, "C"))
data.+=((5, 3L, "CD"))
data.+=((6, 3L, "DE"))
data.+=((7, 4L, "EF"))
data.+=((8, 4L, "FG"))
data.+=((9, 4L, "HI"))
data.+=((10, 4L, "IJ"))
val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c)
tEnv.registerTable("T", t)
val sql =
"""
|SELECT b, min(c), max(c)
|FROM (
| SELECT a, b, listagg(c) as c
| FROM T
| GROUP BY a, b)
|GROUP BY b
""".stripMargin
val sink = new TestingRetractSink
tEnv.sqlQuery(sql).toRetractStream[Row].addSink(sink)
env.execute()
val expected = List("1,A,A", "2,B,BC", "3,C,DE", "4,EF,IJ")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testBigDataOfMinMaxWithBinaryString(): Unit = {
val data = new mutable.MutableList[(Int, Long, String)]
for (i <- 0 until 100) {
data.+=((i % 10, i, i.toString))
}
val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c)
tEnv.registerTable("T", t)
val sql =
"""
|SELECT a, min(b), max(c), min(c) FROM T GROUP BY a
""".stripMargin
val sink = new TestingRetractSink
tEnv.sqlQuery(sql).toRetractStream[Row].addSink(sink)
env.execute()
val expected = List("0,0,90,0", "1,1,91,1", "2,2,92,12", "3,3,93,13",
"4,4,94,14", "5,5,95,15", "6,6,96,16", "7,7,97,17",
"8,8,98,18", "9,9,99,19")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testAggWithFilterClause(): Unit = {
val data = new mutable.MutableList[(Int, Long, String, Boolean)]
data.+=((1, 5L, "B", true))
data.+=((1, 4L, "C", false))
data.+=((1, 2L, "A", true))
data.+=((2, 1L, "A", true))
data.+=((2, 2L, "B", false))
data.+=((1, 6L, "A", true))
data.+=((2, 2L, "B", false))
data.+=((3, 5L, "B", true))
data.+=((2, 3L, "C", true))
data.+=((2, 3L, "D", true))
val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c, 'd)
tEnv.registerTable("T", t)
// test declarative and imperative aggregates
val sql =
"""
|SELECT
| a,
| sum(b) filter (where c = 'A'),
| count(distinct c) filter (where d is true),
| max(b)
|FROM T GROUP BY a
""".stripMargin
val sink = new TestingRetractSink
tEnv.sqlQuery(sql).toRetractStream[Row].addSink(sink)
env.execute()
val expected = List("1,8,2,6", "2,1,3,3", "3,null,1,5")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testMinMaxWithDecimal(): Unit = {
val data = new mutable.MutableList[Row]
data.+=(Row.of(BigDecimal(1).bigDecimal))
data.+=(Row.of(BigDecimal(2).bigDecimal))
data.+=(Row.of(BigDecimal(2).bigDecimal))
data.+=(Row.of(BigDecimal(4).bigDecimal))
data.+=(Row.of(BigDecimal(3).bigDecimal))
// a, count(a) as cnt
// 1, 1
// 2, 2
// 4, 1
// 3, 1
//
// cnt, min(a), max(a)
// 1, 1, 4
// 2, 2, 2
val rowType = new RowTypeInfo(BigDecimalTypeInfo.of(7, 2))
val t = failingDataSource(data)(rowType).toTable(tEnv, 'a)
tEnv.registerTable("T", t)
val sql =
"""
|select cnt, min(a), max(a) from (
| select a, count(a) as cnt from T group by a
|) group by cnt
""".stripMargin
val sink = new TestingRetractSink
tEnv.sqlQuery(sql).toRetractStream[Row].addSink(sink)
env.execute()
val expected = List("1,1.00,4.00", "2,2.00,2.00")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testCollectOnClusteredFields(): Unit = {
val data = List(
(1, 1, (12, "45.6")),
(2, 2, (12, "45.612")),
(3, 2, (13, "41.6")),
(4, 3, (14, "45.2136")),
(5, 3, (18, "42.6"))
)
tEnv.registerTable("src", env.fromCollection(data).toTable(tEnv, 'a, 'b, 'c))
val sql = "SELECT a, b, COLLECT(c) as `set` FROM src GROUP BY a, b"
val view1 = tEnv.sqlQuery(sql)
tEnv.registerTable("v1", view1)
val toCompositeObj = ToCompositeObj
tEnv.registerFunction("toCompObj", toCompositeObj)
val sql1 =
s"""
|SELECT
| a, b, COLLECT(toCompObj(t.sid, 'a', 100, t.point)) as info
|from (
| select
| a, b, uuid() as u, V.sid, V.point
| from
| v1, unnest(v1.`set`) as V(sid, point)
|) t
|group by t.a, t.b, t.u
""".stripMargin
val sink = new TestingRetractSink
tEnv.sqlQuery(sql1).toRetractStream[Row].addSink(sink)
env.execute()
val expected = List(
"1,1,{CompositeObj(12,a,100,45.6)=1}",
"2,2,{CompositeObj(12,a,100,45.612)=1}",
"3,2,{CompositeObj(13,a,100,41.6)=1}",
"4,3,{CompositeObj(14,a,100,45.2136)=1}",
"5,3,{CompositeObj(18,a,100,42.6)=1}")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
/** Test LISTAGG **/
@Test
def testListAgg(): Unit = {
tEnv.registerFunction("listagg_retract", new ListAggWithRetractAggFunction)
tEnv.registerFunction("listagg_ws_retract", new ListAggWsWithRetractAggFunction)
val sqlQuery =
s"""
|SELECT
| listagg(c), listagg(c, '-'), listagg_retract(c), listagg_ws_retract(c, '+')
|FROM MyTable
|GROUP BY c
|""".stripMargin
val data = new mutable.MutableList[(Int, Long, String)]
for (i <- 0 until 10) {
data.+=((i, 1L, "Hi"))
}
val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c)
tEnv.registerTable("MyTable", t)
val sink = new TestingRetractSink
tEnv.sqlQuery(sqlQuery).toRetractStream[Row].addSink(sink)
env.execute()
val expected = List("Hi,Hi,Hi,Hi,Hi,Hi,Hi,Hi,Hi,Hi,Hi-Hi-Hi-Hi-Hi-Hi-Hi-Hi-Hi-Hi," +
"Hi,Hi,Hi,Hi,Hi,Hi,Hi,Hi,Hi,Hi,Hi+Hi+Hi+Hi+Hi+Hi+Hi+Hi+Hi+Hi")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testSTDDEV(): Unit = {
val sqlQuery = "SELECT STDDEV_SAMP(a), STDDEV_POP(a) FROM MyTable GROUP BY c"
val data = new mutable.MutableList[(Double, Long, String)]
for (i <- 0 until 10) {
data.+=((i, 1L, "Hi"))
}
val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c)
tEnv.registerTable("MyTable", t)
val sink = new TestingRetractSink
tEnv.sqlQuery(sqlQuery).toRetractStream[Row].addSink(sink)
env.execute()
val expected = List("3.0276503540974917,2.8722813232690143")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
/** Test VAR_POP **/
@Test
def testVAR_POP(): Unit = {
val sqlQuery = "SELECT VAR_POP(a) FROM MyTable GROUP BY c"
val data = new mutable.MutableList[(Int, Long, String)]
data.+=((2900, 1L, "Hi"))
data.+=((2500, 1L, "Hi"))
data.+=((2600, 1L, "Hi"))
data.+=((3100, 1L, "Hello"))
data.+=((11000, 1L, "Hello"))
val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c)
tEnv.registerTable("MyTable", t)
val sink = new TestingRetractSink
tEnv.sqlQuery(sqlQuery).toRetractStream[Row].addSink(sink)
env.execute()
// TODO: define precise behavior of VAR_POP()
val expected = List(15602500.toString, 28889.toString)
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testLongVarargsAgg(): Unit = {
tEnv.registerFunction("var_sum", new VarSumAggFunction)
val sqlQuery = s"SELECT a, " +
s"var_sum(${0.until(260).map(_ => "b").mkString(",")}) from MyTable group by a"
val data = Seq[(Int, Int)]((1, 1), (2, 2))
val t = failingDataSource(data).toTable(tEnv, 'a, 'b)
tEnv.registerTable("MyTable", t)
val sink = new TestingRetractSink
tEnv.sqlQuery(sqlQuery).toRetractStream[Row].addSink(sink)
env.execute()
val expected = List("1,260", "2,520")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testCountDistinctWithBinaryRowSource(): Unit = {
// this case failed before because of an object reuse problem
val data = (0 until 100).map { i => ("1", "1", s"${i % 50}", "1") }.toList
// use BinaryRow source here for BinaryString reuse
val t = failingBinaryRowSource(data).toTable(tEnv, 'a, 'b, 'c, 'd)
tEnv.registerTable("src", t)
val sql =
s"""
|SELECT
| a,
| b,
| COUNT(distinct c) as uv
|FROM (
| SELECT
| a, b, c, d
| FROM
| src where b <> ''
| UNION ALL
| SELECT
| a, 'ALL' as b, c, d
| FROM
| src where b <> ''
|) t
|GROUP BY
| a, b
""".stripMargin
val t1 = tEnv.sqlQuery(sql)
val sink = new TestingRetractSink
t1.toRetractStream[Row].addSink(sink)
env.execute("test")
val expected = List("1,1,50", "1,ALL,50")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testDistinctWithMultiFilter(): Unit = {
val t = failingDataSource(TestData.tupleData3).toTable(tEnv).as('a, 'b, 'c)
tEnv.registerTable("MyTable", t)
val sqlQuery =
s"""
|SELECT
| b,
| SUM(DISTINCT (a * 3)),
| COUNT(DISTINCT SUBSTRING(c FROM 1 FOR 2)),
| COUNT(DISTINCT c),
| COUNT(DISTINCT c) filter (where MOD(a, 3) = 0),
| COUNT(DISTINCT c) filter (where MOD(a, 3) = 1)
|FROM MyTable
|GROUP BY b
""".stripMargin
val result = tEnv.sqlQuery(sqlQuery).toRetractStream[Row]
val sink = new TestingRetractSink
result.addSink(sink)
env.execute()
val expected = List(
"1,3,1,1,0,1", "2,15,1,2,1,0",
"3,45,3,3,1,1", "4,102,1,4,1,2",
"5,195,1,5,2,1", "6,333,1,6,2,2")
assertEquals(expected.sorted, sink.getRetractResults.sorted)
}
@Test
def testPruneUselessAggCall(): Unit = {
val data = new mutable.MutableList[(Int, Long, String)]
data.+=((1, 1L, "Hi"))
data.+=((2, 2L, "Hello"))
data.+=((3, 2L, "Hello world"))
val t = failingDataSource(data).toTable(tEnv, 'a, 'b, 'c)
tEnv.registerTable("T", t)
val t1 = tEnv.sqlQuery(
"select a from (select b, max(a) as a, count(*), max(c) as c from T group by b) T1")
val sink = new TestingRetractSink
t1.toRetractStream[Row].addSink(sink).setParallelism(1)
env.execute()
val expected = List("1", "3")
assertEquals(expected, sink.getRetractResults)
}
@Test
def testGenericTypesWithoutStateClean(): Unit = {
// there is no dedicated switch to disable state cleanup, so disable it by
// setting the idle state retention time to zero.
// TODO verify all tests with state cleanup disabled.
tEnv.getConfig.setIdleStateRetentionTime(Time.days(0), Time.days(0))
val t = failingDataSource(Seq(1, 2, 3)).toTable(tEnv, 'a)
val results = t
.select(new GenericAggregateFunction()('a))
.toRetractStream[Row]
val sink = new TestingRetractSink
results.addSink(sink).setParallelism(1)
env.execute()
}
}
| gyfora/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/runtime/stream/sql/AggregateITCase.scala | Scala | apache-2.0 | 37,690 |
/* NSC -- new Scala compiler
* Copyright 2005-2012 LAMP/EPFL
* @author Martin Odersky
*/
package dotty.tools.dotc
package backend.jvm
import dotty.tools.backend.jvm.GenBCodePipeline
import dotty.tools.dotc.ast.Trees.Select
import dotty.tools.dotc.ast.tpd._
import dotty.tools.dotc.core.Names.TermName
import dotty.tools.dotc.core.StdNames
import dotty.tools.dotc.core.StdNames._
import dotty.tools.dotc.core.Types.{JavaArrayType, ErrorType, Type}
import scala.collection.{ mutable, immutable }
import core.Contexts.Context
import core.Symbols.{Symbol, NoSymbol}
/** Scala primitive operations are represented as methods in `Any` and
* `AnyVal` subclasses. Here we demultiplex them by providing a mapping
* from their symbols to integers. Different methods exist for
* different value types, but with the same meaning (like plus, minus,
* etc.). They will all be mapped to the same int.
*
* Note: The three equality methods have the following semantics:
* - `"=="` checks for `null`, and if non-null, calls
* `java.lang.Object.equals`
* `(class: Any; modifier: final)`. Primitive: `EQ`
* - `"eq"` usual reference comparison
* `(class: AnyRef; modifier: final)`. Primitive: `ID`
* - `"equals"` user-defined equality (Java semantics)
* `(class: Object; modifier: none)`. Primitive: `EQUALS`
*
* Inspired by the `scalac` compiler. A small self-contained sketch of the three
* equality semantics is appended after this class.
*/
class DottyPrimitives(ctx: Context) {
import scala.tools.nsc.backend.ScalaPrimitives._
private lazy val primitives: immutable.Map[Symbol, Int] = init
/** Return the code for the given symbol. */
def getPrimitive(sym: Symbol): Int = {
primitives(sym)
}
/**
* Return the primitive code of the given operation. If the
* operation is an array get/set, we inspect the type of the receiver
* to demux the operation.
*
* @param fun The method symbol
* @param tpe The type of the receiver object. It is used only for array
* operations
*/
def getPrimitive(app: Apply, tpe: Type)(implicit ctx: Context): Int = {
val fun = app.fun.symbol
val defn = ctx.definitions
val code = app.fun match {
case Select(_, nme.primitive.arrayLength) =>
LENGTH
case Select(_, nme.primitive.arrayUpdate) =>
UPDATE
case Select(_, nme.primitive.arrayApply) =>
APPLY
case _ => getPrimitive(fun)
}
def elementType: Type = tpe.widenDealias match {
case defn.ArrayOf(el) => el
case JavaArrayType(el) => el
case _ =>
ctx.error(s"expected Array $tpe")
ErrorType
}
code match {
case APPLY =>
defn.scalaClassName(elementType) match {
case tpnme.Boolean => ZARRAY_GET
case tpnme.Byte => BARRAY_GET
case tpnme.Short => SARRAY_GET
case tpnme.Char => CARRAY_GET
case tpnme.Int => IARRAY_GET
case tpnme.Long => LARRAY_GET
case tpnme.Float => FARRAY_GET
case tpnme.Double => DARRAY_GET
case _ => OARRAY_GET
}
case UPDATE =>
defn.scalaClassName(elementType) match {
case tpnme.Boolean => ZARRAY_SET
case tpnme.Byte => BARRAY_SET
case tpnme.Short => SARRAY_SET
case tpnme.Char => CARRAY_SET
case tpnme.Int => IARRAY_SET
case tpnme.Long => LARRAY_SET
case tpnme.Float => FARRAY_SET
case tpnme.Double => DARRAY_SET
case _ => OARRAY_SET
}
case LENGTH =>
defn.scalaClassName(elementType) match {
case tpnme.Boolean => ZARRAY_LENGTH
case tpnme.Byte => BARRAY_LENGTH
case tpnme.Short => SARRAY_LENGTH
case tpnme.Char => CARRAY_LENGTH
case tpnme.Int => IARRAY_LENGTH
case tpnme.Long => LARRAY_LENGTH
case tpnme.Float => FARRAY_LENGTH
case tpnme.Double => DARRAY_LENGTH
case _ => OARRAY_LENGTH
}
case _ =>
code
}
}
/** Initialize the primitive map */
private def init: immutable.Map[Symbol, Int] = {
implicit val ctx: Context = this.ctx
import core.Symbols.defn
val primitives = new mutable.HashMap[Symbol, Int]()
/** Add a primitive operation to the map */
def addPrimitive(s: Symbol, code: Int): Unit = {
assert(!(primitives contains s), "Duplicate primitive " + s)
primitives(s) = code
}
def addPrimitives(cls: Symbol, method: TermName, code: Int)(implicit ctx: Context): Unit = {
val alts = cls.info.member(method).alternatives.map(_.symbol)
if (alts.isEmpty)
ctx.error(s"Unknown primitive method $cls.$method")
else alts foreach (s =>
addPrimitive(s,
s.info.paramTypess match {
case List(tp :: _) if code == ADD && tp =:= ctx.definitions.StringType => CONCAT
case _ => code
}
)
)
}
// scala.Any
addPrimitive(defn.Any_==, EQ)
addPrimitive(defn.Any_!=, NE)
addPrimitive(defn.Any_isInstanceOf, IS)
addPrimitive(defn.Any_asInstanceOf, AS)
addPrimitive(defn.Any_##, HASH)
// java.lang.Object
addPrimitive(defn.Object_eq, ID)
addPrimitive(defn.Object_ne, NI)
/* addPrimitive(defn.Any_==, EQ)
addPrimitive(defn.Any_!=, NE)*/
addPrimitive(defn.Object_synchronized, SYNCHRONIZED)
/*addPrimitive(defn.Any_isInstanceOf, IS)
addPrimitive(defn.Any_asInstanceOf, AS)*/
// java.lang.String
addPrimitive(defn.String_+, CONCAT)
import core.StdNames.nme
// scala.Array
lazy val ArrayClass = defn.ArrayClass
addPrimitives(ArrayClass, nme.length, LENGTH)
addPrimitives(ArrayClass, nme.apply, APPLY)
addPrimitives(ArrayClass, nme.update, UPDATE)
// scala.Boolean
lazy val BooleanClass = defn.BooleanClass
addPrimitives(BooleanClass, nme.EQ, EQ)
addPrimitives(BooleanClass, nme.NE, NE)
addPrimitives(BooleanClass, nme.UNARY_!, ZNOT)
addPrimitives(BooleanClass, nme.ZOR, ZOR)
addPrimitives(BooleanClass, nme.ZAND, ZAND)
addPrimitives(BooleanClass, nme.OR, OR)
addPrimitives(BooleanClass, nme.AND, AND)
addPrimitives(BooleanClass, nme.XOR, XOR)
// scala.Byte
lazy val ByteClass = defn.ByteClass
addPrimitives(ByteClass, nme.EQ, EQ)
addPrimitives(ByteClass, nme.NE, NE)
addPrimitives(ByteClass, nme.ADD, ADD)
addPrimitives(ByteClass, nme.SUB, SUB)
addPrimitives(ByteClass, nme.MUL, MUL)
addPrimitives(ByteClass, nme.DIV, DIV)
addPrimitives(ByteClass, nme.MOD, MOD)
addPrimitives(ByteClass, nme.LT, LT)
addPrimitives(ByteClass, nme.LE, LE)
addPrimitives(ByteClass, nme.GT, GT)
addPrimitives(ByteClass, nme.GE, GE)
addPrimitives(ByteClass, nme.XOR, XOR)
addPrimitives(ByteClass, nme.OR, OR)
addPrimitives(ByteClass, nme.AND, AND)
addPrimitives(ByteClass, nme.LSL, LSL)
addPrimitives(ByteClass, nme.LSR, LSR)
addPrimitives(ByteClass, nme.ASR, ASR)
// conversions
addPrimitives(ByteClass, nme.toByte, B2B)
addPrimitives(ByteClass, nme.toShort, B2S)
addPrimitives(ByteClass, nme.toChar, B2C)
addPrimitives(ByteClass, nme.toInt, B2I)
addPrimitives(ByteClass, nme.toLong, B2L)
// unary methods
addPrimitives(ByteClass, nme.UNARY_+, POS)
addPrimitives(ByteClass, nme.UNARY_-, NEG)
addPrimitives(ByteClass, nme.UNARY_~, NOT)
addPrimitives(ByteClass, nme.toFloat, B2F)
addPrimitives(ByteClass, nme.toDouble, B2D)
// scala.Short
lazy val ShortClass = defn.ShortClass
addPrimitives(ShortClass, nme.EQ, EQ)
addPrimitives(ShortClass, nme.NE, NE)
addPrimitives(ShortClass, nme.ADD, ADD)
addPrimitives(ShortClass, nme.SUB, SUB)
addPrimitives(ShortClass, nme.MUL, MUL)
addPrimitives(ShortClass, nme.DIV, DIV)
addPrimitives(ShortClass, nme.MOD, MOD)
addPrimitives(ShortClass, nme.LT, LT)
addPrimitives(ShortClass, nme.LE, LE)
addPrimitives(ShortClass, nme.GT, GT)
addPrimitives(ShortClass, nme.GE, GE)
addPrimitives(ShortClass, nme.XOR, XOR)
addPrimitives(ShortClass, nme.OR, OR)
addPrimitives(ShortClass, nme.AND, AND)
addPrimitives(ShortClass, nme.LSL, LSL)
addPrimitives(ShortClass, nme.LSR, LSR)
addPrimitives(ShortClass, nme.ASR, ASR)
// conversions
addPrimitives(ShortClass, nme.toByte, S2B)
addPrimitives(ShortClass, nme.toShort, S2S)
addPrimitives(ShortClass, nme.toChar, S2C)
addPrimitives(ShortClass, nme.toInt, S2I)
addPrimitives(ShortClass, nme.toLong, S2L)
// unary methods
addPrimitives(ShortClass, nme.UNARY_+, POS)
addPrimitives(ShortClass, nme.UNARY_-, NEG)
addPrimitives(ShortClass, nme.UNARY_~, NOT)
addPrimitives(ShortClass, nme.toFloat, S2F)
addPrimitives(ShortClass, nme.toDouble, S2D)
// scala.Char
lazy val CharClass = defn.CharClass
addPrimitives(CharClass, nme.EQ, EQ)
addPrimitives(CharClass, nme.NE, NE)
addPrimitives(CharClass, nme.ADD, ADD)
addPrimitives(CharClass, nme.SUB, SUB)
addPrimitives(CharClass, nme.MUL, MUL)
addPrimitives(CharClass, nme.DIV, DIV)
addPrimitives(CharClass, nme.MOD, MOD)
addPrimitives(CharClass, nme.LT, LT)
addPrimitives(CharClass, nme.LE, LE)
addPrimitives(CharClass, nme.GT, GT)
addPrimitives(CharClass, nme.GE, GE)
addPrimitives(CharClass, nme.XOR, XOR)
addPrimitives(CharClass, nme.OR, OR)
addPrimitives(CharClass, nme.AND, AND)
addPrimitives(CharClass, nme.LSL, LSL)
addPrimitives(CharClass, nme.LSR, LSR)
addPrimitives(CharClass, nme.ASR, ASR)
// conversions
addPrimitives(CharClass, nme.toByte, C2B)
addPrimitives(CharClass, nme.toShort, C2S)
addPrimitives(CharClass, nme.toChar, C2C)
addPrimitives(CharClass, nme.toInt, C2I)
addPrimitives(CharClass, nme.toLong, C2L)
// unary methods
addPrimitives(CharClass, nme.UNARY_+, POS)
addPrimitives(CharClass, nme.UNARY_-, NEG)
addPrimitives(CharClass, nme.UNARY_~, NOT)
addPrimitives(CharClass, nme.toFloat, C2F)
addPrimitives(CharClass, nme.toDouble, C2D)
// scala.Int
lazy val IntClass = defn.IntClass
addPrimitives(IntClass, nme.EQ, EQ)
addPrimitives(IntClass, nme.NE, NE)
addPrimitives(IntClass, nme.ADD, ADD)
addPrimitives(IntClass, nme.SUB, SUB)
addPrimitives(IntClass, nme.MUL, MUL)
addPrimitives(IntClass, nme.DIV, DIV)
addPrimitives(IntClass, nme.MOD, MOD)
addPrimitives(IntClass, nme.LT, LT)
addPrimitives(IntClass, nme.LE, LE)
addPrimitives(IntClass, nme.GT, GT)
addPrimitives(IntClass, nme.GE, GE)
addPrimitives(IntClass, nme.XOR, XOR)
addPrimitives(IntClass, nme.OR, OR)
addPrimitives(IntClass, nme.AND, AND)
addPrimitives(IntClass, nme.LSL, LSL)
addPrimitives(IntClass, nme.LSR, LSR)
addPrimitives(IntClass, nme.ASR, ASR)
// conversions
addPrimitives(IntClass, nme.toByte, I2B)
addPrimitives(IntClass, nme.toShort, I2S)
addPrimitives(IntClass, nme.toChar, I2C)
addPrimitives(IntClass, nme.toInt, I2I)
addPrimitives(IntClass, nme.toLong, I2L)
// unary methods
addPrimitives(IntClass, nme.UNARY_+, POS)
addPrimitives(IntClass, nme.UNARY_-, NEG)
addPrimitives(IntClass, nme.UNARY_~, NOT)
addPrimitives(IntClass, nme.toFloat, I2F)
addPrimitives(IntClass, nme.toDouble, I2D)
// scala.Long
lazy val LongClass = defn.LongClass
addPrimitives(LongClass, nme.EQ, EQ)
addPrimitives(LongClass, nme.NE, NE)
addPrimitives(LongClass, nme.ADD, ADD)
addPrimitives(LongClass, nme.SUB, SUB)
addPrimitives(LongClass, nme.MUL, MUL)
addPrimitives(LongClass, nme.DIV, DIV)
addPrimitives(LongClass, nme.MOD, MOD)
addPrimitives(LongClass, nme.LT, LT)
addPrimitives(LongClass, nme.LE, LE)
addPrimitives(LongClass, nme.GT, GT)
addPrimitives(LongClass, nme.GE, GE)
addPrimitives(LongClass, nme.XOR, XOR)
addPrimitives(LongClass, nme.OR, OR)
addPrimitives(LongClass, nme.AND, AND)
addPrimitives(LongClass, nme.LSL, LSL)
addPrimitives(LongClass, nme.LSR, LSR)
addPrimitives(LongClass, nme.ASR, ASR)
// conversions
addPrimitives(LongClass, nme.toByte, L2B)
addPrimitives(LongClass, nme.toShort, L2S)
addPrimitives(LongClass, nme.toChar, L2C)
addPrimitives(LongClass, nme.toInt, L2I)
addPrimitives(LongClass, nme.toLong, L2L)
// unary methods
addPrimitives(LongClass, nme.UNARY_+, POS)
addPrimitives(LongClass, nme.UNARY_-, NEG)
addPrimitives(LongClass, nme.UNARY_~, NOT)
addPrimitives(LongClass, nme.toFloat, L2F)
addPrimitives(LongClass, nme.toDouble, L2D)
// scala.Float
lazy val FloatClass = defn.FloatClass
addPrimitives(FloatClass, nme.EQ, EQ)
addPrimitives(FloatClass, nme.NE, NE)
addPrimitives(FloatClass, nme.ADD, ADD)
addPrimitives(FloatClass, nme.SUB, SUB)
addPrimitives(FloatClass, nme.MUL, MUL)
addPrimitives(FloatClass, nme.DIV, DIV)
addPrimitives(FloatClass, nme.MOD, MOD)
addPrimitives(FloatClass, nme.LT, LT)
addPrimitives(FloatClass, nme.LE, LE)
addPrimitives(FloatClass, nme.GT, GT)
addPrimitives(FloatClass, nme.GE, GE)
// conversions
addPrimitives(FloatClass, nme.toByte, F2B)
addPrimitives(FloatClass, nme.toShort, F2S)
addPrimitives(FloatClass, nme.toChar, F2C)
addPrimitives(FloatClass, nme.toInt, F2I)
addPrimitives(FloatClass, nme.toLong, F2L)
addPrimitives(FloatClass, nme.toFloat, F2F)
addPrimitives(FloatClass, nme.toDouble, F2D)
// unary methods
addPrimitives(FloatClass, nme.UNARY_+, POS)
addPrimitives(FloatClass, nme.UNARY_-, NEG)
// scala.Double
lazy val DoubleClass = defn.DoubleClass
addPrimitives(DoubleClass, nme.EQ, EQ)
addPrimitives(DoubleClass, nme.NE, NE)
addPrimitives(DoubleClass, nme.ADD, ADD)
addPrimitives(DoubleClass, nme.SUB, SUB)
addPrimitives(DoubleClass, nme.MUL, MUL)
addPrimitives(DoubleClass, nme.DIV, DIV)
addPrimitives(DoubleClass, nme.MOD, MOD)
addPrimitives(DoubleClass, nme.LT, LT)
addPrimitives(DoubleClass, nme.LE, LE)
addPrimitives(DoubleClass, nme.GT, GT)
addPrimitives(DoubleClass, nme.GE, GE)
// conversions
addPrimitives(DoubleClass, nme.toByte, D2B)
addPrimitives(DoubleClass, nme.toShort, D2S)
addPrimitives(DoubleClass, nme.toChar, D2C)
addPrimitives(DoubleClass, nme.toInt, D2I)
addPrimitives(DoubleClass, nme.toLong, D2L)
addPrimitives(DoubleClass, nme.toFloat, D2F)
addPrimitives(DoubleClass, nme.toDouble, D2D)
// unary methods
addPrimitives(DoubleClass, nme.UNARY_+, POS)
addPrimitives(DoubleClass, nme.UNARY_-, NEG)
primitives.toMap
}
def isPrimitive(fun: Tree): Boolean = {
(primitives contains fun.symbol(ctx)) ||
(fun.symbol(ctx) == NoSymbol // the only trees that do not have a symbol assigned are array.{update,select,length,clone}
&& (fun match {
case Select(_, StdNames.nme.clone_) => false // but array.clone is NOT a primitive op.
case _ => true
}))
}
}
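// A minimal, self-contained sketch of the three equality semantics described in
// the scaladoc above (`==` -> EQ, `eq` -> ID, `equals` -> EQUALS), shown on plain
// values. `EqualitySemanticsDemo` is a hypothetical name used only for
// illustration; it is not referenced anywhere in the compiler.
object EqualitySemanticsDemo {
  def main(args: Array[String]): Unit = {
    val a = new String("abc")
    val b = new String("abc")
    println(a == b)      // true: `==` is null-safe and delegates to `equals` (primitive EQ)
    println(a eq b)      // false: `eq` compares references (primitive ID)
    println(a equals b)  // true: user-defined/Java equality (primitive EQUALS)
    val n: String = null
    println(n == null)   // true: `==` handles null without throwing
  }
}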
| densh/dotty | src/dotty/tools/backend/jvm/scalaPrimitives.scala | Scala | bsd-3-clause | 15,295 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package scalaguide.detailed.filters.csp
import java.nio.charset.Charset
import java.nio.charset.StandardCharsets
import java.security.MessageDigest
import java.util.Base64
// #csp-hash-generator
class CSPHashGenerator(digestAlgorithm: String) {
private val digestInstance: MessageDigest = {
digestAlgorithm match {
case "sha256" =>
MessageDigest.getInstance("SHA-256")
case "sha384" =>
MessageDigest.getInstance("SHA-384")
case "sha512" =>
MessageDigest.getInstance("SHA-512")
}
}
def generateUTF8(str: String): String = {
generate(str, StandardCharsets.UTF_8)
}
def generate(str: String, charset: Charset): String = {
val bytes = str.getBytes(charset)
encode(digestInstance.digest(bytes))
}
protected def encode(digestBytes: Array[Byte]): String = {
val rawHash = Base64.getMimeEncoder.encodeToString(digestBytes)
s"'$digestAlgorithm-$rawHash'"
}
}
// #csp-hash-generator
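// A minimal usage sketch, assuming the generator above is used to whitelist an
// inline script via a script-src hash source. `CSPHashGeneratorExample` and the
// sample script are hypothetical names/values used only for illustration.
object CSPHashGeneratorExample {
  def main(args: Array[String]): Unit = {
    val generator = new CSPHashGenerator("sha256")
    // Prints something like 'sha256-<base64 digest>', which could be placed in a
    // Content-Security-Policy script-src directive for the matching inline script.
    println(generator.generateUTF8("""console.log("hello");"""))
  }
}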
| benmccann/playframework | documentation/manual/working/commonGuide/filters/code/scalaguide/detailed/filters/csp/CSPHashGenerator.scala | Scala | apache-2.0 | 1,032 |
package com.wavesplatform.state.diffs
import cats.instances.list._
import cats.instances.map._
import cats.syntax.semigroup._
import cats.syntax.traverse._
import com.wavesplatform.account.Address
import com.wavesplatform.lang.ValidationError
import com.wavesplatform.state._
import com.wavesplatform.transaction.Asset.{IssuedAsset, Waves}
import com.wavesplatform.transaction.TxValidationError.{GenericError, Validation}
import com.wavesplatform.transaction.transfer.MassTransferTransaction.ParsedTransfer
import com.wavesplatform.transaction.transfer._
object MassTransferTransactionDiff {
def apply(blockchain: Blockchain, blockTime: Long)(tx: MassTransferTransaction): Either[ValidationError, Diff] = {
def parseTransfer(xfer: ParsedTransfer): Validation[(Map[Address, Portfolio], Long)] = {
for {
recipientAddr <- blockchain.resolveAlias(xfer.address)
portfolio = tx.assetId
.fold(Map(recipientAddr -> Portfolio(xfer.amount, LeaseBalance.empty, Map.empty))) { asset =>
Map(recipientAddr -> Portfolio(0, LeaseBalance.empty, Map(asset -> xfer.amount)))
}
} yield (portfolio, xfer.amount)
}
val portfoliosEi = tx.transfers.toList.traverse(parseTransfer)
portfoliosEi.flatMap { list: List[(Map[Address, Portfolio], Long)] =>
val sender = Address.fromPublicKey(tx.sender)
val foldInit = (Map(sender -> Portfolio(-tx.fee, LeaseBalance.empty, Map.empty)), 0L)
val (recipientPortfolios, totalAmount) = list.fold(foldInit) { (u, v) =>
(u._1 combine v._1, u._2 + v._2)
}
val completePortfolio =
recipientPortfolios
.combine(
tx.assetId
.fold(Map(sender -> Portfolio(-totalAmount, LeaseBalance.empty, Map.empty))) { asset =>
Map(sender -> Portfolio(0, LeaseBalance.empty, Map(asset -> -totalAmount)))
}
)
val assetIssued = tx.assetId match {
case Waves => true
case asset @ IssuedAsset(_) => blockchain.assetDescription(asset).isDefined
}
Either.cond(
assetIssued,
Diff(portfolios = completePortfolio, scriptsRun = DiffsCommon.countScriptRuns(blockchain, tx)),
GenericError(s"Attempt to transfer a nonexistent asset")
)
}
}
}
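// A minimal sketch of the map-merging behaviour used above: per-address portfolio
// maps are combined with cats' Semigroup `combine`, so a recipient appearing in
// several transfers accumulates its amounts. Plain Long balances stand in for
// Portfolio here, and `MapCombineSketch` with its sample addresses is hypothetical,
// used only for illustration.
object MapCombineSketch {
  import cats.instances.long._
  import cats.instances.map._
  import cats.syntax.semigroup._

  def main(args: Array[String]): Unit = {
    val a = Map("addr1" -> 5L, "addr2" -> 3L)
    val b = Map("addr1" -> 2L)
    println(a |+| b) // Map(addr1 -> 7, addr2 -> 3): amounts under the same key are summed
  }
}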
| wavesplatform/Waves | node/src/main/scala/com/wavesplatform/state/diffs/MassTransferTransactionDiff.scala | Scala | mit | 2,315 |
package org.jetbrains.plugins.scala.lang.refactoring.util
import java.awt.Point
import javax.swing.SwingUtilities
import com.intellij.openapi.editor.Editor
import com.intellij.openapi.project.Project
import com.intellij.openapi.ui.MessageType
import com.intellij.openapi.ui.popup.{Balloon, JBPopupFactory}
import com.intellij.psi.PsiElement
import com.intellij.refactoring.ui.ConflictsDialog
import com.intellij.ui.awt.RelativePoint
import com.intellij.util.containers.MultiMap
/**
* @author Alexander Podkhalyuzin
*/
trait ConflictsReporter {
def reportConflicts(project: Project, conflicts: MultiMap[PsiElement, String]): Boolean
}
trait EmptyConflictsReporter extends ConflictsReporter {
override def reportConflicts(project: Project, conflicts: MultiMap[PsiElement, String]) = false
}
trait DialogConflictsReporter extends ConflictsReporter {
override def reportConflicts(project: Project, conflicts: MultiMap[PsiElement, String]): Boolean = {
val conflictsDialog = new ConflictsDialog(project, conflicts, null, true, false)
conflictsDialog.show()
conflictsDialog.isOK
}
}
class BalloonConflictsReporter(editor: Editor) extends ConflictsReporter {
def reportConflicts(project: Project, conflicts: MultiMap[PsiElement, String]): Boolean = {
val messages = conflicts.values().toArray.toSet
createWarningBalloon(messages.mkString("\n"))
true // do not block the refactoring, only show the balloon
}
private def createWarningBalloon(message: String): Unit = {
SwingUtilities invokeLater new Runnable {
def run(): Unit = {
val popupFactory = JBPopupFactory.getInstance
val bestLocation = popupFactory.guessBestPopupLocation(editor)
val screenPoint: Point = bestLocation.getScreenPoint
val y: Int = screenPoint.y - editor.getLineHeight * 2
val builder = popupFactory.createHtmlTextBalloonBuilder(message, null, MessageType.WARNING.getPopupBackground, null)
val balloon: Balloon = builder.setFadeoutTime(-1).setShowCallout(false).createBalloon
balloon.show(new RelativePoint(new Point(screenPoint.x, y)), Balloon.Position.above)
}
}
}
}
| triggerNZ/intellij-scala | src/org/jetbrains/plugins/scala/lang/refactoring/util/ConflictsReporter.scala | Scala | apache-2.0 | 2156 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import org.scalactic.{Equality, Every, One, Many, Prettifier}
import org.scalactic.StringNormalizations._
import SharedHelpers._
import FailureMessages.decorateToStringValue
import Matchers._
import exceptions.TestFailedException
class EveryShouldContainInOrderElementsOfLogicalAndSpec extends FunSpec {
private val prettifier = Prettifier.default
//ADDITIONAL//
val invertedListOfStringEquality =
new Equality[Every[String]] {
def areEqual(a: Every[String], b: Any): Boolean = a != b
}
val upperCaseStringEquality =
new Equality[String] {
def areEqual(a: String, b: Any): Boolean = upperCase(a) == upperCase(b)
}
private def upperCase(value: Any): Any =
value match {
case l: Every[_] => l.map(upperCase(_))
case s: String => s.toUpperCase
case c: Char => c.toString.toUpperCase.charAt(0)
case (s1: String, s2: String) => (s1.toUpperCase, s2.toUpperCase)
case e: java.util.Map.Entry[_, _] =>
(e.getKey, e.getValue) match {
case (k: String, v: String) => Entry(k.toUpperCase, v.toUpperCase)
case _ => value
}
case _ => value
}
val upperCaseListOfStringEquality =
new Equality[Every[String]] {
def areEqual(a: Every[String], b: Any): Boolean = upperCase(a) == upperCase(b)
}
val fileName: String = "EveryShouldContainInOrderElementsOfLogicalAndSpec.scala"
describe("an Every") {
val fumList: Every[String] = Every("fex", "fum", "fum", "foe", "fie", "fie", "fee")
val toList: Every[String] = Every("too", "you", "you", "to", "birthday", "happy", "happy")
describe("when used with (contain inOrderElementsOf xx and contain inOrderElementsOf xx)") {
it("should do nothing if valid, else throw a TFE with an appropriate error message") {
fumList should (contain inOrderElementsOf Seq("fum", "foe", "fie", "fee") and contain inOrderElementsOf Seq("fum", "foe", "fie", "fee"))
val e1 = intercept[TestFailedException] {
fumList should (contain inOrderElementsOf Seq("fee", "fie", "foe", "fum") and contain inOrderElementsOf Seq("fum", "foe", "fie", "fee"))
}
checkMessageStackDepth(e1, FailureMessages.didNotContainAllElementsOfInOrder(prettifier, fumList, Seq("fee", "fie", "foe", "fum")), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
fumList should (contain inOrderElementsOf Seq("fum", "foe", "fie", "fee") and contain inOrderElementsOf Seq("fee", "fie", "foe", "fum"))
}
checkMessageStackDepth(e2, FailureMessages.containedAllElementsOfInOrder(prettifier, fumList, Seq("fum", "foe", "fie", "fee")) + ", but " + FailureMessages.didNotContainAllElementsOfInOrder(prettifier, fumList, Seq("fee", "fie", "foe", "fum")), fileName, thisLineNumber - 2)
}
it("should use the implicit Equality in scope") {
implicit val ise = upperCaseStringEquality
fumList should (contain inOrderElementsOf Seq("FUM", "FOE", "FIE", "FEE") and contain inOrderElementsOf Seq("FUM", "FOE", "FIE", "FEE"))
val e1 = intercept[TestFailedException] {
fumList should (contain inOrderElementsOf Seq("FEE", "FIE", "FOE", "FUM") and contain inOrderElementsOf Seq("FUM", "FOE", "FIE", "FEE"))
}
checkMessageStackDepth(e1, FailureMessages.didNotContainAllElementsOfInOrder(prettifier, fumList, Seq("FEE", "FIE", "FOE", "FUM")), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
fumList should (contain inOrderElementsOf Seq("FUM", "FOE", "FIE", "FEE") and (contain inOrderElementsOf Seq("FEE", "FIE", "FOE", "FUM")))
}
checkMessageStackDepth(e2, FailureMessages.containedAllElementsOfInOrder(prettifier, fumList, Seq("FUM", "FOE", "FIE", "FEE")) + ", but " + FailureMessages.didNotContainAllElementsOfInOrder(prettifier, fumList, Seq("FEE", "FIE", "FOE", "FUM")), fileName, thisLineNumber - 2)
}
it("should use an explicitly provided Equality") {
(fumList should (contain inOrderElementsOf Seq("FUM", "FOE", "FIE", "FEE") and contain inOrderElementsOf Seq("FUM", "FOE", "FIE", "FEE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (contain inOrderElementsOf Seq("FEE", "FIE", "FOE", "FUM") and contain inOrderElementsOf Seq("FUM", "FOE", "FIE", "FEE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, FailureMessages.didNotContainAllElementsOfInOrder(prettifier, fumList, Seq("FEE", "FIE", "FOE", "FUM")), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
(fumList should (contain inOrderElementsOf Seq("FUM", "FOE", "FIE", "FEE") and contain inOrderElementsOf Seq("FEE", "FIE", "FOE", "FUM"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e2, FailureMessages.containedAllElementsOfInOrder(prettifier, fumList, Seq("FUM", "FOE", "FIE", "FEE")) + ", but " + FailureMessages.didNotContainAllElementsOfInOrder(prettifier, fumList, Seq("FEE", "FIE", "FOE", "FUM")), fileName, thisLineNumber - 2)
(fumList should (contain inOrderElementsOf Seq(" FUM ", " FOE ", " FIE ", " FEE ") and contain inOrderElementsOf Seq(" FUM ", " FOE ", " FIE ", " FEE "))) (after being lowerCased and trimmed, after being lowerCased and trimmed)
}
it("should do nothing when RHS contain duplicated value") {
fumList should (contain inOrderElementsOf Seq("fum", "fum", "foe", "fie", "fee") and contain inOrderElementsOf Seq("fum", "foe", "fie", "fee"))
fumList should (contain inOrderElementsOf Seq("fum", "foe", "fie", "fee") and contain inOrderElementsOf Seq("fum", "fum", "foe", "fie", "fee"))
}
}
describe("when used with (equal xx and contain inOrderElementsOf xx)") {
it("should do nothing if valid, else throw a TFE with an appropriate error message") {
fumList should (equal (fumList) and contain inOrderElementsOf Seq("fum", "foe", "fie", "fee"))
val e1 = intercept[TestFailedException] {
fumList should (equal (toList) and contain inOrderElementsOf Seq("fum", "foe", "fie", "fee"))
}
checkMessageStackDepth(e1, FailureMessages.didNotEqual(prettifier, fumList, toList), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
fumList should (equal (fumList) and contain inOrderElementsOf Seq("fee", "fie", "foe", "fum"))
}
checkMessageStackDepth(e2, FailureMessages.equaled(prettifier, fumList, fumList) + ", but " + FailureMessages.didNotContainAllElementsOfInOrder(prettifier, fumList, Seq("fee", "fie", "foe", "fum")), fileName, thisLineNumber - 2)
}
it("should use the implicit Equality in scope") {
implicit val ise = upperCaseStringEquality
fumList should (equal (fumList) and contain inOrderElementsOf Seq("FUM", "FOE", "FIE", "FEE"))
val e1 = intercept[TestFailedException] {
fumList should (equal (toList) and contain inOrderElementsOf Seq("FUM", "FOE", "FIE", "FEE"))
}
checkMessageStackDepth(e1, FailureMessages.didNotEqual(prettifier, fumList, toList), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
fumList should (equal (fumList) and (contain inOrderElementsOf Seq("FEE", "FIE", "FOE", "FAM")))
}
checkMessageStackDepth(e2, FailureMessages.equaled(prettifier, fumList, fumList) + ", but " + FailureMessages.didNotContainAllElementsOfInOrder(prettifier, fumList, Seq("FEE", "FIE", "FOE", "FAM")), fileName, thisLineNumber - 2)
}
it("should use an explicitly provided Equality") {
(fumList should (equal (toList) and contain inOrderElementsOf Seq("FUM", "FOE", "FIE", "FEE"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (equal (toList) and contain inOrderElementsOf Seq("FEE", "FIE", "FOE", "FUM"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, FailureMessages.equaled(prettifier, fumList, toList) + ", but " + FailureMessages.didNotContainAllElementsOfInOrder(prettifier, fumList, Seq("FEE", "FIE", "FOE", "FUM")), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
(fumList should (equal (fumList) and contain inOrderElementsOf Seq("FUM", "FOE", "FIE", "FEE"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e2, FailureMessages.didNotEqual(prettifier, fumList, fumList), fileName, thisLineNumber - 2)
(fumList should (equal (toList) and contain inOrderElementsOf Seq(" FUM ", " FOE ", " FIE ", " FEE "))) (decided by invertedListOfStringEquality, after being lowerCased and trimmed)
}
it("should do nothing when RHS contain duplicated value") {
fumList should (equal (fumList) and contain inOrderElementsOf Seq("fum", "fum", "foe", "fie", "fee"))
}
}
describe("when used with (be xx and contain inOrderElementsOf xx)") {
it("should do nothing if valid, else throw a TFE with an appropriate error message") {
fumList should (be (fumList) and contain inOrderElementsOf Seq("fum", "foe", "fie", "fee"))
val e1 = intercept[TestFailedException] {
fumList should (be (toList) and contain inOrderElementsOf Seq("fum", "foe", "fie", "fee"))
}
checkMessageStackDepth(e1, FailureMessages.wasNotEqualTo(prettifier, fumList, toList), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
fumList should (be (fumList) and contain inOrderElementsOf Seq("fee", "fie", "foe", "fum"))
}
checkMessageStackDepth(e2, FailureMessages.wasEqualTo(prettifier, fumList, fumList) + ", but " + FailureMessages.didNotContainAllElementsOfInOrder(prettifier, fumList, Seq("fee", "fie", "foe", "fum")), fileName, thisLineNumber - 2)
}
it("should use the implicit Equality in scope") {
implicit val ise = upperCaseStringEquality
fumList should (be (fumList) and contain inOrderElementsOf Seq("FUM", "FOE", "FIE", "FEE"))
val e1 = intercept[TestFailedException] {
fumList should (be (toList) and contain inOrderElementsOf Seq("FUM", "FOE", "FIE", "FEE"))
}
checkMessageStackDepth(e1, FailureMessages.wasNotEqualTo(prettifier, fumList, toList), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
fumList should (be (fumList) and (contain inOrderElementsOf Seq("FEE", "FIE", "FOE", "FUM")))
}
checkMessageStackDepth(e2, FailureMessages.wasEqualTo(prettifier, fumList, fumList) + ", but " + FailureMessages.didNotContainAllElementsOfInOrder(prettifier, fumList, Seq("FEE", "FIE", "FOE", "FUM")), fileName, thisLineNumber - 2)
}
it("should use an explicitly provided Equality") {
(fumList should (be (fumList) and contain inOrderElementsOf Seq("FUM", "FOE", "FIE", "FEE"))) (decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (be (fumList) and contain inOrderElementsOf Seq("FEE", "FIE", "FOE", "FUM"))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, FailureMessages.wasEqualTo(prettifier, fumList, fumList) + ", but " + FailureMessages.didNotContainAllElementsOfInOrder(prettifier, fumList, Seq("FEE", "FIE", "FOE", "FUM")), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
(fumList should (be (toList) and contain inOrderElementsOf Seq("FUM", "FOE", "FIE", "FEE"))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e2, FailureMessages.wasNotEqualTo(prettifier, fumList, toList), fileName, thisLineNumber - 2)
(fumList should (be (fumList) and contain inOrderElementsOf Seq(" FUM ", " FOE ", " FIE ", " FEE "))) (after being lowerCased and trimmed)
}
it("should do nothing when RHS contain duplicated value") {
fumList should (be (fumList) and contain inOrderElementsOf Seq("fum", "fum", "foe", "fie", "fee"))
}
}
describe("when used with (contain inOrderElementsOf xx and be xx)") {
it("should do nothing if valid, else throw a TFE with an appropriate error message") {
fumList should (contain inOrderElementsOf Seq("fum", "foe", "fie", "fee") and be (fumList))
val e1 = intercept[TestFailedException] {
fumList should (contain inOrderElementsOf Seq("fum", "foe", "fie", "fee") and be (toList))
}
checkMessageStackDepth(e1, FailureMessages.containedAllElementsOfInOrder(prettifier, fumList, Seq("fum", "foe", "fie", "fee")) + ", but " + FailureMessages.wasNotEqualTo(prettifier, fumList, toList), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
fumList should (contain inOrderElementsOf Seq("fee", "fie", "foe", "fum") and be (fumList))
}
checkMessageStackDepth(e2, FailureMessages.didNotContainAllElementsOfInOrder(prettifier, fumList, Seq("fee", "fie", "foe", "fum")), fileName, thisLineNumber - 2)
}
it("should use the implicit Equality in scope") {
implicit val ise = upperCaseStringEquality
fumList should (contain inOrderElementsOf Seq("FUM", "FOE", "FIE", "FEE") and be (fumList))
val e1 = intercept[TestFailedException] {
fumList should (contain inOrderElementsOf Seq("FEE", "FIE", "FOE", "FUM") and be (toList))
}
checkMessageStackDepth(e1, FailureMessages.didNotContainAllElementsOfInOrder(prettifier, fumList, Seq("FEE", "FIE", "FOE", "FUM")), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
fumList should (contain inOrderElementsOf Seq("HAPPY", "BIRTHDAY", "TO", "YOU") and (be (fumList)))
}
checkMessageStackDepth(e2, FailureMessages.didNotContainAllElementsOfInOrder(prettifier, fumList, Seq("HAPPY", "BIRTHDAY", "TO", "YOU")), fileName, thisLineNumber - 2)
}
it("should use an explicitly provided Equality") {
(fumList should (contain inOrderElementsOf Seq("FUM", "FOE", "FIE", "FEE") and be (fumList))) (decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (contain inOrderElementsOf Seq("FEE", "FIE", "FOE", "FUM") and be (fumList))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, FailureMessages.didNotContainAllElementsOfInOrder(prettifier, fumList, Seq("FEE", "FIE", "FOE", "FUM")), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
(fumList should (contain inOrderElementsOf Seq("FUM", "FOE", "FIE", "FEE") and be (toList))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e2, FailureMessages.containedAllElementsOfInOrder(prettifier, fumList, Seq("FUM", "FOE", "FIE", "FEE")) + ", but " + FailureMessages.wasNotEqualTo(prettifier, fumList, toList), fileName, thisLineNumber - 2)
(fumList should (contain inOrderElementsOf Seq(" FUM ", " FOE ", " FIE ", " FEE ") and be (fumList))) (after being lowerCased and trimmed)
}
it("should do nothing when RHS contain duplicated value") {
fumList should (contain inOrderElementsOf Seq("fum", "fum", "foe", "fie", "fee") and be (fumList))
}
}
describe("when used with (not contain inOrderElementsOf xx and not contain inOrderElementsOf xx)") {
it("should do nothing if valid, else throw a TFE with an appropriate error message") {
fumList should (not contain inOrderElementsOf (Seq("fee", "fie", "foe", "fum")) and not contain inOrderElementsOf (Seq("fee", "fie", "foe", "fum")))
val e1 = intercept[TestFailedException] {
fumList should (not contain inOrderElementsOf (Seq("fum", "foe", "fie", "fee")) and not contain inOrderElementsOf (Seq("happy", "birthday", "to", "you")))
}
checkMessageStackDepth(e1, FailureMessages.containedAllElementsOfInOrder(prettifier, fumList, Seq("fum", "foe", "fie", "fee")), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
fumList should (not contain inOrderElementsOf (Seq("fee", "fie", "foe", "fum")) and not contain inOrderElementsOf (Seq("fum", "foe", "fie", "fee")))
}
checkMessageStackDepth(e2, FailureMessages.didNotContainAllElementsOfInOrder(prettifier, fumList, Seq("fee", "fie", "foe", "fum")) + ", but " + FailureMessages.containedAllElementsOfInOrder(prettifier, fumList, Seq("fum", "foe", "fie", "fee")), fileName, thisLineNumber - 2)
}
it("should use the implicit Equality in scope") {
implicit val ise = upperCaseStringEquality
fumList should (not contain inOrderElementsOf (Seq("FEE", "FIE", "FOE", "FUM")) and not contain inOrderElementsOf (Seq("FEE", "FIE", "FOE", "FUM")))
val e1 = intercept[TestFailedException] {
fumList should (not contain inOrderElementsOf (Seq("FUM", "FOE", "FIE", "FEE")) and not contain inOrderElementsOf (Seq("FEE", "FIE", "FOE", "FUM")))
}
checkMessageStackDepth(e1, FailureMessages.containedAllElementsOfInOrder(prettifier, fumList, Seq("FUM", "FOE", "FIE", "FEE")), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
fumList should (not contain inOrderElementsOf (Seq("FEE", "FIE", "FOE", "FUM")) and (not contain inOrderElementsOf (Seq("FUM", "FOE", "FIE", "FEE"))))
}
checkMessageStackDepth(e2, FailureMessages.didNotContainAllElementsOfInOrder(prettifier, fumList, Seq("FEE", "FIE", "FOE", "FUM")) + ", but " + FailureMessages.containedAllElementsOfInOrder(prettifier, fumList, Seq("FUM", "FOE", "FIE", "FEE")), fileName, thisLineNumber - 2)
}
it("should use an explicitly provided Equality") {
(fumList should (not contain inOrderElementsOf (Seq("FEE", "FIE", "FOE", "FUM")) and not contain inOrderElementsOf (Seq("FEE", "FIE", "FOE", "FUM")))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (not contain inOrderElementsOf (Seq("FEE", "FIE", "FOE", "FUM")) and not contain inOrderElementsOf (Seq("FUM", "FOE", "FIE", "FEE")))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, FailureMessages.didNotContainAllElementsOfInOrder(prettifier, fumList, Seq("FEE", "FIE", "FOE", "FUM")) + ", but " + FailureMessages.containedAllElementsOfInOrder(prettifier, fumList, Seq("FUM", "FOE", "FIE", "FEE")), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
(fumList should (not contain inOrderElementsOf (Seq("FUM", "FOE", "FIE", "FEE")) and not contain inOrderElementsOf (Seq("FEE", "FIE", "FOE", "FUM")))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e2, FailureMessages.containedAllElementsOfInOrder(prettifier, fumList, Seq("FUM", "FOE", "FIE", "FEE")), fileName, thisLineNumber - 2)
}
it("should do nothing when RHS contain duplicated value") {
fumList should (not contain inOrderElementsOf (Seq("fee", "fie", "foe", "fie", "fum")) and not contain inOrderElementsOf (Seq("fee", "fie", "foe", "fum")))
fumList should (not contain inOrderElementsOf (Seq("fee", "fie", "foe", "fum")) and not contain inOrderElementsOf (Seq("fee", "fie", "foe", "fie", "fum")))
}
}
describe("when used with (not equal xx and not contain inOrderElementsOf xx)") {
it("should do nothing if valid, else throw a TFE with an appropriate error message") {
fumList should (not equal (toList) and not contain inOrderElementsOf (Seq("fee", "fie", "foe", "fum")))
val e1 = intercept[TestFailedException] {
fumList should (not equal (fumList) and not contain inOrderElementsOf (Seq("fee", "fie", "foe", "fum")))
}
checkMessageStackDepth(e1, FailureMessages.equaled(prettifier, fumList, fumList), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
fumList should (not equal (toList) and not contain inOrderElementsOf (Seq("fum", "foe", "fie", "fee")))
}
checkMessageStackDepth(e2, FailureMessages.didNotEqual(prettifier, fumList, toList) + ", but " + FailureMessages.containedAllElementsOfInOrder(prettifier, fumList, Seq("fum", "foe", "fie", "fee")), fileName, thisLineNumber - 2)
}
it("should use the implicit Equality in scope") {
implicit val ise = upperCaseStringEquality
fumList should (not equal (toList) and not contain inOrderElementsOf (Seq("FEE", "FIE", "FOE", "FUM")))
val e1 = intercept[TestFailedException] {
fumList should (not equal (fumList) and not contain inOrderElementsOf (Seq("FEE", "FIE", "FOE", "FUM")))
}
checkMessageStackDepth(e1, FailureMessages.equaled(prettifier, fumList, fumList), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
fumList should (not equal (toList) and (not contain inOrderElementsOf (Seq("FUM", "FOE", "FIE", "FEE"))))
}
checkMessageStackDepth(e2, FailureMessages.didNotEqual(prettifier, fumList, toList) + ", but " + FailureMessages.containedAllElementsOfInOrder(prettifier, fumList, Seq("FUM", "FOE", "FIE", "FEE")), fileName, thisLineNumber - 2)
}
it("should use an explicitly provided Equality") {
(fumList should (not equal (fumList) and not contain inOrderElementsOf (Seq("FEE", "FIE", "FOE", "FUM")))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (not equal (fumList) and not contain inOrderElementsOf (Seq("FUM", "FOE", "FIE", "FEE")))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, FailureMessages.didNotEqual(prettifier, fumList, fumList) + ", but " + FailureMessages.containedAllElementsOfInOrder(prettifier, fumList, Seq("FUM", "FOE", "FIE", "FEE")), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
(fumList should (not equal (toList) and not contain inOrderElementsOf (Seq("FEE", "FIE", "FOE", "FUM")))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e2, FailureMessages.equaled(prettifier, fumList, toList), fileName, thisLineNumber - 2)
}
it("should do nothing when RHS contain duplicated value") {
fumList should (not equal (toList) and not contain inOrderElementsOf (Seq("fee", "fie", "foe", "fie", "fum")))
}
}
describe("when used with (not be xx and not contain inOrderElementsOf xx)") {
it("should do nothing if valid, else throw a TFE with an appropriate error message") {
fumList should (not be (toList) and not contain inOrderElementsOf (Seq("fee", "fie", "foe", "fum")))
val e1 = intercept[TestFailedException] {
fumList should (not be (fumList) and not contain inOrderElementsOf (Seq("fee", "fie", "foe", "fum")))
}
checkMessageStackDepth(e1, FailureMessages.wasEqualTo(prettifier, fumList, fumList), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
fumList should (not be (toList) and not contain inOrderElementsOf (Seq("fum", "foe", "fie", "fee")))
}
checkMessageStackDepth(e2, FailureMessages.wasNotEqualTo(prettifier, fumList, toList) + ", but " + FailureMessages.containedAllElementsOfInOrder(prettifier, fumList, Seq("fum", "foe", "fie", "fee")), fileName, thisLineNumber - 2)
}
it("should use the implicit Equality in scope") {
implicit val ise = upperCaseStringEquality
fumList should (not be (toList) and not contain inOrderElementsOf (Seq("FEE", "FIE", "FOE", "FUM")))
val e1 = intercept[TestFailedException] {
fumList should (not be (fumList) and not contain inOrderElementsOf (Seq("FEE", "FIE", "FOE", "FUM")))
}
checkMessageStackDepth(e1, FailureMessages.wasEqualTo(prettifier, fumList, fumList), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
fumList should (not be (toList) and (not contain inOrderElementsOf (Seq("FUM", "FOE", "FIE", "FEE"))))
}
checkMessageStackDepth(e2, FailureMessages.wasNotEqualTo(prettifier, fumList, toList) + ", but " + FailureMessages.containedAllElementsOfInOrder(prettifier, fumList, Seq("FUM", "FOE", "FIE", "FEE")), fileName, thisLineNumber - 2)
}
it("should use an explicitly provided Equality") {
(fumList should (not be (toList) and not contain inOrderElementsOf (Seq("FEE", "FIE", "FOE", "FUM")))) (decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (not be (toList) and not contain inOrderElementsOf (Seq("FUM", "FOE", "FIE", "FEE")))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, FailureMessages.wasNotEqualTo(prettifier, fumList, toList) + ", but " + FailureMessages.containedAllElementsOfInOrder(prettifier, fumList, Seq("FUM", "FOE", "FIE", "FEE")), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
(fumList should (not be (fumList) and not contain inOrderElementsOf (Seq("FEE", "FIE", "FOE", "FUM")))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e2, FailureMessages.wasEqualTo(prettifier, fumList, fumList), fileName, thisLineNumber - 2)
(fumList should (not contain inOrderElementsOf (Seq(" FEE ", " FIE ", " FOE ", " FUU ")) and not contain inOrderElementsOf (Seq(" FEE ", " FIE ", " FOE ", " FUU ")))) (after being lowerCased and trimmed, after being lowerCased and trimmed)
}
it("should do nothing when RHS contain duplicated value") {
fumList should (not be (toList) and not contain inOrderElementsOf (Seq("fee", "fie", "foe", "fie", "fum")))
}
}
}
describe("every of Everys") {
val list1s: Every[Every[Int]] = Every(Every(0, 1, 2, 2, 3), Every(0, 1, 2, 2, 3), Every(0, 1, 2, 2, 3))
val lists: Every[Every[Int]] = Every(Every(0, 1, 2, 3, 3), Every(0, 1, 2, 3, 3), Every(8, 2, 3, 4))
val hiLists: Every[Every[String]] = Every(Every("he", "hi", "hello"), Every("he", "hi", "hello"), Every("he", "hi", "hello"))
def allErrMsg(index: Int, message: String, lineNumber: Int, left: Any): String =
"'all' inspection failed, because: \\n" +
" at index " + index + ", " + message + " (" + fileName + ":" + (lineNumber) + ") \\n" +
"in " + decorateToStringValue(prettifier, left)
describe("used with contain inOrderElementsOf xx and contain inOrderElementsOf xx") {
it("should do nothing if valid, else throw a TFE with an appropriate error message") {
all (list1s) should (contain inOrderElementsOf Seq(1, 2, 3) and contain inOrderElementsOf Seq(1, 2, 3))
atLeast (2, lists) should (contain inOrderElementsOf Seq(1, 2, 3) and contain inOrderElementsOf Seq(1, 2, 3))
atMost (2, lists) should (contain inOrderElementsOf Seq(1, 2, 3) and contain inOrderElementsOf Seq(1, 2, 3))
no (lists) should (contain inOrderElementsOf Seq(3, 6, 9) and contain inOrderElementsOf Seq(3, 4, 5))
val e1 = intercept[TestFailedException] {
all (lists) should (contain inOrderElementsOf Seq(1, 2, 3) and contain inOrderElementsOf Seq(1, 2, 3))
}
checkMessageStackDepth(e1, allErrMsg(2, decorateToStringValue(prettifier, lists(2)) + " did not contain all elements of " + decorateToStringValue(prettifier, Seq(1, 2, 3)) + " in order", thisLineNumber - 2, lists), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
all (list1s) should (contain inOrderElementsOf Seq(1, 2, 3) and contain inOrderElementsOf Seq(1, 3, 4))
}
checkMessageStackDepth(e2, allErrMsg(0, decorateToStringValue(prettifier, list1s(0)) + " contained all elements of " + decorateToStringValue(prettifier, Seq(1, 2, 3)) + " in order" + ", but " + decorateToStringValue(prettifier, list1s(0)) + " did not contain all elements of " + decorateToStringValue(prettifier, Seq(1, 3, 4)) + " in order", thisLineNumber - 2, list1s), fileName, thisLineNumber - 2)
val e3 = intercept[TestFailedException] {
all (hiLists) should (contain inOrderElementsOf Seq("hi", "hello") and contain inOrderElementsOf Seq("hello", "hi"))
}
checkMessageStackDepth(e3, allErrMsg(0, decorateToStringValue(prettifier, hiLists(0)) + " contained all elements of " + decorateToStringValue(prettifier, Seq("hi", "hello")) + " in order" + ", but " + decorateToStringValue(prettifier, hiLists(0)) + " did not contain all elements of " + decorateToStringValue(prettifier, Seq("hello", "hi")) + " in order", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
it("should use the implicit Equality in scope") {
implicit val ise = upperCaseStringEquality
all (hiLists) should (contain inOrderElementsOf Seq("HI", "HELLO") and contain inOrderElementsOf Seq("HI", "HELLO"))
val e1 = intercept[TestFailedException] {
all (hiLists) should (contain inOrderElementsOf Seq("HO", "HELLO") and contain inOrderElementsOf Seq("HI", "HELLO"))
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(prettifier, hiLists(0)) + " did not contain all elements of " + decorateToStringValue(prettifier, Seq("HO", "HELLO")) + " in order", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
all (hiLists) should (contain inOrderElementsOf Seq("HI", "HELLO") and contain inOrderElementsOf Seq("HELLO", "HI"))
}
checkMessageStackDepth(e2, allErrMsg(0, decorateToStringValue(prettifier, hiLists(0)) + " contained all elements of " + decorateToStringValue(prettifier, Seq("HI", "HELLO")) + " in order" + ", but " + decorateToStringValue(prettifier, hiLists(0)) + " did not contain all elements of " + decorateToStringValue(prettifier, Seq("HELLO", "HI")) + " in order", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
it("should use an explicitly provided Equality") {
(all (hiLists) should (contain inOrderElementsOf Seq("HI", "HELLO") and contain inOrderElementsOf Seq("HI", "HELLO"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(all (hiLists) should (contain inOrderElementsOf Seq("HO", "HELLO") and contain inOrderElementsOf Seq("HI", "HELLO"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(prettifier, hiLists(0)) + " did not contain all elements of " + decorateToStringValue(prettifier, Seq("HO", "HELLO")) + " in order", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
(all (hiLists) should (contain inOrderElementsOf Seq("HI", "HELLO") and contain inOrderElementsOf Seq("HELLO", "HI"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e2, allErrMsg(0, decorateToStringValue(prettifier, hiLists(0)) + " contained all elements of " + decorateToStringValue(prettifier, Seq("HI", "HELLO")) + " in order" + ", but " + decorateToStringValue(prettifier, hiLists(0)) + " did not contain all elements of " + decorateToStringValue(prettifier, Seq("HELLO", "HI")) + " in order", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
it("should do nothing when RHS contain duplicated value") {
all (list1s) should (contain inOrderElementsOf Seq(1, 2, 2, 3) and contain inOrderElementsOf Seq(1, 2, 3))
all (list1s) should (contain inOrderElementsOf Seq(1, 2, 3) and contain inOrderElementsOf Seq(1, 2, 2, 3))
}
}
describe("when used with (be xx and contain inOrderElementsOf xx)") {
it("should do nothing if valid, else throw a TFE with an appropriate error message") {
all (list1s) should (be (Many(0, 1, 2, 2, 3)) and contain inOrderElementsOf Seq(1, 2, 3))
atLeast (2, lists) should (be (Many(0, 1, 2, 3, 3)) and contain inOrderElementsOf Seq(1, 2, 3))
atMost (2, lists) should (be (Many(3, 2, 1)) and contain inOrderElementsOf Seq(1, 2, 3))
no (lists) should (be (Many(3, 6, 9)) and contain inOrderElementsOf Seq(3, 4, 5))
val e1 = intercept[TestFailedException] {
all (lists) should (be (Many(0, 1, 2, 3, 3)) and contain inOrderElementsOf Seq(1, 2, 3))
}
checkMessageStackDepth(e1, allErrMsg(2, decorateToStringValue(prettifier, lists(2)) + " was not equal to " + decorateToStringValue(prettifier, Many(0, 1, 2, 3, 3)), thisLineNumber - 2, lists), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
all (list1s) should (be (Many(0, 1, 2, 2, 3)) and contain inOrderElementsOf Seq(2, 3, 8))
}
checkMessageStackDepth(e2, allErrMsg(0, decorateToStringValue(prettifier, list1s(0)) + " was equal to " + decorateToStringValue(prettifier, Many(0, 1, 2, 2, 3)) + ", but " + decorateToStringValue(prettifier, list1s(0)) + " did not contain all elements of " + decorateToStringValue(prettifier, Seq(2, 3, 8)) + " in order", thisLineNumber - 2, list1s), fileName, thisLineNumber - 2)
val e3 = intercept[TestFailedException] {
all (hiLists) should (be (Many("he", "hi", "hello")) and contain inOrderElementsOf Seq("hello", "hi"))
}
checkMessageStackDepth(e3, allErrMsg(0, decorateToStringValue(prettifier, hiLists(0)) + " was equal to " + decorateToStringValue(prettifier, Many("he", "hi", "hello")) + ", but " + decorateToStringValue(prettifier, hiLists(0)) + " did not contain all elements of " + decorateToStringValue(prettifier, Seq("hello", "hi")) + " in order", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
val e4 = intercept[TestFailedException] {
all (list1s) should (be (Many(0, 1, 2, 2, 3)) and contain inOrderElementsOf Seq(2, 3, 8))
}
checkMessageStackDepth(e4, allErrMsg(0, decorateToStringValue(prettifier, list1s(0)) + " was equal to " + decorateToStringValue(prettifier, Many(0, 1, 2, 2, 3)) + ", but " + decorateToStringValue(prettifier, list1s(0)) + " did not contain all elements of " + decorateToStringValue(prettifier, Seq(2, 3, 8)) + " in order", thisLineNumber - 2, list1s), fileName, thisLineNumber - 2)
}
it("should use the implicit Equality in scope") {
implicit val ise = upperCaseStringEquality
all (hiLists) should (be (Many("he", "hi", "hello")) and contain inOrderElementsOf Seq("HI", "HELLO"))
val e1 = intercept[TestFailedException] {
all (hiLists) should (be (Many("HI", "HELLO")) and contain inOrderElementsOf Seq("HI", "HELLO"))
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(prettifier, hiLists(0)) + " was not equal to " + decorateToStringValue(prettifier, Many("HI", "HELLO")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
all (hiLists) should (be (Many("he", "hi", "hello")) and contain inOrderElementsOf Seq("HELLO", "HI"))
}
checkMessageStackDepth(e2, allErrMsg(0, decorateToStringValue(prettifier, hiLists(0)) + " was equal to " + decorateToStringValue(prettifier, Many("he", "hi", "hello")) + ", but " + decorateToStringValue(prettifier, hiLists(0)) + " did not contain all elements of " + decorateToStringValue(prettifier, Seq("HELLO", "HI")) + " in order", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
it("should use an explicitly provided Equality") {
(all (hiLists) should (be (Many("he", "hi", "hello")) and contain inOrderElementsOf Seq("HI", "HELLO"))) (decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(all (hiLists) should (be (Many("HI", "HELLO")) and contain inOrderElementsOf Seq("HI", "HELLO"))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(prettifier, hiLists(0)) + " was not equal to " + decorateToStringValue(prettifier, Many("HI", "HELLO")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
(all (hiLists) should (be (Many("he", "hi", "hello")) and contain inOrderElementsOf Seq("HELLO", "HI"))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e2, allErrMsg(0, decorateToStringValue(prettifier, hiLists(0)) + " was equal to " + decorateToStringValue(prettifier, Many("he", "hi", "hello")) + ", but " + decorateToStringValue(prettifier, hiLists(0)) + " did not contain all elements of " + decorateToStringValue(prettifier, Seq("HELLO", "HI")) + " in order", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
it("should do nothing when RHS contain duplicated value") {
all (list1s) should (be (Many(0, 1, 2, 2, 3)) and contain inOrderElementsOf Seq(1, 2, 2, 3))
}
}
describe("when used with (not contain inOrderElementsOf xx and not contain inOrderElementsOf xx)") {
it("should do nothing if valid, else throw a TFE with an appropriate error message") {
all (list1s) should (not contain inOrderElementsOf (Seq(3, 2, 8)) and not contain inOrderElementsOf (Seq(8, 3, 4)))
atLeast (2, lists) should (not contain inOrderElementsOf (Seq(3, 8, 5)) and not contain inOrderElementsOf (Seq(8, 3, 4)))
atMost (2, lists) should (not contain inOrderElementsOf (Seq(2, 4, 3)) and contain inOrderElementsOf (Seq(2, 3, 4)))
no (list1s) should (not contain inOrderElementsOf (Seq(1, 2, 3)) and not contain inOrderElementsOf (Seq(1, 2, 3)))
val e1 = intercept[TestFailedException] {
all (lists) should (not contain inOrderElementsOf (Seq(2, 3, 4)) and not contain inOrderElementsOf (Seq(8, 3, 4)))
}
checkMessageStackDepth(e1, allErrMsg(2, decorateToStringValue(prettifier, lists(2)) + " contained all elements of " + decorateToStringValue(prettifier, Seq(2, 3, 4)) + " in order", thisLineNumber - 2, lists), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
all (lists) should (not contain inOrderElementsOf (Seq(3, 6, 8)) and not contain inOrderElementsOf (Seq(2, 3, 4)))
}
checkMessageStackDepth(e2, allErrMsg(2, decorateToStringValue(prettifier, lists(2)) + " did not contain all elements of " + decorateToStringValue(prettifier, Seq(3, 6, 8)) + " in order" + ", but " + decorateToStringValue(prettifier, lists(2)) + " contained all elements of " + decorateToStringValue(prettifier, Seq(2, 3, 4)) + " in order", thisLineNumber - 2, lists), fileName, thisLineNumber - 2)
val e3 = intercept[TestFailedException] {
all (hiLists) should (not contain inOrderElementsOf (Seq("hi", "hello")) and not contain inOrderElementsOf (Seq("ho", "hey", "howdy")))
}
checkMessageStackDepth(e3, allErrMsg(0, decorateToStringValue(prettifier, hiLists(0)) + " contained all elements of " + decorateToStringValue(prettifier, Seq("hi", "hello")) + " in order", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
val e4 = intercept[TestFailedException] {
all (hiLists) should (not contain inOrderElementsOf (Seq("ho", "hey", "howdy")) and not contain inOrderElementsOf (Seq("hi", "hello")))
}
checkMessageStackDepth(e4, allErrMsg(0, decorateToStringValue(prettifier, hiLists(0)) + " did not contain all elements of " + decorateToStringValue(prettifier, Seq("ho", "hey", "howdy")) + " in order" + ", but " + decorateToStringValue(prettifier, hiLists(0)) + " contained all elements of " + decorateToStringValue(prettifier, Seq("hi", "hello")) + " in order", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
it("should use the implicit Equality in scope") {
implicit val ise = upperCaseStringEquality
all (hiLists) should (not contain inOrderElementsOf (Seq("HELLO", "HI")) and not contain inOrderElementsOf (Seq("HELLO", "HO")))
val e1 = intercept[TestFailedException] {
all (hiLists) should (not contain inOrderElementsOf (Seq("HI", "HELLO")) and not contain inOrderElementsOf (Seq("HO", "HE")))
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(prettifier, hiLists(0)) + " contained all elements of " + decorateToStringValue(prettifier, Seq("HI", "HELLO")) + " in order", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
all (hiLists) should (not contain inOrderElementsOf (Seq("HELLO", "HI")) and not contain inOrderElementsOf (Seq("HI", "HELLO")))
}
checkMessageStackDepth(e2, allErrMsg(0, decorateToStringValue(prettifier, hiLists(0)) + " did not contain all elements of " + decorateToStringValue(prettifier, Seq("HELLO", "HI")) + " in order" + ", but " + decorateToStringValue(prettifier, hiLists(0)) + " contained all elements of " + decorateToStringValue(prettifier, Seq("HI", "HELLO")) + " in order", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
it("should use an explicitly provided Equality") {
(all (hiLists) should (not contain inOrderElementsOf (Seq("HELLO", "HI")) and not contain inOrderElementsOf (Seq("HELLO", "HO")))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(all (hiLists) should (not contain inOrderElementsOf (Seq("HI", "HELLO")) and not contain inOrderElementsOf (Seq("HO", "HE")))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(prettifier, hiLists(0)) + " contained all elements of " + decorateToStringValue(prettifier, Seq("HI", "HELLO")) + " in order", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
(all (hiLists) should (not contain inOrderElementsOf (Seq("HELLO", "HI")) and not contain inOrderElementsOf (Seq("HI", "HELLO")))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e2, allErrMsg(0, decorateToStringValue(prettifier, hiLists(0)) + " did not contain all elements of " + decorateToStringValue(prettifier, Seq("HELLO", "HI")) + " in order" + ", but " + decorateToStringValue(prettifier, hiLists(0)) + " contained all elements of " + decorateToStringValue(prettifier, Seq("HI", "HELLO")) + " in order", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
it("should do nothing when RHS contain duplicated value") {
all (list1s) should (not contain inOrderElementsOf (Seq(1, 3, 3, 8)) and not contain inOrderElementsOf (Seq(8, 3, 4)))
}
}
describe("when used with (not be xx and not contain inOrderElementsOf xx)") {
it("should do nothing if valid, else throw a TFE with an appropriate error message") {
all (list1s) should (not be (One(2)) and not contain inOrderElementsOf (Seq(8, 3, 4)))
atLeast (2, lists) should (not be (One(3)) and not contain inOrderElementsOf (Seq(8, 3, 4)))
atMost (2, lists) should (not be (Many(8, 2, 3, 4)) and not contain inOrderElementsOf (Seq(2, 3, 4)))
no (list1s) should (not be (Many(0, 1, 2, 2, 3)) and not contain inOrderElementsOf (Seq(1, 2, 3)))
val e1 = intercept[TestFailedException] {
all (lists) should (not be (Many(8, 2, 3, 4)) and not contain inOrderElementsOf (Seq(8, 3, 4)))
}
checkMessageStackDepth(e1, allErrMsg(2, decorateToStringValue(prettifier, lists(2)) + " was equal to " + decorateToStringValue(prettifier, Many(8, 2, 3, 4)), thisLineNumber - 2, lists), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
all (lists) should (not be (One(3)) and not contain inOrderElementsOf (Seq(2, 3, 4)))
}
checkMessageStackDepth(e2, allErrMsg(2, decorateToStringValue(prettifier, lists(2)) + " was not equal to " + decorateToStringValue(prettifier, One(3)) + ", but " + decorateToStringValue(prettifier, lists(2)) + " contained all elements of " + decorateToStringValue(prettifier, Seq(2, 3, 4)) + " in order", thisLineNumber - 2, lists), fileName, thisLineNumber - 2)
val e3 = intercept[TestFailedException] {
all (hiLists) should (not be (Many("he", "hi", "hello")) and not contain inOrderElementsOf (Seq("ho", "hey", "howdy")))
}
checkMessageStackDepth(e3, allErrMsg(0, decorateToStringValue(prettifier, hiLists(0)) + " was equal to " + decorateToStringValue(prettifier, Many("he", "hi", "hello")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
val e4 = intercept[TestFailedException] {
all (hiLists) should (not be (One("ho")) and not contain inOrderElementsOf (Seq("hi", "hello")))
}
checkMessageStackDepth(e4, allErrMsg(0, decorateToStringValue(prettifier, hiLists(0)) + " was not equal to " + decorateToStringValue(prettifier, One("ho")) + ", but " + decorateToStringValue(prettifier, hiLists(0)) + " contained all elements of " + decorateToStringValue(prettifier, Seq("hi", "hello")) + " in order", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
it("should use the implicit Equality in scope") {
implicit val ise = upperCaseStringEquality
all (hiLists) should (not be (One("ho")) and not contain inOrderElementsOf (Seq("HO", "HELLO")))
val e1 = intercept[TestFailedException] {
all (hiLists) should (not be (Many("he", "hi", "hello")) and not contain inOrderElementsOf (Seq("HELLO", "HI")))
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(prettifier, hiLists(0)) + " was equal to " + decorateToStringValue(prettifier, Many("he", "hi", "hello")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
all (hiLists) should (not be (One("ho")) and not contain inOrderElementsOf (Seq("HI", "HELLO")))
}
checkMessageStackDepth(e2, allErrMsg(0, decorateToStringValue(prettifier, hiLists(0)) + " was not equal to " + decorateToStringValue(prettifier, One("ho")) + ", but " + decorateToStringValue(prettifier, hiLists(0)) + " contained all elements of " + decorateToStringValue(prettifier, Seq("HI", "HELLO")) + " in order", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
it("should use an explicitly provided Equality") {
(all (hiLists) should (not be (One("ho")) and not contain inOrderElementsOf (Seq("HO", "HELLO")))) (decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(all (hiLists) should (not be (Many("he", "hi", "hello")) and not contain inOrderElementsOf (Seq("HELLO", "HI")))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(prettifier, hiLists(0)) + " was equal to " + decorateToStringValue(prettifier, Many("he", "hi", "hello")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
val e2 = intercept[TestFailedException] {
(all (hiLists) should (not be (One("ho")) and not contain inOrderElementsOf (Seq("HI", "HELLO")))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e2, allErrMsg(0, decorateToStringValue(prettifier, hiLists(0)) + " was not equal to " + decorateToStringValue(prettifier, One("ho")) + ", but " + decorateToStringValue(prettifier, hiLists(0)) + " contained all elements of " + decorateToStringValue(prettifier, Seq("HI", "HELLO")) + " in order", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
it("should do nothing when RHS contain duplicated value") {
all (list1s) should (not be (One(2)) and not contain inOrderElementsOf (Seq(1, 3, 3, 8)))
}
}
}
}
| dotty-staging/scalatest | scalatest-test/src/test/scala/org/scalatest/EveryShouldContainInOrderElementsOfLogicalAndSpec.scala | Scala | apache-2.0 | 49,551 |
package org.npmaven.model
import net.liftweb.http.LiftRules
import org.specs2.mutable.Specification
object BowerSpecs extends Specification {
"Bower object" should {
val angular = Bower("angular", "1.3.14", "./angular.js")
"extract the angular.json to a case class via String" in {
val json = LiftRules.loadResourceAsString("/org/npmaven/model/bower.json")
val bower = json.flatMap(Bower(_))
bower should be equalTo(Some(angular))
}
"extract the angular.json to a case class via bytes" in {
val jsonBytes = LiftRules.loadResource("/org/npmaven/model/bower.json")
val bower = jsonBytes.flatMap(Bower(_))
bower should be equalTo(Some(angular))
}
"extract a None if missing the main" in {
val json = """{
"name":"yo-momma",
"version":"42"
}"""
Bower(json) should be equalTo(None)
}
"extract a None if the json is just garbage" in {
val json = "garbage"
Bower(json) should be equalTo(None)
}
}
}
| npmaven/npmaven | src/test/scala/org/npmaven/model/BowerSpecs.scala | Scala | apache-2.0 | 1,024 |
package com.etsy.conjecture.scalding.train
import cascading.flow._
import cascading.operation._
import cascading.pipe._
import cascading.pipe.joiner.InnerJoin
import com.etsy.conjecture.data._
import com.etsy.conjecture.model._
import com.twitter.scalding._
class LargeModelTrainer[L <: Label, M <: UpdateableModel[L, M]](strategy: ModelTrainerStrategy[L, M], training_bins: Int) extends AbstractModelTrainer[L, M] {
import Dsl._
def train(instances: Pipe, instanceField: Symbol = 'instance, modelField: Symbol = 'model): Pipe = {
trainRecursively(None, modelField, binTrainingData(instances, instanceField), instanceField, strategy.getIters)
}
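
    // Usage sketch (caller-side names are assumed, not part of this class): a job would
    // typically construct the trainer with a concrete strategy and run
    //   val trainer = new LargeModelTrainer(strategy, training_bins = 16)
    //   val modelPipe = trainer.train(instancePipe, 'instance, 'model)
    // The returned pipe carries a single tuple holding the merged, post-processed model
    // (see trainRecursively below, which keeps only bin 0 and applies strategy.modelPostProcess).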
def reTrain(instances: Pipe, instanceField: Symbol, model: Pipe, modelField: Symbol): Pipe = {
throw new UnsupportedOperationException("not implemented due to expensiveness of model duplication")
}
def binTrainingData(instances: Pipe, instanceField: Symbol): Pipe = {
instances
.project(instanceField)
.map(instanceField -> 'bin) { b: LabeledInstance[L] => b.hashCode % training_bins }
}
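
    // Illustrative (values assumed): with training_bins = 4, an instance whose hashCode is 10
    // is routed to bin 2. Note that % follows Java semantics here, so an instance with a
    // negative hashCode ends up with a non-positive bin value.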
// This implements a full iteration of training, ending with a pipe with a model.
protected def trainIteration(modelPipe: Option[Pipe], modelField: Symbol, instancePipe: Pipe, instanceField: Symbol): Pipe = {
val iterationField = '__iteration__
val modelCountField = '__model_count__
// Subsample instances.
val subsampled = instancePipe.filter(instanceField) { i: LabeledInstance[L] => math.random < strategy.sampleProb(i.getLabel) }
// Get models on each mapper.
(modelPipe match {
case Some(pipe) => subsampled.joinWithSmaller('bin -> 'bin, pipe, new InnerJoin(), training_bins)
case _ => subsampled.map(instanceField -> (instanceField, modelField)) { x: LabeledInstance[L] => (x, strategy.getModel) }
})
// Count iteration numbers.
.insert(iterationField, 0)
.insert(modelCountField, 1)
// Convert instances to instance list.
.map(instanceField -> instanceField) { i: LabeledInstance[L] => List(i) }
// Perform map-side aggregation of models, which are then sent to a single reduce node for merging.
.groupBy('bin) {
_.reduce[(M, List[LabeledInstance[L]], Int, Int)](
(modelField, instanceField, iterationField, modelCountField) -> (modelField, instanceField, iterationField, modelCountField))(strategy.modelReduceFunction)
.reducers(training_bins)
}
.mapTo((modelField, iterationField) -> modelField) { x: (M, Int) => strategy.endIteration(x._1, x._2, training_bins) }
// flatten submodels and aggregate on different reducers.
.flatMapTo(modelField -> ('param, 'value)) { m: M =>
println("epoch: " + m.getEpoch)
m.setParameter("__epoch__", m.getEpoch)
new Iterable[(String, Double)]() {
def iterator() = {
new Iterator[(String, Double)]() {
val it = m.decompose
def hasNext: Boolean = { it.hasNext }
def next: (String, Double) = { val e = it.next(); (e.getKey, e.getValue) }
}
}
}
}
.groupBy('param) { _.sum[Double]('value).forceToReducers }
// Duplicate the summed parameters rather than duplicating the reconstructed model, for speed reasons.
.flatMapTo(('param, 'value) -> ('bin, 'param, 'value)) {
b: (String, Double) =>
(0 until training_bins).map { i => (i, b._1, b._2) }
}
// Reconstruct the model for each bin. Uses a hacked on Scalding operator due to kryo serialization not supporting copy().
.groupBy('bin) {
_.every {
pipe =>
new Every(
pipe,
('param, 'value),
new FoldAggregator[(String, Double), M](
{
(model: M, param: (String, Double)) =>
if (param._1 == "__epoch__") {
val epoch = (param._2 / training_bins).toLong
println("epoch: " + epoch)
model.setEpoch(epoch)
} else {
model.setParameter(param._1, param._2)
}
model
},
strategy.getModel,
modelField,
implicitly[TupleConverter[(String, Double)]],
implicitly[TupleSetter[M]]) {
override def start(flowProcess: FlowProcess[_], call: AggregatorCall[M]) = call.setContext(strategy.getModel)
})
}
.reducers(training_bins)
}
.project('bin, modelField)
}
protected def trainRecursively(modelPipe: Option[Pipe], modelField: Symbol, instancePipe: Pipe, instanceField: Symbol, iterations: Int): Pipe = {
val updatedPipe = trainIteration(modelPipe, modelField, instancePipe, instanceField)
if (iterations == 1) {
updatedPipe.filter('bin) { b: Int => b == 0 }.mapTo(modelField -> modelField) { strategy.modelPostProcess }.groupAll { _.pass }
} else {
trainRecursively(Some(updatedPipe), modelField, instancePipe, instanceField, iterations - 1)
}
}
}
| mathkann/Conjecture | src/main/scala/com/etsy/conjecture/scalding/train/LargeModelTrainer.scala | Scala | mit | 5,961 |
package org.openapitools.models
import io.circe._
import io.finch.circe._
import io.circe.generic.semiauto._
import io.circe.java8.time._
import org.openapitools._
import org.openapitools.models.HudsonMasterComputerexecutors
import org.openapitools.models.HudsonMasterComputermonitorData
import org.openapitools.models.Label1
import scala.collection.immutable.Seq
/**
*
* @param Underscoreclass
* @param displayName
* @param executors
* @param icon
* @param iconClassName
* @param idle
* @param jnlpAgent
* @param launchSupported
* @param loadStatistics
* @param manualLaunchAllowed
* @param monitorData
* @param numExecutors
* @param offline
* @param offlineCause
* @param offlineCauseReason
* @param temporarilyOffline
*/
case class HudsonMasterComputer(Underscoreclass: Option[String],
displayName: Option[String],
executors: Option[Seq[HudsonMasterComputerexecutors]],
icon: Option[String],
iconClassName: Option[String],
idle: Option[Boolean],
jnlpAgent: Option[Boolean],
launchSupported: Option[Boolean],
loadStatistics: Option[Label1],
manualLaunchAllowed: Option[Boolean],
monitorData: Option[HudsonMasterComputermonitorData],
numExecutors: Option[Int],
offline: Option[Boolean],
offlineCause: Option[String],
offlineCauseReason: Option[String],
temporarilyOffline: Option[Boolean]
)
object HudsonMasterComputer {
/**
* Creates the codec for converting HudsonMasterComputer from and to JSON.
*/
implicit val decoder: Decoder[HudsonMasterComputer] = deriveDecoder
implicit val encoder: ObjectEncoder[HudsonMasterComputer] = deriveEncoder
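
  // Usage sketch (assumes circe-parser is available on the classpath; the payload is
  // hypothetical): because every field is an Option, a partial document decodes, e.g.
  //   io.circe.parser.decode[HudsonMasterComputer]("""{"displayName":"master"}""")
  // yields Right(HudsonMasterComputer(..., displayName = Some("master"), ...)) with the
  // remaining fields set to None.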
}
| cliffano/swaggy-jenkins | clients/scala-finch/generated/src/main/scala/org/openapitools/models/HudsonMasterComputer.scala | Scala | mit | 1,863 |
package me.reminisce.json
import me.reminisce.database.MongoDBEntities.FBLocation
import me.reminisce.gameboard.board.GameboardEntities._
import spray.json.{JsObject, JsString, JsValue, RootJsonFormat, deserializationError, serializationError}
trait GameQuestionJSONSupport extends BoardJSONSupport with SubjectJSONSupport {
implicit val possibilityFormat: RootJsonFormat[Possibility] = jsonFormat4(Possibility)
implicit val subjectWithIdFormat: RootJsonFormat[SubjectWithId] = jsonFormat2(SubjectWithId)
implicit val fbLocationFormat: RootJsonFormat[FBLocation] = jsonFormat6(FBLocation)
implicit val mcQuestionFormat: RootJsonFormat[MultipleChoiceQuestion] = jsonFormat6(MultipleChoiceQuestion)
implicit val tlQuestionFormat: RootJsonFormat[TimelineQuestion] = jsonFormat11(TimelineQuestion)
implicit val ordQuestionFormat: RootJsonFormat[OrderQuestion] = jsonFormat6(OrderQuestion)
implicit val geoQuestionFormat: RootJsonFormat[GeolocationQuestion] = jsonFormat5(GeolocationQuestion)
implicit object GameQuestionFormat extends RootJsonFormat[GameQuestion] {
override def read(json: JsValue): GameQuestion = json match {
case JsObject(fields) =>
fields.get("kind") match {
case Some(JsString(MultipleChoice.name)) =>
mcQuestionFormat.read(json)
case Some(JsString(Timeline.name)) =>
tlQuestionFormat.read(json)
case Some(JsString(Order.name)) =>
ordQuestionFormat.read(json)
case Some(JsString(Geolocation.name)) =>
geoQuestionFormat.read(json)
case Some(x) =>
deserializationError(s"Not a valid question kind: ${x.toString()}")
case _ =>
deserializationError(s"Not a valid question: ${json.toString()}")
}
case _ =>
deserializationError(s"Not a valid question: ${json.toString()}")
}
override def write(obj: GameQuestion): JsValue = obj match {
case mc: MultipleChoiceQuestion =>
mcQuestionFormat.write(mc)
case tl: TimelineQuestion =>
tlQuestionFormat.write(tl)
case ord: OrderQuestion =>
ordQuestionFormat.write(ord)
case geo: GeolocationQuestion =>
geoQuestionFormat.write(geo)
case _ =>
serializationError(s"Not a valid question: $obj")
}
}
}
trait SubjectJSONSupport extends BoardJSONSupport {
implicit val pageSubjectFormat: RootJsonFormat[PageSubject] = jsonFormat4(PageSubject)
implicit val textPostSubjectFormat: RootJsonFormat[TextPostSubject] = jsonFormat(TextPostSubject, "text", "type", "from")
implicit val imagePostSubjectFormat: RootJsonFormat[ImagePostSubject] = jsonFormat(ImagePostSubject, "text", "imageUrl", "facebookImageUrl", "type", "from")
implicit val videoPostSubjectFormat: RootJsonFormat[VideoPostSubject] = jsonFormat(VideoPostSubject, "text", "thumbnailUrl", "url", "type", "from")
implicit val linkPostSubjectFormat: RootJsonFormat[LinkPostSubject] = jsonFormat(LinkPostSubject, "text", "thumbnailUrl", "url", "type", "from")
implicit object PostSubjectFormat extends RootJsonFormat[PostSubject] {
override def read(json: JsValue): PostSubject = json match {
case JsObject(fields) =>
fields.get("type") match {
case Some(JsString(TextPostType.name)) =>
textPostSubjectFormat.read(json)
case Some(JsString(ImagePostType.name)) =>
imagePostSubjectFormat.read(json)
case Some(JsString(VideoPostType.name)) =>
videoPostSubjectFormat.read(json)
case Some(JsString(LinkPostType.name)) =>
linkPostSubjectFormat.read(json)
case Some(x) =>
deserializationError(s"Not a valid post subject type: ${x.toString}")
case _ =>
deserializationError(s"Not a valid post subject: ${json.toString}")
}
case _ =>
deserializationError(s"Not a valid post subject: ${json.toString}")
}
override def write(obj: PostSubject): JsValue = obj match {
case txt: TextPostSubject =>
textPostSubjectFormat.write(txt)
case img: ImagePostSubject =>
imagePostSubjectFormat.write(img)
case vid: VideoPostSubject =>
videoPostSubjectFormat.write(vid)
case link: LinkPostSubject =>
linkPostSubjectFormat.write(link)
case _ =>
serializationError(s"Not a valid post subject: ${obj.toString}")
}
}
implicit val commentSubjectFormat: RootJsonFormat[CommentSubject] = jsonFormat3(CommentSubject)
implicit val reactionSubjectFormat: RootJsonFormat[ReactionSubject] = jsonFormat2(ReactionSubject)
implicit object SubjectFormat extends RootJsonFormat[Subject] {
override def read(json: JsValue): Subject = json match {
case JsObject(fields) =>
fields.get("type") match {
case Some(JsString(PageSubjectType.name)) =>
pageSubjectFormat.read(json)
case Some(JsString(TextPostType.name)) =>
textPostSubjectFormat.read(json)
case Some(JsString(ImagePostType.name)) =>
imagePostSubjectFormat.read(json)
case Some(JsString(VideoPostType.name)) =>
videoPostSubjectFormat.read(json)
case Some(JsString(LinkPostType.name)) =>
linkPostSubjectFormat.read(json)
case Some(JsString(CommentSubjectType.name)) =>
commentSubjectFormat.read(json)
case Some(JsString(ReactionSubjectType.name)) =>
reactionSubjectFormat.read(json)
case Some(x) =>
deserializationError(s"Not a valid subject type: ${x.toString}")
case _ =>
deserializationError(s"Not a valid subject: ${json.toString}")
}
case _ =>
deserializationError(s"Not a valid subject: ${json.toString}")
}
override def write(obj: Subject): JsValue = obj match {
case page: PageSubject =>
pageSubjectFormat.write(page)
case post: PostSubject =>
PostSubjectFormat.write(post)
case comment: CommentSubject =>
commentSubjectFormat.write(comment)
case react: ReactionSubject =>
reactionSubjectFormat.write(react)
case _ =>
serializationError(s"Not a valid subject: ${obj.toString}")
}
}
implicit val subjFormat: RootJsonFormat[Subject] = SubjectFormat
}
| reminisceme/game-creator | src/main/scala/me/reminisce/json/GameQuestionJSONSupport.scala | Scala | apache-2.0 | 6,356 |
package com.codingkapoor.codingbat
import org.scalatest.Matchers
import org.scalatest.FlatSpec
class WarmupIISpec extends FlatSpec with Matchers {
"stringTimes" should "return a larger string that is 'n' copies of the original string, given a string and a non-negative int 'n'" in {
WarmupII.stringTimes("Hi", 2) should equal("HiHi")
WarmupII.stringTimes("Hi", 3) should equal("HiHiHi")
WarmupII.stringTimes("Hi", 1) should equal("Hi")
}
"frontTimes" should "return n copies of the first three characters of a given string" in {
WarmupII.frontTimes("Chocolate", 2) should equal("ChoCho")
WarmupII.frontTimes("Chocolate", 3) should equal("ChoChoCho")
WarmupII.frontTimes("Abc", 3) should equal("AbcAbcAbc")
}
"countXX" should "return the Count of the number of 'xx' in a given string where overlapping of the pattern is allowed" in {
WarmupII.countXX("abcxx") should equal(1)
WarmupII.countXX("xxx") should equal(2)
WarmupII.countXX("xxxx") should equal(3)
}
"doubleX" should "return true if the first instance of 'x' in a given string is immediately followed by another 'x'" in {
WarmupII.doubleX("axxbb") should equal(true)
WarmupII.doubleX("axaxax") should equal(false)
WarmupII.doubleX("xxxxx") should equal(true)
}
"stringBits" should "return a new string made of every other char in a given string starting with the first, so 'Hello' yields 'Hlo'" in {
WarmupII.stringBits("Hello") should equal("Hlo")
WarmupII.stringBits("Hi") should equal("H")
WarmupII.stringBits("Heeololeo") should equal("Hello")
}
"stringSplosion" should "given a non-empty string like 'Code' return a string like 'CCoCodCode'" in {
WarmupII.stringSplosion("Code") should equal("CCoCodCode")
WarmupII.stringSplosion("abc") should equal("aababc")
WarmupII.stringSplosion("ab") should equal("aab")
}
"last2, given a string" should "return the count of the number of times that a substring made of last 2 characters appears in the substring made of the rest of the string but the last to characters" in {
WarmupII.last2("hixxhi") should equal(1)
WarmupII.last2("xaxxaxaxx") should equal(1)
WarmupII.last2("axxxaaxx") should equal(2)
}
"arrayCount2, given an array of ints" should "return the number of 9's in the array" in {
WarmupII.arrayCount9(List(1, 2, 9)) should equal(1)
WarmupII.arrayCount9(List(1, 9, 9)) should equal(2)
WarmupII.arrayCount9(List(1, 9, 9, 3, 9)) should equal(3)
}
"arrayFront9, given an array of ints" should "return true if one of the first 4 elements in the array is a 9. The array length may be less than 4" in {
WarmupII.arrayFront9(List(1, 2, 9, 3, 4)) should equal(true)
WarmupII.arrayFront9(List(1, 2, 3, 4, 9)) should equal(false)
WarmupII.arrayFront9(List(1, 2, 3, 4, 5)) should equal(false)
}
"array123, given an array of ints" should "return true if the sequence of numbers 1, 2, 3 appears in the array somewhere" in {
WarmupII.array123(List(1, 1, 2, 3, 1)) should equal(true)
WarmupII.array123(List(1, 1, 2, 4, 1)) should equal(false)
WarmupII.array123(List(1, 1, 2, 1, 2, 3)) should equal(true)
}
"stringMatch, given 2 strings, a and b" should """return the number of the positions where they contain the same length 2 substring. So "xxcaazz" and "xxbaaz" yields 3, since the "xx", "aa", and "az" substrings appear in the same place in both strings""" in {
WarmupII.stringMatch("xxcaazz", "xxbaaz") should equal(3)
WarmupII.stringMatch("abc", "abc") should equal(2)
WarmupII.stringMatch("abc", "axc") should equal(0)
}
"stringX, given a string" should """return a version where all the "x" have been removed. Except an "x" at the very start or end should not be removed""" in {
WarmupII.stringX("xxHxix") should equal("xHix")
WarmupII.stringX("abxxxcd") should equal("abcd")
WarmupII.stringX("xabxxxcdx") should equal("xabcdx")
}
"altPairs, given a string" should """return a string made of the chars at indexes 0,1, 4,5, 8,9 ... so "kittens" yields "kien"""" in {
WarmupII.altPairs("kitten") should equal("kien")
WarmupII.altPairs("Chocolate") should equal("Chole")
WarmupII.altPairs("CodingHorror") should equal("Congrr")
}
"stringYak, given a string" should """return a version where all the "yak" are removed, but the "a" can be any char. The "yak" strings will not overlap.""" in {
WarmupII.stringYak("yakpak") should equal("pak")
WarmupII.stringYak("pakyak") should equal("pak")
WarmupII.stringYak("yak123ya") should equal("123ya")
}
"array667, given an array of ints" should """return the number of times that two 6's are next to each other in the array. Also count instances where the second "6" is actually a 7.""" in {
WarmupII.array667(List(6, 6, 2)) should equal(1)
WarmupII.array667(List(6, 6, 2, 6)) should equal(1)
WarmupII.array667(List(6, 7, 2, 6)) should equal(1)
WarmupII.array667(List(6, 6, 2, 6, 7)) should equal(2)
}
"Given an array of ints, we'll say that a triple is a value appearing 3 times in a row in the array & noTriples" should "return true if the array does not contain any triples." in {
WarmupII.noTriples(List(1, 1, 2, 2, 1)) should equal(true)
WarmupII.noTriples(List(1, 1, 2, 2, 2, 1)) should equal(false)
WarmupII.noTriples(List(1, 1, 1, 2, 2, 2, 1)) should equal(false)
}
"Given an array of ints, has271" should """return true if it contains a 2, 7, 1 pattern -- a value, followed by the value plus 5, followed by the value minus 1. Additionally the 271 counts even if the "1" differs by 2 or less from the correct value.""" in {
WarmupII.has271(List(1, 2, 7, 1)) should equal(true)
WarmupII.has271(List(1, 2, 8, 1)) should equal(false)
WarmupII.has271(List(2, 7, 1)) should equal(true)
}
}
| codingkapoor/scala-coding-bat | src/test/scala/com/codingkapoor/codingbat/WarmupIISpec.scala | Scala | mit | 5,837 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.parquet
import scala.collection.JavaConversions._
import org.apache.hadoop.conf.Configuration
import org.apache.parquet.schema.OriginalType._
import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName._
import org.apache.parquet.schema.Type.Repetition._
import org.apache.parquet.schema._
import org.apache.spark.sql.execution.datasources.parquet.CatalystSchemaConverter.{MAX_PRECISION_FOR_INT32, MAX_PRECISION_FOR_INT64, maxPrecisionForBytes}
import org.apache.spark.sql.types._
import org.apache.spark.sql.{AnalysisException, SQLConf}
/**
* This converter class is used to convert Parquet [[MessageType]] to Spark SQL [[StructType]] and
* vice versa.
*
* Parquet format backwards-compatibility rules are respected when converting Parquet
* [[MessageType]] schemas.
*
* @see https://github.com/apache/parquet-format/blob/master/LogicalTypes.md
*
* @constructor
* @param assumeBinaryIsString Whether unannotated BINARY fields should be assumed to be Spark SQL
 *        [[StringType]] fields when converting a Parquet [[MessageType]] to Spark SQL
* [[StructType]].
* @param assumeInt96IsTimestamp Whether unannotated INT96 fields should be assumed to be Spark SQL
* [[TimestampType]] fields when converting Parquet a [[MessageType]] to Spark SQL
* [[StructType]]. Note that Spark SQL [[TimestampType]] is similar to Hive timestamp, which
* has optional nanosecond precision, but different from `TIME_MILLS` and `TIMESTAMP_MILLIS`
* described in Parquet format spec.
* @param followParquetFormatSpec Whether to generate standard DECIMAL, LIST, and MAP structure when
* converting Spark SQL [[StructType]] to Parquet [[MessageType]]. For Spark 1.4.x and
* prior versions, Spark SQL only supports decimals with a max precision of 18 digits, and
* uses non-standard LIST and MAP structure. Note that the current Parquet format spec is
* backwards-compatible with these settings. If this argument is set to `false`, we fallback
* to old style non-standard behaviors.
*/
private[parquet] class CatalystSchemaConverter(
assumeBinaryIsString: Boolean = SQLConf.PARQUET_BINARY_AS_STRING.defaultValue.get,
assumeInt96IsTimestamp: Boolean = SQLConf.PARQUET_INT96_AS_TIMESTAMP.defaultValue.get,
followParquetFormatSpec: Boolean = SQLConf.PARQUET_FOLLOW_PARQUET_FORMAT_SPEC.defaultValue.get
) {
def this(conf: SQLConf) = this(
assumeBinaryIsString = conf.isParquetBinaryAsString,
assumeInt96IsTimestamp = conf.isParquetINT96AsTimestamp,
followParquetFormatSpec = conf.followParquetFormatSpec)
def this(conf: Configuration) = this(
assumeBinaryIsString = conf.get(SQLConf.PARQUET_BINARY_AS_STRING.key).toBoolean,
assumeInt96IsTimestamp = conf.get(SQLConf.PARQUET_INT96_AS_TIMESTAMP.key).toBoolean,
followParquetFormatSpec = conf.get(SQLConf.PARQUET_FOLLOW_PARQUET_FORMAT_SPEC.key).toBoolean)
/**
* Converts Parquet [[MessageType]] `parquetSchema` to a Spark SQL [[StructType]].
*/
def convert(parquetSchema: MessageType): StructType = convert(parquetSchema.asGroupType())
private def convert(parquetSchema: GroupType): StructType = {
val fields = parquetSchema.getFields.map { field =>
field.getRepetition match {
case OPTIONAL =>
StructField(field.getName, convertField(field), nullable = true)
case REQUIRED =>
StructField(field.getName, convertField(field), nullable = false)
case REPEATED =>
// A repeated field that is neither contained by a `LIST`- or `MAP`-annotated group nor
// annotated by `LIST` or `MAP` should be interpreted as a required list of required
// elements where the element type is the type of the field.
val arrayType = ArrayType(convertField(field), containsNull = false)
StructField(field.getName, arrayType, nullable = false)
}
}
StructType(fields)
}
/**
* Converts a Parquet [[Type]] to a Spark SQL [[DataType]].
*/
def convertField(parquetType: Type): DataType = parquetType match {
case t: PrimitiveType => convertPrimitiveField(t)
case t: GroupType => convertGroupField(t.asGroupType())
}
private def convertPrimitiveField(field: PrimitiveType): DataType = {
val typeName = field.getPrimitiveTypeName
val originalType = field.getOriginalType
def typeString =
if (originalType == null) s"$typeName" else s"$typeName ($originalType)"
def typeNotImplemented() =
throw new AnalysisException(s"Parquet type not yet supported: $typeString")
def illegalType() =
throw new AnalysisException(s"Illegal Parquet type: $typeString")
// When maxPrecision = -1, we skip precision range check, and always respect the precision
// specified in field.getDecimalMetadata. This is useful when interpreting decimal types stored
// as binaries with variable lengths.
def makeDecimalType(maxPrecision: Int = -1): DecimalType = {
val precision = field.getDecimalMetadata.getPrecision
val scale = field.getDecimalMetadata.getScale
CatalystSchemaConverter.analysisRequire(
maxPrecision == -1 || 1 <= precision && precision <= maxPrecision,
s"Invalid decimal precision: $typeName cannot store $precision digits (max $maxPrecision)")
DecimalType(precision, scale)
}
typeName match {
case BOOLEAN => BooleanType
case FLOAT => FloatType
case DOUBLE => DoubleType
case INT32 =>
originalType match {
case INT_8 => ByteType
case INT_16 => ShortType
case INT_32 | null => IntegerType
case DATE => DateType
case DECIMAL => makeDecimalType(MAX_PRECISION_FOR_INT32)
case TIME_MILLIS => typeNotImplemented()
case _ => illegalType()
}
case INT64 =>
originalType match {
case INT_64 | null => LongType
case DECIMAL => makeDecimalType(MAX_PRECISION_FOR_INT64)
case TIMESTAMP_MILLIS => typeNotImplemented()
case _ => illegalType()
}
case INT96 =>
CatalystSchemaConverter.analysisRequire(
assumeInt96IsTimestamp,
"INT96 is not supported unless it's interpreted as timestamp. " +
s"Please try to set ${SQLConf.PARQUET_INT96_AS_TIMESTAMP.key} to true.")
TimestampType
case BINARY =>
originalType match {
case UTF8 | ENUM => StringType
case null if assumeBinaryIsString => StringType
case null => BinaryType
case DECIMAL => makeDecimalType()
case _ => illegalType()
}
case FIXED_LEN_BYTE_ARRAY =>
originalType match {
case DECIMAL => makeDecimalType(maxPrecisionForBytes(field.getTypeLength))
case INTERVAL => typeNotImplemented()
case _ => illegalType()
}
case _ => illegalType()
}
}
private def convertGroupField(field: GroupType): DataType = {
Option(field.getOriginalType).fold(convert(field): DataType) {
// A Parquet list is represented as a 3-level structure:
//
// <list-repetition> group <name> (LIST) {
// repeated group list {
// <element-repetition> <element-type> element;
// }
// }
//
// However, according to the most recent Parquet format spec (not released yet up until
// writing), some 2-level structures are also recognized for backwards-compatibility. Thus,
// we need to check whether the 2nd level or the 3rd level refers to list element type.
//
// See: https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#lists
case LIST =>
CatalystSchemaConverter.analysisRequire(
field.getFieldCount == 1, s"Invalid list type $field")
val repeatedType = field.getType(0)
CatalystSchemaConverter.analysisRequire(
repeatedType.isRepetition(REPEATED), s"Invalid list type $field")
if (isElementType(repeatedType, field.getName)) {
ArrayType(convertField(repeatedType), containsNull = false)
} else {
val elementType = repeatedType.asGroupType().getType(0)
val optional = elementType.isRepetition(OPTIONAL)
ArrayType(convertField(elementType), containsNull = optional)
}
// scalastyle:off
// `MAP_KEY_VALUE` is for backwards-compatibility
// See: https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#backward-compatibility-rules-1
// scalastyle:on
case MAP | MAP_KEY_VALUE =>
CatalystSchemaConverter.analysisRequire(
field.getFieldCount == 1 && !field.getType(0).isPrimitive,
s"Invalid map type: $field")
val keyValueType = field.getType(0).asGroupType()
CatalystSchemaConverter.analysisRequire(
keyValueType.isRepetition(REPEATED) && keyValueType.getFieldCount == 2,
s"Invalid map type: $field")
val keyType = keyValueType.getType(0)
CatalystSchemaConverter.analysisRequire(
keyType.isPrimitive,
s"Map key type is expected to be a primitive type, but found: $keyType")
val valueType = keyValueType.getType(1)
val valueOptional = valueType.isRepetition(OPTIONAL)
MapType(
convertField(keyType),
convertField(valueType),
valueContainsNull = valueOptional)
case _ =>
throw new AnalysisException(s"Unrecognized Parquet type: $field")
}
}
// scalastyle:off
// Here we implement Parquet LIST backwards-compatibility rules.
// See: https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#backward-compatibility-rules
// scalastyle:on
private def isElementType(repeatedType: Type, parentName: String): Boolean = {
{
// For legacy 2-level list types with primitive element type, e.g.:
//
// // List<Integer> (nullable list, non-null elements)
// optional group my_list (LIST) {
// repeated int32 element;
// }
//
repeatedType.isPrimitive
} || {
// For legacy 2-level list types whose element type is a group type with 2 or more fields,
// e.g.:
//
// // List<Tuple<String, Integer>> (nullable list, non-null elements)
// optional group my_list (LIST) {
// repeated group element {
// required binary str (UTF8);
// required int32 num;
// };
// }
//
repeatedType.asGroupType().getFieldCount > 1
} || {
// For legacy 2-level list types generated by parquet-avro (Parquet version < 1.6.0), e.g.:
//
// // List<OneTuple<String>> (nullable list, non-null elements)
// optional group my_list (LIST) {
// repeated group array {
// required binary str (UTF8);
// };
// }
//
repeatedType.getName == "array"
} || {
// For Parquet data generated by parquet-thrift, e.g.:
//
// // List<OneTuple<String>> (nullable list, non-null elements)
// optional group my_list (LIST) {
// repeated group my_list_tuple {
// required binary str (UTF8);
// };
// }
//
repeatedType.getName == s"${parentName}_tuple"
}
}
/**
* Converts a Spark SQL [[StructType]] to a Parquet [[MessageType]].
*/
def convert(catalystSchema: StructType): MessageType = {
Types.buildMessage().addFields(catalystSchema.map(convertField): _*).named("root")
}
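
  // Illustrative example (schema assumed): converting
  //   StructType(Seq(StructField("id", IntegerType, nullable = false)))
  // produces a Parquet schema equivalent to
  //   message root {
  //     required int32 id;
  //   }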
/**
* Converts a Spark SQL [[StructField]] to a Parquet [[Type]].
*/
def convertField(field: StructField): Type = {
convertField(field, if (field.nullable) OPTIONAL else REQUIRED)
}
private def convertField(field: StructField, repetition: Type.Repetition): Type = {
CatalystSchemaConverter.checkFieldName(field.name)
field.dataType match {
// ===================
      // Simple atomic types
// ===================
case BooleanType =>
Types.primitive(BOOLEAN, repetition).named(field.name)
case ByteType =>
Types.primitive(INT32, repetition).as(INT_8).named(field.name)
case ShortType =>
Types.primitive(INT32, repetition).as(INT_16).named(field.name)
case IntegerType =>
Types.primitive(INT32, repetition).named(field.name)
case LongType =>
Types.primitive(INT64, repetition).named(field.name)
case FloatType =>
Types.primitive(FLOAT, repetition).named(field.name)
case DoubleType =>
Types.primitive(DOUBLE, repetition).named(field.name)
case StringType =>
Types.primitive(BINARY, repetition).as(UTF8).named(field.name)
case DateType =>
Types.primitive(INT32, repetition).as(DATE).named(field.name)
// NOTE: Spark SQL TimestampType is NOT a well defined type in Parquet format spec.
// As stated in PARQUET-323, Parquet `INT96` was originally introduced to represent nanosecond
// timestamp in Impala for some historical reasons, it's not recommended to be used for any
// other types and will probably be deprecated in future Parquet format spec. That's the
// reason why Parquet format spec only defines `TIMESTAMP_MILLIS` and `TIMESTAMP_MICROS` which
// are both logical types annotating `INT64`.
//
// Originally, Spark SQL uses the same nanosecond timestamp type as Impala and Hive. Starting
// from Spark 1.5.0, we resort to a timestamp type with 100 ns precision so that we can store
// a timestamp into a `Long`. This design decision is subject to change though, for example,
// we may resort to microsecond precision in the future.
//
// For Parquet, we plan to write all `TimestampType` value as `TIMESTAMP_MICROS`, but it's
// currently not implemented yet because parquet-mr 1.7.0 (the version we're currently using)
// hasn't implemented `TIMESTAMP_MICROS` yet.
//
// TODO Implements `TIMESTAMP_MICROS` once parquet-mr has that.
case TimestampType =>
Types.primitive(INT96, repetition).named(field.name)
case BinaryType =>
Types.primitive(BINARY, repetition).named(field.name)
// =====================================
// Decimals (for Spark version <= 1.4.x)
// =====================================
// Spark 1.4.x and prior versions only support decimals with a maximum precision of 18 and
// always store decimals in fixed-length byte arrays. To keep compatibility with these older
// versions, here we convert decimals with all precisions to `FIXED_LEN_BYTE_ARRAY` annotated
// by `DECIMAL`.
case DecimalType.Fixed(precision, scale) if !followParquetFormatSpec =>
Types
.primitive(FIXED_LEN_BYTE_ARRAY, repetition)
.as(DECIMAL)
.precision(precision)
.scale(scale)
.length(CatalystSchemaConverter.minBytesForPrecision(precision))
.named(field.name)
// =====================================
      // Decimals (follow Parquet format spec)
// =====================================
// Uses INT32 for 1 <= precision <= 9
case DecimalType.Fixed(precision, scale)
if precision <= MAX_PRECISION_FOR_INT32 && followParquetFormatSpec =>
Types
.primitive(INT32, repetition)
.as(DECIMAL)
.precision(precision)
.scale(scale)
.named(field.name)
// Uses INT64 for 1 <= precision <= 18
case DecimalType.Fixed(precision, scale)
if precision <= MAX_PRECISION_FOR_INT64 && followParquetFormatSpec =>
Types
.primitive(INT64, repetition)
.as(DECIMAL)
.precision(precision)
.scale(scale)
.named(field.name)
// Uses FIXED_LEN_BYTE_ARRAY for all other precisions
case DecimalType.Fixed(precision, scale) if followParquetFormatSpec =>
Types
.primitive(FIXED_LEN_BYTE_ARRAY, repetition)
.as(DECIMAL)
.precision(precision)
.scale(scale)
.length(CatalystSchemaConverter.minBytesForPrecision(precision))
.named(field.name)
// ===================================================
// ArrayType and MapType (for Spark versions <= 1.4.x)
// ===================================================
// Spark 1.4.x and prior versions convert `ArrayType` with nullable elements into a 3-level
// `LIST` structure. This behavior is somewhat a hybrid of parquet-hive and parquet-avro
// (1.6.0rc3): the 3-level structure is similar to parquet-hive while the 3rd level element
// field name "array" is borrowed from parquet-avro.
case ArrayType(elementType, nullable @ true) if !followParquetFormatSpec =>
// <list-repetition> group <name> (LIST) {
// optional group bag {
// repeated <element-type> array;
// }
// }
ConversionPatterns.listType(
repetition,
field.name,
Types
.buildGroup(REPEATED)
// "array_element" is the name chosen by parquet-hive (1.7.0 and prior version)
.addField(convertField(StructField("array", elementType, nullable)))
.named("bag"))
// Spark 1.4.x and prior versions convert ArrayType with non-nullable elements into a 2-level
// LIST structure. This behavior mimics parquet-avro (1.6.0rc3). Note that this case is
// covered by the backwards-compatibility rules implemented in `isElementType()`.
case ArrayType(elementType, nullable @ false) if !followParquetFormatSpec =>
// <list-repetition> group <name> (LIST) {
// repeated <element-type> element;
// }
ConversionPatterns.listType(
repetition,
field.name,
// "array" is the name chosen by parquet-avro (1.7.0 and prior version)
convertField(StructField("array", elementType, nullable), REPEATED))
// Spark 1.4.x and prior versions convert MapType into a 3-level group annotated by
// MAP_KEY_VALUE. This is covered by `convertGroupField(field: GroupType): DataType`.
case MapType(keyType, valueType, valueContainsNull) if !followParquetFormatSpec =>
// <map-repetition> group <name> (MAP) {
// repeated group map (MAP_KEY_VALUE) {
// required <key-type> key;
// <value-repetition> <value-type> value;
// }
// }
ConversionPatterns.mapType(
repetition,
field.name,
convertField(StructField("key", keyType, nullable = false)),
convertField(StructField("value", valueType, valueContainsNull)))
// ==================================================
// ArrayType and MapType (follow Parquet format spec)
// ==================================================
case ArrayType(elementType, containsNull) if followParquetFormatSpec =>
// <list-repetition> group <name> (LIST) {
// repeated group list {
// <element-repetition> <element-type> element;
// }
// }
Types
.buildGroup(repetition).as(LIST)
.addField(
Types.repeatedGroup()
.addField(convertField(StructField("element", elementType, containsNull)))
.named("list"))
.named(field.name)
case MapType(keyType, valueType, valueContainsNull) =>
// <map-repetition> group <name> (MAP) {
// repeated group key_value {
// required <key-type> key;
// <value-repetition> <value-type> value;
// }
// }
Types
.buildGroup(repetition).as(MAP)
.addField(
Types
.repeatedGroup()
.addField(convertField(StructField("key", keyType, nullable = false)))
.addField(convertField(StructField("value", valueType, valueContainsNull)))
.named("key_value"))
.named(field.name)
// ===========
// Other types
// ===========
case StructType(fields) =>
fields.foldLeft(Types.buildGroup(repetition)) { (builder, field) =>
builder.addField(convertField(field))
}.named(field.name)
case udt: UserDefinedType[_] =>
convertField(field.copy(dataType = udt.sqlType))
case _ =>
throw new AnalysisException(s"Unsupported data type $field.dataType")
}
}
}
private[parquet] object CatalystSchemaConverter {
def checkFieldName(name: String): Unit = {
// ,;{}()\n\t= and space are special characters in Parquet schema
analysisRequire(
!name.matches(".*[ ,;{}()\n\t=].*"),
s"""Attribute name "$name" contains invalid character(s) among " ,;{}()\\n\\t=".
|Please use alias to rename it.
""".stripMargin.split("\n").mkString(" "))
}
def checkFieldNames(schema: StructType): StructType = {
schema.fieldNames.foreach(checkFieldName)
schema
}
def analysisRequire(f: => Boolean, message: String): Unit = {
if (!f) {
throw new AnalysisException(message)
}
}
private def computeMinBytesForPrecision(precision : Int) : Int = {
var numBytes = 1
while (math.pow(2.0, 8 * numBytes - 1) < math.pow(10.0, precision)) {
numBytes += 1
}
numBytes
}
private val MIN_BYTES_FOR_PRECISION = Array.tabulate[Int](39)(computeMinBytesForPrecision)
// Returns the minimum number of bytes needed to store a decimal with a given `precision`.
def minBytesForPrecision(precision : Int) : Int = {
if (precision < MIN_BYTES_FOR_PRECISION.length) {
MIN_BYTES_FOR_PRECISION(precision)
} else {
computeMinBytesForPrecision(precision)
}
}
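
  // Worked examples: minBytesForPrecision(9) == 4 (2^31 - 1 can hold any 9-digit value),
  // minBytesForPrecision(18) == 8 (2^63 - 1 covers 18 digits), and
  // minBytesForPrecision(38) == 16 (2^127 - 1 covers the maximum precision of 38).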
val MAX_PRECISION_FOR_INT32 = maxPrecisionForBytes(4)
val MAX_PRECISION_FOR_INT64 = maxPrecisionForBytes(8)
// Max precision of a decimal value stored in `numBytes` bytes
def maxPrecisionForBytes(numBytes: Int): Int = {
Math.round( // convert double to long
Math.floor(Math.log10( // number of base-10 digits
Math.pow(2, 8 * numBytes - 1) - 1))) // max value stored in numBytes
.asInstanceOf[Int]
}
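
  // Worked examples: maxPrecisionForBytes(4) == 9 (floor(log10(2^31 - 1)) = 9) and
  // maxPrecisionForBytes(8) == 18 (floor(log10(2^63 - 1)) = 18), which is where
  // MAX_PRECISION_FOR_INT32 and MAX_PRECISION_FOR_INT64 above come from.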
}
| tophua/spark1.52 | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/CatalystSchemaConverter.scala | Scala | apache-2.0 | 24,488 |
/**
* Copyright (C) 2011 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.externalcontext
import URLRewriter._
import WSRPURLRewriter._
import java.net.URL
import java.net.URLEncoder
import java.util.concurrent.Callable
import java.util.{List ⇒ JList}
import org.orbeon.oxf.pipeline.api.ExternalContext
import org.orbeon.oxf.util.NetUtils
import org.orbeon.oxf.util.URLRewriterUtils
import org.orbeon.oxf.xforms.processor.XFormsResourceServer.DynamicResourcesPath
// This URL rewriter rewrites URLs using the WSRP encoding
class WSRPURLRewriter(
retrievePathMatchers: ⇒ JList[URLRewriterUtils.PathMatcher],
request: ExternalContext.Request,
wsrpEncodeResources: Boolean)
extends URLRewriter {
// We don't initialize the matchers right away, because when the rewriter is created, they may not be available.
    // Specifically, the rewriter is typically created along with the ExternalContext and PipelineContext, before the PFC has
// been able to place the matchers in the PipelineContext.
private var pathMatchers: JList[URLRewriterUtils.PathMatcher] = null
// For Java callers, use Callable
def this(getPathMatchers: Callable[JList[URLRewriterUtils.PathMatcher]], request: ExternalContext.Request, wsrpEncodeResources: Boolean) =
this(getPathMatchers.call, request, wsrpEncodeResources)
private def getPathMatchers = {
if (pathMatchers eq null)
pathMatchers = Option(retrievePathMatchers) getOrElse URLRewriterUtils.EMPTY_PATH_MATCHER_LIST
pathMatchers
}
def rewriteRenderURL(urlString: String) =
rewritePortletURL(urlString, URLTypeRender, null, null)
def rewriteRenderURL(urlString: String, portletMode: String, windowState: String) =
rewritePortletURL(urlString, URLTypeRender, portletMode, windowState)
def rewriteActionURL(urlString: String) =
rewritePortletURL(urlString, URLTypeBlockingAction, null, null)
def rewriteActionURL(urlString: String, portletMode: String, windowState: String) =
rewritePortletURL(urlString, URLTypeBlockingAction, portletMode, windowState)
def rewriteResourceURL(urlString: String, rewriteMode: Int) =
rewriteResourceURL(urlString, wsrpEncodeResources) // the mode is ignored
def getNamespacePrefix = PrefixTag
private def rewritePortletURL(urlString: String, urlType: Int, portletMode: String, windowState: String): String = {
// Case where a protocol is specified OR it's just a fragment: the URL is left untouched
if (NetUtils.urlHasProtocol(urlString) || urlString.startsWith("#"))
return urlString
// TEMP HACK to avoid multiple rewrites
// TODO: Find out where it happens. Check XFOutputControl with image mediatype for example.
if (urlString.indexOf("wsrp_rewrite") != -1)
return urlString
// Parse URL
val baseURL = new URL("http", "example.org", request.getRequestPath)
val u = new URL(baseURL, urlString)
// Decode query string
val parameters = NetUtils.decodeQueryStringPortlet(u.getQuery)
// Add special path parameter
val path =
if (urlString.startsWith("?"))
// This is a special case that appears to be implemented
// in Web browsers as a convenience. Users may use it.
request.getRequestPath
else
// Regular case, use parsed path
URLRewriterUtils.getRewritingContext("wsrp", "") + u.getPath
parameters.put(PathParameterName, Array(path))
// Encode as "navigational state"
val navigationalState = NetUtils.encodeQueryString2(parameters)
// Encode the URL a la WSRP
encodePortletURL(urlType, navigationalState, portletMode, windowState, u.getRef, secure = false)
}
def rewriteResourceURL(urlString: String, wsrpEncodeResources: Boolean): String = {
// Always encode dynamic resources
if (wsrpEncodeResources || urlString == "/xforms-server" || urlString.startsWith(DynamicResourcesPath)) {
// First rewrite path to support versioned resources
val rewrittenPath = URLRewriterUtils.rewriteResourceURL(request, urlString, getPathMatchers, REWRITE_MODE_ABSOLUTE_PATH_NO_CONTEXT)
// Then do the WSRP encoding
rewritePortletURL(rewrittenPath, URLTypeResource, null, null)
} else
// Generate resource served by the servlet
URLRewriterUtils.rewriteResourceURL(request, urlString, getPathMatchers, REWRITE_MODE_ABSOLUTE_PATH)
}
}
object WSRPURLRewriter {
val PathParameterName = "orbeon.path"
private val URLTypeBlockingAction = 1
private val URLTypeRender = 2
private val URLTypeResource = 3
val URLTypeBlockingActionString = "blockingAction"
val URLTypeRenderString = "render"
val URLTypeResourceString = "resource"
private val URLTypes = Map(
URLTypeBlockingAction → URLTypeBlockingActionString,
URLTypeRender → URLTypeRenderString,
URLTypeResource → URLTypeResourceString
)
val BaseTag = "wsrp_rewrite"
val StartTag = BaseTag + '?'
val EndTag = '/' + BaseTag
val PrefixTag = BaseTag + '_'
val URLTypeParam = "wsrp-urlType"
val ModeParam = "wsrp-mode"
val WindowStateParam = "wsrp-windowState"
val NavigationalStateParam = "wsrp-navigationalState"
val BaseTagLength = BaseTag.length
val StartTagLength = StartTag.length
val EndTagLength = EndTag.length
val PrefixTagLength = PrefixTag.length
/**
* Encode an URL into a WSRP pattern including the string "wsrp_rewrite".
*
* This does not call the portlet API. Used by Portlet2URLRewriter.
*/
def encodePortletURL(urlType: Int, navigationalState: String, mode: String, windowState: String, fragmentId: String, secure: Boolean): String = {
val sb = new StringBuilder(StartTag)
sb.append(URLTypeParam)
sb.append('=')
val urlTypeString = URLTypes.getOrElse(urlType, throw new IllegalArgumentException)
sb.append(urlTypeString)
// Encode mode
if (mode ne null) {
sb.append('&')
sb.append(ModeParam)
sb.append('=')
sb.append(mode)
}
// Encode window state
if (windowState ne null) {
sb.append('&')
sb.append(WindowStateParam)
sb.append('=')
sb.append(windowState)
}
// Encode navigational state
if (navigationalState ne null) {
sb.append('&')
sb.append(NavigationalStateParam)
sb.append('=')
sb.append(URLEncoder.encode(navigationalState, "utf-8"))
}
sb.append(EndTag)
sb.toString
}
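  // Illustrative sketch, not part of the original source: with the constants above, a call such as
  //   encodePortletURL(URLTypeResource, "orbeon.path=/my/page", null, null, null, secure = false)
  // is expected to produce a string of the shape
  //   wsrp_rewrite?wsrp-urlType=resource&wsrp-navigationalState=orbeon.path%3D%2Fmy%2Fpage/wsrp_rewrite
  // which the portlet container later expands into a concrete URL.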
} | evlist/orbeon-forms | src/main/scala/org/orbeon/oxf/externalcontext/WSRPURLRewriter.scala | Scala | lgpl-2.1 | 7,495 |
package us.feliscat.time.en
import us.feliscat.m17n.English
import us.feliscat.time.MultiLingualTimeExtractorInGlossaryEntries
import us.feliscat.util.LibrariesConfig
/**
* <pre>
* Created on 2017/02/08.
* </pre>
*
* @author K.Sakamoto
*/
object EnglishTimeExtractorInGlossaryEntries extends MultiLingualTimeExtractorInGlossaryEntries with English {
override protected val glossaryEntries: String = LibrariesConfig.glossaryEntriesForTimeExtractorInEnglish
}
| ktr-skmt/FelisCatusZero-multilingual | libraries/src/main/scala/us/feliscat/time/en/EnglishTimeExtractorInGlossaryEntries.scala | Scala | apache-2.0 | 474 |
package test.verifier
import org.scalatest.{GivenWhenThen, FlatSpec}
import org.scalatest.matchers.ShouldMatchers._
import tap.ast._
import tap.{LocalId, ModuleId, InstId}
import tap.verifier.defs.{DefinitionsLookup, ModuleDefinitions}
import tap.verifier.ModuleVerifier
import tap.verifier.errors._
import tap.types._
import tap.types.Natives._
import tap.types.kinds._
import tap.types.classes.{Qual, IsIn, TypeclassDef}
import tap.types.classes.ClassEnvironments.Inst
import tap.ir._
import language.reflectiveCalls
import test.TapNodeEquality
import tap.types.inference.TIEnv
class ModuleVerifierTests extends FlatSpec with TapNodeEquality with GivenWhenThen {
val nullDefs = ModuleDefinitions.defaults
val nullScopes = Map("Test" -> DefinitionsLookup.empty)
val testDefs = nullDefs.copy(
tcons = nullDefs.tcons ++ Map(ModuleId("Test", "X") -> TCon(ModuleId("Test", "X"), Star),
ModuleId("Test", "X1") -> TCon(ModuleId("Test", "X1"), Kfun(Star, Star))),
dcons = nullDefs.dcons ++ Map(ModuleId("Test", "X") -> TCon(ModuleId("Test", "X"), Star)),
tcs = nullDefs.tcs ++ Map(ModuleId("Test", "Y") -> TypeclassDef(ModuleId("Test", "Y"), Nil, List(TVar("a", Star)), Set("yfn"), Set.empty),
ModuleId("Test", "Y2") -> TypeclassDef(ModuleId("Test", "Y2"), Nil, List(TVar("a", Star), TVar("b", Star)), Set("yfn"), Set.empty)),
tcis = nullDefs.tcis ++ Map(ModuleId("Test", "Y") -> List(Inst("Test", Nil, IsIn(ModuleId("Test", "Y"), List(tString))))),
mts = nullDefs.mts ++ Map(ModuleId("Test", "z") -> Qual(Nil, TCon(ModuleId("Test", "X"), Star)),
ModuleId("Test", "yfn") -> Qual(List(IsIn(ModuleId("Test", "Y"), List(TVar("a", Star)))), TVar("a", Star) fn TVar("a", Star))),
mis = nullDefs.mis ++ Map(ModuleId("Test", "z") -> ValueReadExpr(ModuleId("Test", "X")))
)
val testScopes = Map("Test" -> DefinitionsLookup.empty
.addTCon("X", ModuleId("Test", "X"))
.addTCon("X1", ModuleId("Test", "X1"))
.addDCon("X", ModuleId("Test", "X"))
.addClass("Y", ModuleId("Test", "Y"))
.addClass("Y2", ModuleId("Test", "Y2"))
.addMember("z", ModuleId("Test", "z")))
// ------------------------------------------------------------------------
behavior of "apply"
it should "extend the verifiedDefs with the definitions found in all the passed modules" in {
val mA = ASTModule("ModuleA", List(
ASTDataType("TypeA", List("a"), List(
ASTDataCon("DataA2", List(ASTTypeVar("a"))),
ASTDataCon("DataA1", Nil))),
ASTClass("ClassA", Nil, List("a"), List(
ASTClassMemberDef("cmemberA", ASTQType(Nil, ASTFunctionType(List(ASTTypeVar("a"), ASTTypeVar("a"))))))),
ASTClassInst("ClassA", List(ASTClassRef("ClassA", List("a"))), List(ASTTypeApply(ASTTypeCon("TypeA"), List(ASTTypeVar("a")))), List(
ASTClassMemberImpl("cmemberA", ASTFunction(List("x"), ASTValueRead("x"))))),
ASTLet("memberA", ASTFunction(List("x"), ASTValueRead("x")))))
val mB = ASTModule("ModuleB", List(
ASTDataType("TypeB", Nil, List(
ASTDataCon("DataB", Nil))),
ASTClass("ClassB", Nil, List("a"), List(
ASTClassMemberDef("cmemberB", ASTQType(Nil, ASTFunctionType(List(ASTTypeVar("a"), ASTTypeVar("a"))))))),
ASTClassInst("ClassB", Nil, List(ASTTypeCon("TypeB")), List(
ASTClassMemberImpl("cmemberB", ASTFunction(List("x"), ASTValueRead("x"))))),
ASTLet("memberB", ASTFunction(List("x"), ASTValueRead("x")))))
val v = new ModuleVerifier(Map(
"Test" -> testScopes("Test"),
"ModuleA" -> DefinitionsLookup.empty
.addTCon("TypeA", ModuleId("ModuleA", "TypeA"))
.addDCon("DataA1", ModuleId("ModuleA", "DataA1"))
.addDCon("DataA2", ModuleId("ModuleA", "DataA2"))
.addClass("ClassA", ModuleId("ModuleA", "ClassA"))
.addMember("cmemberA", ModuleId("ModuleA", "cmemberA"))
.addMember("memberA", ModuleId("ModuleA", "memberA")),
"ModuleB" -> DefinitionsLookup.empty
.addTCon("TypeB", ModuleId("ModuleB", "TypeB"))
.addDCon("DataB", ModuleId("ModuleB", "DataB"))
.addClass("ClassB", ModuleId("ModuleB", "ClassB"))
.addMember("cmemberB", ModuleId("ModuleB", "cmemberB"))
.addMember("memberB", ModuleId("ModuleB", "memberB"))
))
val (ctx, defs) = v.apply(Seq(mA, mB), testDefs, TIEnv.empty)
defs.tcons should be === testDefs.tcons +
(ModuleId("ModuleA", "TypeA") -> TCon(ModuleId("ModuleA", "TypeA"), Kfun(Star, Star))) +
(ModuleId("ModuleB", "TypeB") -> TCon(ModuleId("ModuleB", "TypeB"), Star))
defs.dcons should be === testDefs.dcons +
(ModuleId("ModuleA", "DataA2") -> Forall(0, List(Star), TGen(0, 0) fn TAp(TCon(ModuleId("ModuleA", "TypeA"), Kfun(Star, Star)), TGen(0, 0)))) +
(ModuleId("ModuleA", "DataA1") -> Forall(1, List(Star), TAp(TCon(ModuleId("ModuleA", "TypeA"), Kfun(Star, Star)), TGen(1, 0)))) +
(ModuleId("ModuleB", "DataB") -> TCon(ModuleId("ModuleB", "TypeB"), Star))
defs.tcs should be === testDefs.tcs +
(ModuleId("ModuleA", "ClassA") -> TypeclassDef(ModuleId("ModuleA", "ClassA"), List(), List(TVar("a", Star)), Set("cmemberA"), Set())) +
(ModuleId("ModuleB", "ClassB") -> TypeclassDef(ModuleId("ModuleB", "ClassB"), List(), List(TVar("a", Star)), Set("cmemberB"), Set()))
defs.tcis should be === testDefs.tcis +
(ModuleId("ModuleA", "ClassA") -> List(Inst("ModuleA", List(IsIn(ModuleId("ModuleA", "ClassA"), List(TVar("a", Star)))), IsIn(ModuleId("ModuleA", "ClassA"), List(TAp(TCon(ModuleId("ModuleA", "TypeA"), Kfun(Star, Star)), TVar("a", Star))))))) +
(ModuleId("ModuleB", "ClassB") -> List(Inst("ModuleB", List(), IsIn(ModuleId("ModuleB", "ClassB"), List(TCon(ModuleId("ModuleB", "TypeB"), Star))))))
defs.mts should be === testDefs.mts +
(ModuleId("ModuleA", "cmemberA") -> Qual(List(IsIn(ModuleId("ModuleA", "ClassA"), List(TGen(2, 0)))), Forall(2, List(Star), TGen(2, 0) fn TGen(2, 0)))) +
(ModuleId("ModuleB", "cmemberB") -> Qual(List(IsIn(ModuleId("ModuleB", "ClassB"), List(TGen(3, 0)))), Forall(3, List(Star), TGen(3, 0) fn TGen(3, 0))))
val testMis = testDefs.mis +
(ModuleId("ModuleA", "memberA") -> FunctionExpr(Argument("x"), ValueReadExpr(LocalId("x")))) +
(InstId("ModuleA", ModuleId("ModuleA", "ClassA"), List(ModuleId("ModuleA", "TypeA")), "cmemberA") -> FunctionExpr(Argument("x"), ValueReadExpr(LocalId("x")))) +
(ModuleId("ModuleB", "memberB") -> FunctionExpr(Argument("x"), ValueReadExpr(LocalId("x")))) +
(InstId("ModuleB", ModuleId("ModuleB", "ClassB"), List(ModuleId("ModuleB", "TypeB")), "cmemberB") -> FunctionExpr(Argument("x"), ValueReadExpr(LocalId("x"))))
defs.mis.size should be === testMis.size
(defs.mis zip testMis) foreach { case ((k1, v1), (k2, v2)) =>
k1 should be === k2
v1 should equal(v2)
}
}
// ------------------------------------------------------------------------
behavior of "addDataTypeDefs"
it should "throw an error if the name of a type constructor conflicts with an imported definition" in {
val v = new ModuleVerifier(Map("Test" -> DefinitionsLookup(Map("A" -> ModuleId("Prelude", "A")), Map.empty, Map.empty, Map.empty)))
val dtd = ASTDataType("A", Nil, Nil)
evaluating {
v.addDataTypeDefs(TIEnv.empty, Seq("Test" -> dtd), nullDefs)
} should produce [NamespaceError]
}
it should "throw an error if the name of a data constructor conflicts with an imported definition" in {
val v = new ModuleVerifier(Map("Test" -> DefinitionsLookup(Map.empty, Map("B" -> ModuleId("Prelude", "B")), Map.empty, Map.empty)))
val dtd = ASTDataType("A", Nil, List(ASTDataCon("B", Nil)))
evaluating {
v.addDataTypeDefs(TIEnv.empty, Seq("Test" -> dtd), nullDefs)
} should produce [NamespaceError]
}
it should "throw an error if a type constructor is defined more than once in a module" in {
val v = new ModuleVerifier(nullScopes)
val dtd = ASTDataType("A", Nil, Nil)
evaluating {
v.addDataTypeDefs(TIEnv.empty, Seq(
"Test" -> dtd,
"Test" -> dtd), nullDefs)
} should produce [ModuleDuplicateDefinition]
}
it should "throw an error if a data constructor is defined more than once in a module" in {
Given("a duplicate dcon in the same definition")
val v1 = new ModuleVerifier(nullScopes)
evaluating {
v1.addDataTypeDefs(TIEnv.empty, Seq(
"Test" -> ASTDataType("A", Nil, List(ASTDataCon("Aa", Nil), ASTDataCon("Aa", Nil)))
), nullDefs)
} should produce [ModuleDuplicateDefinition]
Given("a duplicate dcon in different definitions")
val v2 = new ModuleVerifier(nullScopes)
evaluating {
v2.addDataTypeDefs(TIEnv.empty, Seq(
"Test" -> ASTDataType("A1", Nil, List(ASTDataCon("Aa", Nil))),
"Test" -> ASTDataType("A2", Nil, List(ASTDataCon("Aa", Nil)))
), nullDefs)
} should produce [ModuleDuplicateDefinition]
}
it should "handle type constructors without type variables" in {
val v = new ModuleVerifier(nullScopes)
val dtd = ASTDataType("A", Nil, Nil)
val (_, defs) = v.addDataTypeDefs(TIEnv.empty, Seq("Test" -> dtd), nullDefs)
defs.tcons should be === nullDefs.tcons + (ModuleId("Test", "A") -> TCon(ModuleId("Test", "A"), Star))
}
it should "handle type constructors with type variables" in {
val v = new ModuleVerifier(nullScopes)
val dtd = ASTDataType("A", List("p", "q"), Nil)
val (_, defs) = v.addDataTypeDefs(TIEnv.empty, Seq("Test" -> dtd), nullDefs)
defs.tcons should be === nullDefs.tcons + (ModuleId("Test", "A") -> TCon(ModuleId("Test", "A"), Kfun(Star, Kfun(Star, Star))))
}
it should "handle data constructors with no arguments" in {
val v = new ModuleVerifier(nullScopes)
val dtd = ASTDataType("A", Nil, List(ASTDataCon("B", Nil)))
val (_, defs) = v.addDataTypeDefs(TIEnv.empty, Seq("Test" -> dtd), nullDefs)
defs.dcons should be === nullDefs.dcons + (ModuleId("Test", "B") -> TCon(ModuleId("Test", "A"), Star))
}
it should "handle data constructors with non-type variable arguments" in {
val v = new ModuleVerifier(Map("Test" -> DefinitionsLookup.empty
.addTCon("A", ModuleId("Test", "A"))
.addTCon("B", ModuleId("Test", "B"))))
val dtd1 = ASTDataType("A", Nil, Nil)
val dtd2 = ASTDataType("B", Nil, List(ASTDataCon("B", List(ASTTypeCon("A")))))
val (_, defs) = v.addDataTypeDefs(TIEnv.empty, Seq("Test" -> dtd1, "Test" -> dtd2), nullDefs)
val tA = TCon(ModuleId("Test", "A"), Star)
val tB = TCon(ModuleId("Test", "B"), Star)
defs.dcons should be === nullDefs.dcons + (ModuleId("Test", "B") -> (tA fn tB))
}
it should "handle circular dependencies between type constructors" in {
val v = new ModuleVerifier(Map("Test" -> DefinitionsLookup.empty
.addTCon("A", ModuleId("Test", "A"))
.addTCon("B", ModuleId("Test", "B"))))
val dtd1 = ASTDataType("A", Nil, List(ASTDataCon("A", List(ASTTypeCon("B")))))
val dtd2 = ASTDataType("B", Nil, List(ASTDataCon("B", List(ASTTypeCon("A")))))
val (_, defs) = v.addDataTypeDefs(TIEnv.empty, Seq("Test" -> dtd1, "Test" -> dtd2), nullDefs)
val tA = TCon(ModuleId("Test", "A"), Star)
val tB = TCon(ModuleId("Test", "B"), Star)
defs.dcons should be === nullDefs.dcons +
(ModuleId("Test", "A") -> (tB fn tA)) +
(ModuleId("Test", "B") -> (tA fn tB))
}
it should "handle self-referential data constructors" in {
val v = new ModuleVerifier(Map("Test" -> DefinitionsLookup.empty
.addTCon("A", ModuleId("Test", "A"))))
val dtd = ASTDataType("A", Nil, List(ASTDataCon("B", List(ASTTypeCon("A")))))
val (_, defs) = v.addDataTypeDefs(TIEnv.empty, Seq("Test" -> dtd), nullDefs)
val tA = TCon(ModuleId("Test", "A"), Star)
defs.dcons should be === nullDefs.dcons + (ModuleId("Test", "B") -> (tA fn tA))
}
it should "handle data constructors with quantified type variable arguments" in {
val v = new ModuleVerifier(nullScopes)
val dtd = ASTDataType("A", List("p", "q"), List(ASTDataCon("B", List(ASTTypeVar("p"), ASTTypeVar("q")))))
val (_, defs) = v.addDataTypeDefs(TIEnv.empty, Seq("Test" -> dtd), nullDefs)
val fa = Forall(0, List(Star, Star), TGen(0, 0) fn (TGen(0, 1) fn TAp(TAp(TCon(ModuleId("Test", "A"), Kfun(Star, Kfun(Star, Star))), TGen(0, 0)), TGen(0, 1))))
defs.dcons should be === nullDefs.dcons + (ModuleId("Test", "B") -> fa)
}
it should "infer the kind of type variables based upon their usage in data constructors" in {
val v = new ModuleVerifier(nullScopes)
val dtd = ASTDataType("A", List("p", "q"), List(ASTDataCon("B", List(ASTTypeApply(ASTTypeVar("p"), List(ASTTypeVar("q")))))))
val (_, defs) = v.addDataTypeDefs(TIEnv.empty, Seq("Test" -> dtd), nullDefs)
val fa = Forall(0, List(Kfun(Star, Star), Star), TAp(TGen(0, 0), TGen(0, 1)) fn TAp(TAp(TCon(ModuleId("Test", "A"), Kfun(Kfun(Star, Star), Kfun(Star, Star))), TGen(0, 0)), TGen(0, 1)))
defs.dcons should be === nullDefs.dcons + (ModuleId("Test", "B") -> fa)
}
it should "throw an error if the kind of type variables conflicts in the data constructors" in {
val v = new ModuleVerifier(nullScopes)
val dtd = ASTDataType("E", List("a", "b"), List(ASTDataCon("X", List(ASTTypeApply(ASTTypeVar("a"), List(ASTTypeVar("b"))))), ASTDataCon("Y", List(ASTTypeApply(ASTTypeVar("b"), List(ASTTypeVar("a")))))))
evaluating { v.addDataTypeDefs(TIEnv.empty, Seq("Test" -> dtd), nullDefs) } should produce [KindConflictError]
}
it should "handle forall usage in data constructors" in {
val v = new ModuleVerifier(nullScopes)
val dtd = ASTDataType("A", Nil, List(ASTDataCon("B", List(ASTForall(List("a"), ASTTypeVar("a"))))))
val (_, defs) = v.addDataTypeDefs(TIEnv.empty, Seq("Test" -> dtd), nullDefs)
val fa = Forall(0, List(Star), TGen(0, 0)) fn TCon(ModuleId("Test", "A"), Star)
defs.dcons should be === nullDefs.dcons + (ModuleId("Test", "B") -> fa)
}
it should "extend the tcons and dcons in the definitions list and leave all existing values unchanged" in {
val v = new ModuleVerifier(nullScopes)
val dtd = ASTDataType("A", Nil, List(ASTDataCon("B", Nil)))
val (_, defs) = v.addDataTypeDefs(TIEnv.empty, Seq("Test" -> dtd), testDefs)
defs.tcons should be === testDefs.tcons + (ModuleId("Test", "A") -> TCon(ModuleId("Test", "A"), Star))
defs.dcons should be === testDefs.dcons + (ModuleId("Test", "B") -> TCon(ModuleId("Test", "A"), Star))
defs.tcs should be === testDefs.tcs
defs.tcis should be === testDefs.tcis
defs.mts should be === testDefs.mts
defs.mis.size should be === testDefs.mis.size
(defs.mis zip testDefs.mis) foreach { case ((k1, v1), (k2, v2)) =>
k1 should be === k2
v1 should equal(v2)
}
}
// ------------------------------------------------------------------------
behavior of "addTypeclassDefs"
it should "throw an error for typeclasses with no type variables" in {
val v = new ModuleVerifier(nullScopes)
val tc = ASTClass("A", Nil, Nil, Nil)
evaluating {
v.addTypeclassDefs(Seq("Test" -> tc), nullDefs)
} should produce [VerifierMiscError]
}
it should "throw an error if the name of typeclass conflicts with an imported definition" in {
val v = new ModuleVerifier(Map("Test" -> DefinitionsLookup.empty.addClass("A", ModuleId("Prelude", "A"))))
val tc = ASTClass("A", Nil, List("a"), Nil)
evaluating {
v.addTypeclassDefs(Seq("Test" -> tc), nullDefs)
} should produce [NamespaceError]
}
it should "throw an error if a typeclass is defined more than once in a module" in {
val v = new ModuleVerifier(nullScopes)
val tc = ASTClass("A", Nil, List("a"), Nil)
evaluating {
v.addTypeclassDefs(Seq("Test" -> tc, "Test" -> tc), nullDefs)
} should produce [ModuleDuplicateDefinition]
}
it should "produce typeclass definitions with single type parameters" in {
val v = new ModuleVerifier(nullScopes)
val tc = ASTClass("A", Nil, List("a"), Nil)
val defs = v.addTypeclassDefs(Seq("Test" -> tc), nullDefs)
defs.tcs should be === Map(
ModuleId("Test", "A") -> TypeclassDef(ModuleId("Test", "A"), Nil, List(TVar("a", Star)), Set.empty, Set.empty)
)
}
it should "produce typeclass definitions with multiple type parameters" in {
val v = new ModuleVerifier(nullScopes)
val tc = ASTClass("A", Nil, List("a", "b", "c"), Nil)
val defs = v.addTypeclassDefs(Seq("Test" -> tc), nullDefs)
defs.tcs should be === Map(
ModuleId("Test", "A") -> TypeclassDef(ModuleId("Test", "A"), Nil, List(TVar("a", Star), TVar("b", Star), TVar("c", Star)), Set.empty, Set.empty)
)
}
it should "throw an error for superclass references to classes that are not in scope" in {
val v = new ModuleVerifier(nullScopes)
val tc = ASTClass("B", List(ASTClassRef("X", List("a"))), List("a"), Nil)
evaluating {
v.addTypeclassDefs(Seq("Test" -> tc), nullDefs)
} should produce [UnknownTypeclassError]
}
it should "throw an error for typeclasses that pass undeclared type variables to a superclass" in {
val v = new ModuleVerifier(Map("Test" -> DefinitionsLookup.empty
.addClass("A", ModuleId("Test", "A"))
.addClass("B", ModuleId("Test", "B"))))
val tcA = ASTClass("A", Nil, List("p"), Nil)
val tcB = ASTClass("B", List(ASTClassRef("A", List("z"))), List("q"), Nil)
evaluating {
v.addTypeclassDefs(Seq("Test" -> tcA, "Test" -> tcB), nullDefs)
} should produce [UnknownTypeVariableError]
}
it should "throw an error for superclass references with the wrong arity" in {
val v = new ModuleVerifier(Map("Test" -> DefinitionsLookup.empty
.addClass("A", ModuleId("Test", "A"))
.addClass("B", ModuleId("Test", "B"))))
val tcA = ASTClass("A", Nil, List("p"), Nil)
val tcB = ASTClass("B", List(ASTClassRef("A", List("x", "y"))), List("x", "y"), Nil)
evaluating {
v.addTypeclassDefs(Seq("Test" -> tcA, "Test" -> tcB), nullDefs)
} should produce [TypeclassArityError]
}
it should "throw an error for recursive typeclass hierarchies" in {
When("a typeclass has itself as a superclass")
val v1 = new ModuleVerifier(Map("Test" -> DefinitionsLookup.empty
.addClass("A", ModuleId("Test", "A"))))
val tc = ASTClass("A", List(ASTClassRef("A", List("a"))), List("a"), Nil)
evaluating {
v1.addTypeclassDefs(Seq("Test" -> tc), nullDefs)
} should produce [TypeclassRecursiveHeirarchyError]
When("a typeclass's superclass has the typeclass as a superclass")
val v2 = new ModuleVerifier(Map("Test" -> DefinitionsLookup.empty
.addClass("A", ModuleId("Test", "A"))
.addClass("B", ModuleId("Test", "B"))))
val tcA = ASTClass("A", List(ASTClassRef("B", List("a"))), List("a"), Nil)
val tcB = ASTClass("B", List(ASTClassRef("A", List("b"))), List("b"), Nil)
evaluating {
v2.addTypeclassDefs(Seq("Test" -> tcA, "Test" -> tcB), nullDefs)
} should produce [TypeclassRecursiveHeirarchyError]
}
it should "produce typeclass definitions with a superclass" in {
val v = new ModuleVerifier(Map("Test" -> DefinitionsLookup.empty
.addClass("A", ModuleId("Test", "A"))
.addClass("B", ModuleId("Test", "B"))))
val tcA = ASTClass("A", Nil, List("a"), Nil)
val tcB = ASTClass("B", List(ASTClassRef("A", List("b"))), List("b"), Nil)
val defs = v.addTypeclassDefs(Seq("Test" -> tcA, "Test" -> tcB), nullDefs)
defs.tcs should be === Map(
ModuleId("Test", "A") -> TypeclassDef(ModuleId("Test", "A"), Nil, List(TVar("a", Star)), Set.empty, Set.empty),
ModuleId("Test", "B") -> TypeclassDef(ModuleId("Test", "B"), List(IsIn(ModuleId("Test", "A"), List(TVar("b", Star)))), List(TVar("b", Star)), Set.empty, Set.empty)
)
}
it should "produce typeclass definitions with multiple superclasses" in {
val v = new ModuleVerifier(Map("Test" -> DefinitionsLookup.empty
.addClass("A", ModuleId("Test", "A"))
.addClass("B", ModuleId("Test", "B"))
.addClass("C", ModuleId("Test", "C"))))
val tcA = ASTClass("A", Nil, List("a"), Nil)
val tcB = ASTClass("B", Nil, List("b"), Nil)
val tcC = ASTClass("C", List(ASTClassRef("A", List("c")), ASTClassRef("B", List("c"))), List("c"), Nil)
val defs = v.addTypeclassDefs(Seq("Test" -> tcA, "Test" -> tcB, "Test" -> tcC), nullDefs)
defs.tcs should be === Map(
ModuleId("Test", "A") -> TypeclassDef(ModuleId("Test", "A"), Nil, List(TVar("a", Star)), Set.empty, Set.empty),
ModuleId("Test", "B") -> TypeclassDef(ModuleId("Test", "B"), Nil, List(TVar("b", Star)), Set.empty, Set.empty),
ModuleId("Test", "C") -> TypeclassDef(ModuleId("Test", "C"), List(IsIn(ModuleId("Test", "A"), List(TVar("c", Star))), IsIn(ModuleId("Test", "B"), List(TVar("c", Star)))), List(TVar("c", Star)), Set.empty, Set.empty)
)
}
it should "produce typeclass definitions with multiple superclasses over multiple type variables" in {
val v = new ModuleVerifier(Map("Test" -> DefinitionsLookup.empty
.addClass("A", ModuleId("Test", "A"))
.addClass("B", ModuleId("Test", "B"))
.addClass("C", ModuleId("Test", "C"))))
val tcA = ASTClass("A", Nil, List("a"), Nil)
val tcB = ASTClass("B", Nil, List("b"), Nil)
val tcC = ASTClass("C", List(ASTClassRef("A", List("y")), ASTClassRef("B", List("x"))), List("x", "y"), Nil)
val defs = v.addTypeclassDefs(Seq("Test" -> tcA, "Test" -> tcB, "Test" -> tcC), nullDefs)
defs.tcs should be === Map(
ModuleId("Test", "A") -> TypeclassDef(ModuleId("Test", "A"), Nil, List(TVar("a", Star)), Set.empty, Set.empty),
ModuleId("Test", "B") -> TypeclassDef(ModuleId("Test", "B"), Nil, List(TVar("b", Star)), Set.empty, Set.empty),
ModuleId("Test", "C") -> TypeclassDef(ModuleId("Test", "C"), List(IsIn(ModuleId("Test", "A"), List(TVar("y", Star))), IsIn(ModuleId("Test", "B"), List(TVar("x", Star)))), List(TVar("x", Star), TVar("y", Star)), Set.empty, Set.empty)
)
}
it should "infer the kind of type variables using superclass information" in {
val v = new ModuleVerifier(Map("Test" -> DefinitionsLookup.empty
.addClass("A", ModuleId("Test", "A"))
.addClass("B", ModuleId("Test", "B"))))
val defs0 = nullDefs.copy(tcs = nullDefs.tcs + (ModuleId("Test", "A") -> TypeclassDef(ModuleId("Test", "A"), Nil, List(TVar("a", Kfun(Star, Star))), Set.empty, Set.empty)))
val tc = ASTClass("B", List(ASTClassRef("A", List("b"))), List("b"), Nil)
val defs = v.addTypeclassDefs(Seq("Test" -> tc), defs0)
defs.tcs should be === defs0.tcs +
(ModuleId("Test", "B") -> TypeclassDef(ModuleId("Test", "B"), List(IsIn(ModuleId("Test", "A"), List(TVar("b", Kfun(Star, Star))))), List(TVar("b", Kfun(Star, Star))), Set.empty, Set.empty))
}
it should "throw an error for typeclass members that don't use the class type variables" in {
val v = new ModuleVerifier(nullScopes)
When("the typeclass has one parameter")
val tc1 = ASTClass("A", Nil, List("a"), List(ASTClassMemberDef("do-a", ASTQType(Nil, ASTTypeVar("b")))))
evaluating {
v.addTypeclassDefs(Seq("Test" -> tc1), nullDefs)
} should produce [TypeclassIllegalMemberDefinition]
When("the typeclass has multiple parameters")
val tc2 = ASTClass("A", Nil, List("a", "b"), List(ASTClassMemberDef("do-a", ASTQType(Nil, ASTTypeVar("a")))))
evaluating {
v.addTypeclassDefs(Seq("Test" -> tc2), nullDefs)
} should produce [TypeclassIllegalMemberDefinition]
}
it should "throw an error for duplicate typeclass members" in {
val v = new ModuleVerifier(nullScopes)
val tc = ASTClass("A", Nil, List("a"), List(ASTClassMemberDef("do-a", ASTQType(Nil, ASTTypeVar("a"))), ASTClassMemberDef("do-a", ASTQType(Nil, ASTTypeVar("a")))))
evaluating {
v.addTypeclassDefs(Seq("Test" -> tc), nullDefs)
} should produce [TypeclassDuplicateMemberDefinitionError]
}
it should "throw an error for duplicate typeclass member implementations" in {
val v = new ModuleVerifier(nullScopes)
val tc = ASTClass("A", Nil, List("a"), List(
ASTClassMemberDef("do-a", ASTQType(Nil, ASTFunctionType(List(ASTTypeVar("a"), ASTTypeVar("a"))))),
ASTClassMemberImpl("do-a", ASTFunction(List("a"), ASTValueRead("a"))),
ASTClassMemberImpl("do-a", ASTFunction(List("a"), ASTValueRead("a")))))
evaluating {
v.addTypeclassDefs(Seq("Test" -> tc), nullDefs)
} should produce [TypeclassDuplicateMemberImplementationError]
}
it should "throw an error for typeclass members implementations that have no corresponding definition" in {
val v = new ModuleVerifier(nullScopes)
val tc = ASTClass("A", Nil, List("a"), List(ASTClassMemberImpl("do-a", ASTFunction(List("a"), ASTValueRead("a")))))
evaluating {
v.addTypeclassDefs(Seq("Test" -> tc), nullDefs)
} should produce [TypeclassImplementsUnknownMemberError]
}
it should "infer the kind of type variables using member definitions" in {
val v = new ModuleVerifier(nullScopes)
val tc = ASTClass("A", Nil, List("a", "b"), List(ASTClassMemberDef("do-a", ASTQType(Nil, ASTTypeApply(ASTTypeVar("a"), List(ASTTypeVar("b")))))))
val defs = v.addTypeclassDefs(Seq("Test" -> tc), nullDefs)
defs.tcs should be === Map(
ModuleId("Test", "A") -> TypeclassDef(ModuleId("Test", "A"), Nil, List(TVar("a", Kfun(Star, Star)), TVar("b", Star)), Set("do-a"), Set.empty)
)
}
it should "throw an error if the kind of type variables inferred from member definitions conflicts with the kind inferred from superclass information" in {
val v = new ModuleVerifier(Map("Test" -> DefinitionsLookup.empty
.addClass("A", ModuleId("Test", "A"))
.addClass("B", ModuleId("Test", "B"))))
val defs0 = nullDefs.copy(tcs = nullDefs.tcs + (ModuleId("Test", "A") -> TypeclassDef(ModuleId("Test", "A"), Nil, List(TVar("a", Kfun(Star, Star))), Set.empty, Set.empty)))
val tc = ASTClass("B", List(ASTClassRef("A", List("b"))), List("b"), List(ASTClassMemberDef("do-b", ASTQType(Nil, ASTTypeApply(ASTTypeVar("b"), List(ASTTypeVar("b")))))))
evaluating {
v.addTypeclassDefs(Seq("Test" -> tc), defs0)
} should produce [KindConflictError]
}
it should "extend the tcs in the definitions list and leave all existing values unchanged" in {
val v = new ModuleVerifier(nullScopes)
val tc = ASTClass("A", Nil, List("a"), Nil)
val defs = v.addTypeclassDefs(Seq("Test" -> tc), testDefs)
defs.tcons should be === testDefs.tcons
defs.dcons should be === testDefs.dcons
defs.tcs should be === testDefs.tcs + (ModuleId("Test", "A") -> TypeclassDef(ModuleId("Test", "A"), Nil, List(TVar("a", Star)), Set.empty, Set.empty))
defs.tcis should be === testDefs.tcis
defs.mts should be === testDefs.mts
defs.mis.size should be === testDefs.mis.size
(defs.mis zip testDefs.mis) foreach { case ((k1, v1), (k2, v2)) =>
k1 should be === k2
v1 should equal(v2)
}
}
// ------------------------------------------------------------------------
behavior of "addTypeclassInstances"
it should "throw an error if the typeclass is not in scope" in {
val v = new ModuleVerifier(testScopes)
evaluating {
v.addTypeclassInstances(TIEnv.empty, Seq(
"Test" -> ASTClassInst("Foof", Nil, List(ASTTypeCon("X")), Nil)
), testDefs)
} should produce [UnknownTypeclassError]
}
it should "throw an error if the typeclass is provided the wrong number of types" in {
val v = new ModuleVerifier(testScopes)
evaluating {
v.addTypeclassInstances(TIEnv.empty, Seq(
"Test" -> ASTClassInst("Y", Nil, List(ASTTypeCon("X"), ASTTypeCon("X")), Nil)
), testDefs)
} should produce [TypeclassArityError]
}
it should "throw an error if the typeclass is provided types of the wrong kinds" in {
val v = new ModuleVerifier(testScopes)
evaluating {
v.addTypeclassInstances(TIEnv.empty, Seq(
"Test" -> ASTClassInst("Y", Nil, List(ASTTypeCon("X1")), Nil)
), testDefs)
} should produce [TypeclassIllegalParameterError]
}
it should "throw an error if the typeclass is provided non-concrete types" in {
val v = new ModuleVerifier(testScopes)
evaluating {
v.addTypeclassInstances(TIEnv.empty, Seq(
"Test" -> ASTClassInst("Y", Nil, List(ASTTypeVar("a")), Nil)
), testDefs)
} should produce [TypeclassIllegalParameterError]
}
it should "throw an error if the instance provides a duplicate implementation for a member" in {
val v = new ModuleVerifier(testScopes)
evaluating {
v.addTypeclassInstances(TIEnv.empty, Seq(
"Test" -> ASTClassInst("Y", Nil, List(ASTTypeCon("X")), List(
ASTClassMemberImpl("yfn", ASTFunction(List("x"), ASTValueRead("x"))),
ASTClassMemberImpl("yfn", ASTFunction(List("x"), ASTValueRead("x")))
))
), testDefs)
} should produce [InstanceDuplicateMemberError]
}
it should "throw an error if the instance implements members that were not defined in the typeclass" in {
val v = new ModuleVerifier(testScopes)
evaluating {
v.addTypeclassInstances(TIEnv.empty, Seq(
"Test" -> ASTClassInst("Y", Nil, List(ASTTypeCon("X")), List(
ASTClassMemberImpl("yfn", ASTFunction(List("x"), ASTValueRead("x"))),
ASTClassMemberImpl("hwaet", ASTFunction(List("x"), ASTValueRead("x")))
))
), testDefs)
} should produce [InstanceUnknownMemberError]
}
it should "throw an error if the instance does not implement all the members defined in the typeclass" in {
val v = new ModuleVerifier(testScopes)
evaluating {
v.addTypeclassInstances(TIEnv.empty, Seq(
"Test" -> ASTClassInst("Y", Nil, List(ASTTypeCon("X")), Nil)
), testDefs)
} should produce [InstanceIncompleteError]
}
it should "produce typeclass instances" in {
val v = new ModuleVerifier(testScopes)
val (_, defs) = v.addTypeclassInstances(TIEnv.empty, Seq(
"Test" -> ASTClassInst("Y", Nil, List(ASTTypeCon("X")), List(
ASTClassMemberImpl("yfn", ASTFunction(List("x"), ASTValueRead("x")))
))
), testDefs)
val origTCIs = testDefs.tcis(ModuleId("Test", "Y"))
defs.tcis should be === testDefs.tcis + (ModuleId("Test", "Y") ->
(Inst("Test", Nil, IsIn(ModuleId("Test", "Y"), List(testDefs.tcons(ModuleId("Test", "X"))))) :: origTCIs))
}
it should "produce typeclass instances defined with extended context" in {
val v = new ModuleVerifier(testScopes)
val (_, defs) = v.addTypeclassInstances(TIEnv.empty, Seq(
"Test" -> ASTClassInst("Y", List(ASTClassRef("Y", List("a"))), List(ASTTypeApply(ASTTypeCon("X1"), List(ASTTypeVar("a")))), List(
ASTClassMemberImpl("yfn", ASTFunction(List("x"), ASTValueRead("x")))
))
), testDefs)
val origTCIs = testDefs.tcis(ModuleId("Test", "Y"))
defs.tcis should be === testDefs.tcis + (ModuleId("Test", "Y") ->
(Inst("Test",
List(IsIn(ModuleId("Test", "Y"), List(TVar("a", Star)))),
IsIn(ModuleId("Test", "Y"), List(TAp(testDefs.tcons(ModuleId("Test", "X1")), TVar("a", Star)))))
:: origTCIs))
}
it should "extend the tcis in the definitions list and leave all existing values unchanged" in {
val v = new ModuleVerifier(testScopes)
val (_, defs) = v.addTypeclassInstances(TIEnv.empty, Seq(
"Test" -> ASTClassInst("Y", List(ASTClassRef("Y", List("a"))), List(ASTTypeApply(ASTTypeCon("X1"), List(ASTTypeVar("a")))), List(
ASTClassMemberImpl("yfn", ASTFunction(List("x"), ASTValueRead("x")))
))
), testDefs)
defs.tcons should be === testDefs.tcons
defs.dcons should be === testDefs.dcons
defs.tcs should be === testDefs.tcs
val origTCIs = testDefs.tcis(ModuleId("Test", "Y"))
defs.tcis should be === testDefs.tcis + (ModuleId("Test", "Y") ->
(Inst("Test",
List(IsIn(ModuleId("Test", "Y"), List(TVar("a", Star)))),
IsIn(ModuleId("Test", "Y"), List(TAp(testDefs.tcons(ModuleId("Test", "X1")), TVar("a", Star)))))
:: origTCIs))
defs.mts should be === testDefs.mts
defs.mis.size should be === testDefs.mis.size
(defs.mis zip testDefs.mis) foreach { case ((k1, v1), (k2, v2)) =>
k1 should be === k2
v1 should equal(v2)
}
}
// ------------------------------------------------------------------------
behavior of "addMemberDefs"
it should "throw an error if the name of a member conflicts with an imported definition" in {
val v = new ModuleVerifier(Map("Test" -> DefinitionsLookup.empty.addMember("fn", ModuleId("Prelude", "fn"))))
evaluating {
v.addMemberDefs(TIEnv.empty, Seq(
"Test" -> ASTDef("fn", ASTQType(Nil, ASTTypeCon("Unit")))
), nullDefs)
} should produce [NamespaceError]
}
it should "throw an error if a member is defined more than once" in {
val v = new ModuleVerifier(Map("Test" -> DefinitionsLookup.defaults))
val defs = ModuleDefinitions.defaults
evaluating {
v.addMemberDefs(TIEnv.empty, Seq(
"Test" -> ASTDef("A", ASTQType(Nil, ASTTypeCon("Unit"))),
"Test" -> ASTDef("A", ASTQType(Nil, ASTTypeCon("Unit")))), defs)
} should produce [ModuleDuplicateDefinition]
}
it should "extend the mts in the definitions list and leave all existing values unchanged" in {
val v = new ModuleVerifier(testScopes)
val (_, defs) = v.addMemberDefs(TIEnv.empty, Seq("Test" -> ASTDef("A", ASTQType(Nil, ASTTypeCon("X")))), testDefs)
defs.tcons should be === testDefs.tcons
defs.dcons should be === testDefs.dcons
defs.tcs should be === testDefs.tcs
defs.tcis should be === testDefs.tcis
defs.mts should be === testDefs.mts + (ModuleId("Test", "A") -> Qual(Nil, testDefs.tcons(ModuleId("Test", "X"))))
defs.mis.size should be === testDefs.mis.size
(defs.mis zip testDefs.mis) foreach { case ((k1, v1), (k2, v2)) =>
k1 should be === k2
v1 should equal(v2)
}
}
// ------------------------------------------------------------------------
behavior of "addTypeclassMemberDefs"
it should "throw an error if the name of a member conflicts with an imported definition" in {
val v = new ModuleVerifier(Map("Test" -> DefinitionsLookup.empty
.addClass("C", ModuleId("Test", "C"))
.addMember("z", ModuleId("Prelude", "z"))))
val c = ASTClass("C", Nil, List("a"), List(ASTClassMemberDef("z", ASTQType(Nil, ASTFunctionType(List(ASTTypeVar("a"), ASTTypeVar("a")))))))
val defs = v.addTypeclassDefs(Seq("Test" -> c), nullDefs)
evaluating {
v.addTypeclassMemberDefs(TIEnv.empty, Seq("Test" -> c), defs)
} should produce [NamespaceError]
}
it should "throw an error if a member name overlaps with a module member name" in {
val v = new ModuleVerifier(Map("Test" -> DefinitionsLookup.empty
.addClass("C", ModuleId("Test", "C"))
.addMember("z", ModuleId("Test", "z"))))
val c = ASTClass("C", Nil, List("a"), List(ASTClassMemberDef("z", ASTQType(Nil, ASTFunctionType(List(ASTTypeVar("a"), ASTTypeVar("a")))))))
val (_, defs0) = v.addMemberDefs(TIEnv.empty, Seq("Test" -> ASTDef("z", ASTQType(Nil, ASTFunctionType(List(ASTTypeVar("a"), ASTTypeVar("a")))))), nullDefs)
val defs1 = v.addTypeclassDefs(Seq("Test" -> c), defs0)
evaluating {
v.addTypeclassMemberDefs(TIEnv.empty, Seq("Test" -> c), defs1)
} should produce [ModuleDuplicateDefinition]
}
it should "handle members with additional typeclass predicates" in {
val v = new ModuleVerifier(Map("Test" -> testScopes("Test")
.addClass("C", ModuleId("Test", "C"))))
val c = ASTClass("C", Nil, List("a"), List(ASTClassMemberDef("ccc",
ASTQType(List(ASTClassRef("Y", List("b"))), ASTFunctionType(List(ASTTypeVar("a"), ASTTypeVar("b"), ASTTypeVar("a")))))))
val defs0 = v.addTypeclassDefs(Seq("Test" -> c), testDefs)
val (_, defs1) = v.addTypeclassMemberDefs(TIEnv.empty, Seq("Test" -> c), defs0)
defs1.mts(ModuleId("Test", "ccc")) should be === Qual(List(
IsIn(ModuleId("Test", "C"), List(TGen(0, 0))),
IsIn(ModuleId("Test", "Y"), List(TGen(0, 1)))),
Forall(0, List(Star, Star), TGen(0, 0) fn (TGen(0, 1) fn TGen(0, 0))))
}
it should "throw an error if a member has additional typeclass predicates that cause a kind mismatch" in {
val v = new ModuleVerifier(Map("Test" -> testScopes("Test")
.addClass("C", ModuleId("Test", "C"))))
val c = ASTClass("C", Nil, List("a", "b"), List(
ASTClassMemberDef("bbb", ASTQType(Nil, ASTFunctionType(List(ASTTypeApply(ASTTypeVar("a"), List(ASTTypeVar("b"))), ASTTypeVar("b"))))),
ASTClassMemberDef("ccc", ASTQType(List(ASTClassRef("Y2", List("a", "c"))), ASTFunctionType(List(ASTTypeApply(ASTTypeVar("a"), List(ASTTypeVar("b"))), ASTTypeVar("c"), ASTTypeVar("c")))))))
val defs0 = v.addTypeclassDefs(Seq("Test" -> c), testDefs)
evaluating { v.addTypeclassMemberDefs(TIEnv.empty, Seq("Test" -> c), defs0) } should produce [KindMismatchError]
}
it should "extend the mts in the definitions list and leave all existing values unchanged" in {
val v = new ModuleVerifier(Map("Test" -> testScopes("Test")
.addClass("C", ModuleId("Test", "C"))))
val c = ASTClass("C", Nil, List("a"), List(ASTClassMemberDef("ccc",
ASTQType(Nil, ASTFunctionType(List(ASTTypeVar("a"), ASTTypeVar("a")))))))
val defs0 = v.addTypeclassDefs(Seq("Test" -> c), testDefs)
val (_, defs1) = v.addTypeclassMemberDefs(TIEnv.empty, Seq("Test" -> c), defs0)
defs1.tcons should be === defs0.tcons
defs1.dcons should be === defs0.dcons
defs1.tcs should be === defs0.tcs
defs1.tcis should be === defs0.tcis
defs1.mts should be === defs0.mts + (ModuleId("Test", "ccc") -> Qual(List(IsIn(ModuleId("Test", "C"), List(TGen(0, 0)))),
Forall(0, List(Star), TGen(0, 0) fn TGen(0, 0))))
defs1.mis.size should be === defs0.mis.size
(defs1.mis zip defs0.mis) foreach { case ((k1, v1), (k2, v2)) =>
k1 should be === k2
v1 should equal(v2)
}
}
// ------------------------------------------------------------------------
behavior of "addMemberImplementations"
it should "throw an error if there are implementations missing for defined members" in {
val v = new ModuleVerifier(testScopes)
val mds = Seq("Test" -> new ASTDef("member", ASTQType(Nil, ASTTypeCon("X"))))
val (_, defs) = v.addMemberDefs(TIEnv.empty, mds, testDefs)
evaluating {
v.addMemberImplementations(Nil, mds, defs)
} should produce [ModuleMissingImplementationError]
}
it should "throw an error if multiple implemenations are provided for the same id" in {
val v = new ModuleVerifier(nullScopes)
val mi = new ASTLet("member", ASTFunction(List("x"), ASTValueRead("x")))
val m = new ASTModule("Test", List(mi, mi))
evaluating {
v.addMemberImplementations(Seq(m), Nil, nullDefs)
} should produce [ModuleDuplicateDefinition]
}
it should "throw an error if a member refers to itself in initialisation" in {
val v = new ModuleVerifier(Map("Test" -> testScopes("Test")
.addMember("memberX", ModuleId("Test", "memberX"))))
val mi = new ASTLet("memberX", ASTValueRead("memberX"))
val m = new ASTModule("Test", List(mi))
evaluating {
v.addMemberImplementations(Seq(m), Nil, nullDefs)
} should produce [ModuleMemberInitRecursiveError]
}
it should "throw an error if there is a cycle in initialising a group of members" in {
val v = new ModuleVerifier(Map("Test" -> testScopes("Test")
.addMember("memberX", ModuleId("Test", "memberX"))
.addMember("memberY", ModuleId("Test", "memberY"))))
val miX = new ASTLet("memberX", ASTValueRead("memberY"))
val miY = new ASTLet("memberY", ASTValueRead("memberX"))
val m = new ASTModule("Test", List(miX, miY))
evaluating {
v.addMemberImplementations(Seq(m), Nil, nullDefs)
} should produce [ModuleMemberInitCycleError]
}
it should "extend the mis in the definitions list and leave all existing values unchanged" in {
val v = new ModuleVerifier(testScopes)
val mi = new ASTLet("member", ASTFunction(List("x"), ASTValueRead("x")))
val m = new ASTModule("Test", List(mi))
val defs = v.addMemberImplementations(Seq(m), Nil, testDefs)
defs.tcons should be === testDefs.tcons
defs.dcons should be === testDefs.dcons
defs.tcs should be === testDefs.tcs
defs.tcis should be === testDefs.tcis
defs.mts should be === testDefs.mts
val testMis = testDefs.mis + (ModuleId("Test", "member") -> FunctionExpr(Argument("x"), ValueReadExpr(LocalId("x"))))
defs.mis.size should be === testMis.size
(defs.mis zip testMis) foreach { case ((k1, v1), (k2, v2)) =>
k1 should be === k2
v1 should equal(v2)
}
}
// ------------------------------------------------------------------------
behavior of "getPredicates"
it should "throw an error if a referenced type variable is out of scope" in {
val v = new ModuleVerifier(testScopes)
evaluating {
v.getPredicates(
testScopes("Test").tcs,
testDefs.tcs,
List(ASTClassRef("Y", List("a"))),
Map.empty)
} should produce [UnknownTypeVariableError]
}
it should "throw an error if there is an arity mismatch with the referenced class" in {
val v = new ModuleVerifier(testScopes)
evaluating {
v.getPredicates(
testScopes("Test").tcs,
testDefs.tcs,
List(ASTClassRef("Y", List("a", "b"))),
Map("a" -> TVar("a", Star)))
} should produce [TypeclassArityError]
}
it should "throw an error if there is a kind mismatch between type and typeclass reference" in {
val v = new ModuleVerifier(testScopes)
evaluating {
v.getPredicates(
testScopes("Test").tcs,
testDefs.tcs,
List(ASTClassRef("Y", List("a"))),
Map("a" -> TVar("a", Kfun(Star, Star))))
} should produce [KindMismatchError]
}
it should "construct a list of predicates from a list of AST typeclass references" in {
val v = new ModuleVerifier(testScopes)
v.getPredicates(
testScopes("Test").tcs,
testDefs.tcs,
List(ASTClassRef("Y", List("a")), ASTClassRef("Y", List("b"))),
Map("a" -> TVar("a", Star), "b" -> TVar("b", Star))) should be ===
List(IsIn(ModuleId("Test", "Y"), List(TVar("a", Star))),
IsIn(ModuleId("Test", "Y"), List(TVar("b", Star))))
}
// ------------------------------------------------------------------------
behavior of "getMemberType"
it should "throw an error if any of the referenced types are not in scope" in {
val v = new ModuleVerifier(testScopes)
evaluating {
v.getMemberType(TIEnv.empty,
ModuleId("Test", "a"),
ASTQType(Nil, ASTTypeCon("Nonexist")),
testDefs)
} should produce [UnknownTypeConstructorError]
}
it should "throw an error if any of the qualified type variables are not reachable in the type" in {
val v = new ModuleVerifier(testScopes)
evaluating {
v.getMemberType(TIEnv.empty,
ModuleId("Test", "a"),
ASTQType(List(ASTClassRef("Y", List("a"))), ASTTypeCon("X")),
testDefs)
} should produce [UnknownTypeVariableError]
}
it should "construct a qualified type from the AST for a type" in {
val v = new ModuleVerifier(testScopes)
v.getMemberType(TIEnv.empty,
ModuleId("Test", "a"),
ASTQType(Nil, ASTTypeCon("X")),
testDefs)._2 should be ===
Qual(Nil, TCon(ModuleId("Test", "X"), Star))
}
it should "construct a qualified type from the AST for a type with predicates" in {
val v = new ModuleVerifier(testScopes)
val qt = v.getMemberType(TIEnv.empty,
ModuleId("Test", "a"),
ASTQType(List(ASTClassRef("Y", List("a"))), ASTTypeApply(ASTTypeCon("X1"), List(ASTTypeVar("a")))),
testDefs)._2
qt should be === Qual(List(
IsIn(ModuleId("Test", "Y"), List(TGen(0, 0)))),
Forall(0, List(Star), TAp(TCon(ModuleId("Test", "X1"), Kfun(Star, Star)), TGen(0, 0))))
}
it should "throw an error if type variables declared by forall have overlapping names" in {
val v = new ModuleVerifier(testScopes)
Given("a type where the name overlap occurs by shadowing")
evaluating {
v.getMemberType(TIEnv.empty,
ModuleId("Test", "a"),
ASTQType(Nil, ASTFunctionType(List(ASTForall(List("a"), ASTFunctionType(List(ASTTypeVar("a"), ASTTypeVar("a")))), ASTTypeVar("a")))),
testDefs)
} should produce [TypeVariableOverlapError]
Given("a type where the name appears in parallel foralls")
evaluating {
v.getMemberType(TIEnv.empty,
ModuleId("Test", "a"),
ASTQType(Nil, ASTFunctionType(List(ASTForall(List("a"), ASTFunctionType(List(ASTTypeVar("a"), ASTTypeVar("a")))), ASTForall(List("a"), ASTFunctionType(List(ASTTypeVar("a"), ASTTypeVar("a")))), ASTTypeVar("b")))),
testDefs)
} should produce [TypeVariableOverlapError]
}
it should "allow qualification of variables that appear in an inner forall" in {
val v = new ModuleVerifier(testScopes)
val qt = v.getMemberType(TIEnv.empty,
ModuleId("Test", "a"),
ASTQType(List(ASTClassRef("Y", List("a"))), ASTFunctionType(List(ASTForall(List("a"), ASTFunctionType(List(ASTTypeVar("a"), ASTTypeVar("a")))), ASTTypeCon("X")))),
testDefs)._2
qt should be === Qual(List(IsIn(ModuleId("Test", "Y"), List(TGen(0, 0)))),
Forall(0, List(Star), TGen(0, 0) fn TGen(0, 0)) fn TCon(ModuleId("Test", "X"), Star))
}
// ------------------------------------------------------------------------
behavior of "lookupInstanceParamType"
it should "throw an error if the type has parameters applied but does not accept parameters" in {
val v = new ModuleVerifier(testScopes)
evaluating {
v.lookupInstanceParamType(TIEnv.empty,
testScopes("Test").tcons,
testDefs.tcons,
ASTTypeApply(ASTTypeCon("X"), List(ASTTypeVar("a"))))
} should produce [TypeConstructorNoArgsError]
}
it should "throw an error if the type has too many parameters applied" in {
val v = new ModuleVerifier(testScopes)
evaluating {
v.lookupInstanceParamType(TIEnv.empty,
testScopes("Test").tcons,
testDefs.tcons,
ASTTypeApply(ASTTypeCon("X1"), List(ASTTypeVar("a"), ASTTypeVar("b"))))
} should produce [TypeConstructorTooManyArgsError]
}
it should "construct a type for a typeclass instance parameter" in {
val v = new ModuleVerifier(testScopes)
v.lookupInstanceParamType(TIEnv.empty,
testScopes("Test").tcons,
testDefs.tcons,
ASTTypeApply(ASTTypeCon("X1"), List(ASTTypeVar("a"))))._2 should be ===
TAp(testDefs.tcons(ModuleId("Test", "X1")), TVar("a", Star))
}
}
| garyb/tap | src/test/scala/test/verifier/ModuleVerifierTests.scala | Scala | mit | 50,706 |
package kvstore
import akka.testkit.TestKit
import akka.testkit.ImplicitSender
import org.scalatest.BeforeAndAfterAll
import org.scalatest.Matchers
import org.scalatest.FunSuiteLike
import akka.actor.ActorSystem
import akka.testkit.TestProbe
import Arbiter._
import Replicator._
import org.scalactic.ConversionCheckedTripleEquals
class Step6_NewSecondarySpec extends TestKit(ActorSystem("Step6NewSecondarySpec"))
with FunSuiteLike
with BeforeAndAfterAll
with Matchers
with ConversionCheckedTripleEquals
with ImplicitSender
with Tools {
override def afterAll(): Unit = {
system.shutdown()
}
test("case1: Primary must start replication to new replicas") {
val arbiter = TestProbe()
val primary = system.actorOf(Replica.props(arbiter.ref, Persistence.props(flaky = false)), "case1-primary")
val user = session(primary)
val secondary = TestProbe()
arbiter.expectMsg(Join)
arbiter.send(primary, JoinedPrimary)
user.setAcked("k1", "v1")
arbiter.send(primary, Replicas(Set(primary, secondary.ref)))
secondary.expectMsg(Snapshot("k1", Some("v1"), 0L))
secondary.reply(SnapshotAck("k1", 0L))
val ack1 = user.set("k1", "v2")
secondary.expectMsg(Snapshot("k1", Some("v2"), 1L))
secondary.reply(SnapshotAck("k1", 1L))
user.waitAck(ack1)
val ack2 = user.remove("k1")
secondary.expectMsg(Snapshot("k1", None, 2L))
secondary.reply(SnapshotAck("k1", 2L))
user.waitAck(ack2)
}
test("case2: Primary must stop replication to removed replicas and stop Replicator") {
val arbiter = TestProbe()
val primary = system.actorOf(Replica.props(arbiter.ref, Persistence.props(flaky = false)), "case2-primary")
val user = session(primary)
val secondary = TestProbe()
arbiter.expectMsg(Join)
arbiter.send(primary, JoinedPrimary)
arbiter.send(primary, Replicas(Set(primary, secondary.ref)))
val ack1 = user.set("k1", "v1")
secondary.expectMsg(Snapshot("k1", Some("v1"), 0L))
val replicator = secondary.lastSender
secondary.reply(SnapshotAck("k1", 0L))
user.waitAck(ack1)
watch(replicator)
arbiter.send(primary, Replicas(Set(primary)))
expectTerminated(replicator)
}
test("case3: Primary must stop replication to removed replicas and waive their outstanding acknowledgements") {
val arbiter = TestProbe()
val primary = system.actorOf(Replica.props(arbiter.ref, Persistence.props(flaky = false)), "case3-primary")
val user = session(primary)
val secondary = TestProbe()
arbiter.expectMsg(Join)
arbiter.send(primary, JoinedPrimary)
arbiter.send(primary, Replicas(Set(primary, secondary.ref)))
val ack1 = user.set("k1", "v1")
secondary.expectMsg(Snapshot("k1", Some("v1"), 0L))
secondary.reply(SnapshotAck("k1", 0L))
user.waitAck(ack1)
val ack2 = user.set("k1", "v2")
secondary.expectMsg(Snapshot("k1", Some("v2"), 1L))
arbiter.send(primary, Replicas(Set(primary)))
user.waitAck(ack2)
}
}
| vasnake/Principles-of-Reactive-Programming | w6/kvstore/src/test/scala/kvstore/Step6_NewSecondarySpec.scala | Scala | gpl-3.0 | 3,019 |
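// Hypothetical sketch, not part of the coursework repository: the secondary-replica behaviour
// that the Snapshot/SnapshotAck exchanges in the spec above exercise. A snapshot is applied and
// acknowledged only when its sequence number is the next one expected; already-seen snapshots
// are re-acknowledged, and snapshots from the future are ignored until the gap is filled.
package kvstore

import akka.actor.Actor
import kvstore.Replicator.{Snapshot, SnapshotAck}

class SecondarySketch extends Actor {
  private var kv = Map.empty[String, String]
  private var expectedSeq = 0L

  def receive: Receive = {
    case Snapshot(key, valueOption, seq) if seq == expectedSeq =>
      valueOption match {
        case Some(value) => kv += key -> value // insert or update
        case None        => kv -= key          // removal
      }
      expectedSeq += 1
      sender() ! SnapshotAck(key, seq)
    case Snapshot(key, _, seq) if seq < expectedSeq =>
      // Already applied earlier: acknowledge again so the replicator can make progress.
      sender() ! SnapshotAck(key, seq)
    case Snapshot(_, _, _) =>
      // Snapshot from the future: ignore it until the earlier ones arrive.
      ()
  }
}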
/*
* Copyright (c) <2012-2013>, Amanj Sherwany <http://www.amanj.me>
* All rights reserved.
* */
package ch.usi.inf.l3.moolc.evaluator
import _root_.ch.usi.inf.l3.moolc.ast._
trait StoreTrait {
def newStore(env: Map[Var, PEValue]): StoreTrait
def addEnv(env: Map[Var, PEValue]): Unit
def get(v: Var): Option[PEValue]
def add(v: Var, p: PEValue): Unit
def remove(v: Var): Unit
def isCT(v: Var): Boolean
def isRT(v: Var): Boolean
def cloneStore(): StoreTrait
} | amanjpro/mool-compiler | src/main/scala/ch/usi/inf/l3/moolc/eval/StoreTrait.scala | Scala | bsd-3-clause | 482 |
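// Hypothetical sketch, not part of the original repository: one way the StoreTrait API above
// could be backed by a mutable map. `Var` and `PEValue` come from the project's `ast` package
// (the same import the trait uses); the real evaluator's notion of compile-time (CT) versus
// run-time (RT) bindings may differ, so isCT/isRT below are an assumption.
package ch.usi.inf.l3.moolc.evaluator

import _root_.ch.usi.inf.l3.moolc.ast._
import scala.collection.mutable

class MapStore(initial: Map[Var, PEValue]) extends StoreTrait {
  private val bindings = mutable.Map[Var, PEValue](initial.toSeq: _*)

  def newStore(env: Map[Var, PEValue]): StoreTrait = new MapStore(env)
  def addEnv(env: Map[Var, PEValue]): Unit = bindings ++= env
  def get(v: Var): Option[PEValue] = bindings.get(v)
  def add(v: Var, p: PEValue): Unit = bindings(v) = p
  def remove(v: Var): Unit = bindings -= v
  // Assumption: a binding is compile-time when the store knows a value for it, run-time otherwise.
  def isCT(v: Var): Boolean = bindings.contains(v)
  def isRT(v: Var): Boolean = !isCT(v)
  def cloneStore(): StoreTrait = new MapStore(bindings.toMap)
}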
/*
* Copyright 2013 - 2020 Outworkers Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.outworkers.phantom.tables
import com.outworkers.phantom.dsl._
case class StubRecord(
id: UUID,
name: String
)
abstract class TableWithSingleKey extends Table[TableWithSingleKey, StubRecord] {
object id extends UUIDColumn with PartitionKey
object name extends StringColumn
}
abstract class TableWithCompoundKey extends Table[TableWithCompoundKey, StubRecord] {
object id extends UUIDColumn with PartitionKey
object second extends UUIDColumn with PrimaryKey
object name extends StringColumn
}
abstract class TableWithCompositeKey extends Table[TableWithCompositeKey, StubRecord] {
object id extends UUIDColumn with PartitionKey
object second_part extends UUIDColumn with PartitionKey
object second extends UUIDColumn with PrimaryKey
object name extends StringColumn
}
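// Illustrative note, not part of the original file: with the key annotations above, the
// generated Cassandra schemas are expected to end up with primary keys of roughly this shape
// (partition key in the inner parentheses, clustering columns after it):
//   TableWithSingleKey    -> PRIMARY KEY (id)
//   TableWithCompoundKey  -> PRIMARY KEY ((id), second)
//   TableWithCompositeKey -> PRIMARY KEY ((id, second_part), second)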
| outworkers/phantom | phantom-dsl/src/test/scala/com/outworkers/phantom/tables/TableKeyTests.scala | Scala | apache-2.0 | 1,411 |
package fpinscala.errorhandling
import scala.{Option => _, Either => _, Left => _, Right => _, _}
// hide std library `Option` and `Either`, since we are writing our own in this chapter
sealed trait Either[+E, +A] {
/** Exercise 4.6 */
  def map[B](f: A => B): Either[E, B] = this match {
    case Right(v) => Right(f(v))
    case Left(e) => Left(e)
  }
  def flatMap[EE >: E, B](f: A => Either[EE, B]): Either[EE, B] = this match {
    case Right(a) => f(a)
    case Left(e) => Left(e)
  }
  def orElse[EE >: E, B >: A](b: => Either[EE, B]): Either[EE, B] = this match {
    case Left(_) => b
    case Right(a) => Right(a)
  }
def map2[EE >: E, B, C](b: Either[EE, B])(f: (A, B) => C): Either[EE, C] =
(this, b) match {
case (Right(a), Right(b)) => Right(f(a, b))
case (_, Left(x)) => Left(x)
case (Left(x), _) => Left(x)
}
}
case class Left[+E](get: E) extends Either[E, Nothing]
case class Right[+A](get: A) extends Either[Nothing, A]
object Either {
/** Exercise 4.7 */
  def traverse[E, A, B](es: List[A])(f: A => Either[E, B]): Either[E, List[B]] =
    es match {
      case Nil => Right(Nil)
      // combine the results in order, returning the first Left encountered
      case x :: xs => f(x).map2(traverse(xs)(f))(_ :: _)
    }
/** Exercise 4.7 */
def sequence[E, A](es: List[Either[E, A]]): Either[E, List[A]] =
traverse(es)(identity(_))
def mean(xs: IndexedSeq[Double]): Either[String, Double] =
if (xs.isEmpty)
Left("mean of empty list!")
else
Right(xs.sum / xs.length)
def safeDiv(x: Int, y: Int): Either[Exception, Int] =
try Right(x / y)
catch {
case e: Exception => Left(e)
}
def Try[A](a: => A): Either[Exception, A] =
try Right(a)
catch {
case e: Exception => Left(e)
}
} | lzongren/fpinscala | exercises/src/main/scala/fpinscala/errorhandling/Either.scala | Scala | mit | 1,793 |
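// Hypothetical usage sketch, not part of the original exercises: how the combinators above are
// expected to behave once implemented as shown. Only the types and helpers defined in the file
// above are used; the comments give the expected values, not captured output.
package fpinscala.errorhandling

import scala.{Option => _, Either => _, Left => _, Right => _, _}

object EitherUsageSketch {
  import Either._

  def parseInt(s: String): Either[Exception, Int] = Try(s.toInt)

  def main(args: Array[String]): Unit = {
    // map2 succeeds only when both sides are Right
    println(parseInt("2").map2(parseInt("3"))(_ + _)) // Right(5)
    println(parseInt("2").map2(parseInt("x"))(_ + _)) // Left(java.lang.NumberFormatException: For input string: "x")

    // sequence turns a List of Eithers into an Either of a List, keeping the first Left
    println(sequence(List(Right(1), Right(2), Right(3))))     // Right(List(1, 2, 3))
    println(sequence(List(Right(1), Left("boom"), Right(3)))) // Left(boom)
  }
}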
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import java.io._
import java.net.URI
import java.util.{Arrays, Locale, Properties, ServiceLoader, UUID}
import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap}
import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference}
import javax.ws.rs.core.UriBuilder
import scala.collection.JavaConverters._
import scala.collection.Map
import scala.collection.immutable
import scala.collection.mutable.HashMap
import scala.language.implicitConversions
import scala.reflect.{classTag, ClassTag}
import scala.util.control.NonFatal
import com.google.common.collect.MapMaker
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable, FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable}
import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat, TextInputFormat}
import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob}
import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}
import org.apache.logging.log4j.Level
import org.apache.spark.annotation.{DeveloperApi, Experimental}
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}
import org.apache.spark.executor.{Executor, ExecutorMetrics, ExecutorMetricsSource}
import org.apache.spark.input.{FixedLengthBinaryInputFormat, PortableDataStream, StreamInputFormat, WholeTextFileInputFormat}
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.internal.config.Tests._
import org.apache.spark.internal.config.UI._
import org.apache.spark.internal.plugin.PluginContainer
import org.apache.spark.io.CompressionCodec
import org.apache.spark.launcher.JavaModuleOptions
import org.apache.spark.metrics.source.JVMCPUSource
import org.apache.spark.partial.{ApproximateEvaluator, PartialResult}
import org.apache.spark.rdd._
import org.apache.spark.resource._
import org.apache.spark.resource.ResourceUtils._
import org.apache.spark.rpc.RpcEndpointRef
import org.apache.spark.scheduler._
import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackend
import org.apache.spark.scheduler.local.LocalSchedulerBackend
import org.apache.spark.shuffle.ShuffleDataIOUtils
import org.apache.spark.shuffle.api.ShuffleDriverComponents
import org.apache.spark.status.{AppStatusSource, AppStatusStore}
import org.apache.spark.status.api.v1.ThreadStackTrace
import org.apache.spark.storage._
import org.apache.spark.storage.BlockManagerMessages.TriggerThreadDump
import org.apache.spark.ui.{ConsoleProgressBar, SparkUI}
import org.apache.spark.util._
import org.apache.spark.util.logging.DriverLogger
/**
* Main entry point for Spark functionality. A SparkContext represents the connection to a Spark
* cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster.
*
* @note Only one `SparkContext` should be active per JVM. You must `stop()` the
* active `SparkContext` before creating a new one.
* @param config a Spark Config object describing the application configuration. Any settings in
* this config override the default configs as well as system properties.
*/
class SparkContext(config: SparkConf) extends Logging {
// The call site where this SparkContext was constructed.
private val creationSite: CallSite = Utils.getCallSite()
if (!config.get(EXECUTOR_ALLOW_SPARK_CONTEXT)) {
// In order to prevent SparkContext from being created in executors.
SparkContext.assertOnDriver()
}
// In order to prevent multiple SparkContexts from being active at the same time, mark this
// context as having started construction.
// NOTE: this must be placed at the beginning of the SparkContext constructor.
SparkContext.markPartiallyConstructed(this)
val startTime = System.currentTimeMillis()
private[spark] val stopped: AtomicBoolean = new AtomicBoolean(false)
private[spark] def assertNotStopped(): Unit = {
if (stopped.get()) {
val activeContext = SparkContext.activeContext.get()
val activeCreationSite =
if (activeContext == null) {
"(No active SparkContext.)"
} else {
activeContext.creationSite.longForm
}
throw new IllegalStateException(
s"""Cannot call methods on a stopped SparkContext.
|This stopped SparkContext was created at:
|
|${creationSite.longForm}
|
|The currently active SparkContext was created at:
|
|$activeCreationSite
""".stripMargin)
}
}
/**
* Create a SparkContext that loads settings from system properties (for instance, when
* launching with ./bin/spark-submit).
*/
def this() = this(new SparkConf())
/**
* Alternative constructor that allows setting common Spark properties directly
*
* @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
* @param appName A name for your application, to display on the cluster web UI
* @param conf a [[org.apache.spark.SparkConf]] object specifying other Spark parameters
*/
def this(master: String, appName: String, conf: SparkConf) =
this(SparkContext.updatedConf(conf, master, appName))
/**
* Alternative constructor that allows setting common Spark properties directly
*
* @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
* @param appName A name for your application, to display on the cluster web UI.
* @param sparkHome Location where Spark is installed on cluster nodes.
* @param jars Collection of JARs to send to the cluster. These can be paths on the local file
* system or HDFS, HTTP, HTTPS, or FTP URLs.
* @param environment Environment variables to set on worker nodes.
*/
def this(
master: String,
appName: String,
sparkHome: String = null,
jars: Seq[String] = Nil,
environment: Map[String, String] = Map()) = {
this(SparkContext.updatedConf(new SparkConf(), master, appName, sparkHome, jars, environment))
}
// The following constructors are required when Java code accesses SparkContext directly.
// Please see SI-4278
/**
* Alternative constructor that allows setting common Spark properties directly
*
* @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
* @param appName A name for your application, to display on the cluster web UI.
*/
private[spark] def this(master: String, appName: String) =
this(master, appName, null, Nil, Map())
/**
* Alternative constructor that allows setting common Spark properties directly
*
* @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
* @param appName A name for your application, to display on the cluster web UI.
* @param sparkHome Location where Spark is installed on cluster nodes.
*/
private[spark] def this(master: String, appName: String, sparkHome: String) =
this(master, appName, sparkHome, Nil, Map())
/**
* Alternative constructor that allows setting common Spark properties directly
*
* @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
* @param appName A name for your application, to display on the cluster web UI.
* @param sparkHome Location where Spark is installed on cluster nodes.
* @param jars Collection of JARs to send to the cluster. These can be paths on the local file
* system or HDFS, HTTP, HTTPS, or FTP URLs.
*/
private[spark] def this(master: String, appName: String, sparkHome: String, jars: Seq[String]) =
this(master, appName, sparkHome, jars, Map())
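/*
 * Usage sketch (illustrative only; the master URL and app name below are hypothetical): the usual
 * way to construct a context is from an explicit SparkConf, remembering to stop() it when done.
 * {{{
 * val conf = new SparkConf().setMaster("local[2]").setAppName("ctor-demo")
 * val sc = new SparkContext(conf)
 * try {
 *   println(sc.parallelize(1 to 10).reduce(_ + _))   // 55
 * } finally {
 *   sc.stop()
 * }
 * }}}
 */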
// log out Spark Version in Spark driver log
logInfo(s"Running Spark version $SPARK_VERSION")
/* ------------------------------------------------------------------------------------- *
| Private variables. These variables keep the internal state of the context, and are |
| not accessible by the outside world. They're mutable since we want to initialize all |
| of them to some neutral value ahead of time, so that calling "stop()" while the |
| constructor is still running is safe. |
* ------------------------------------------------------------------------------------- */
private var _conf: SparkConf = _
private var _eventLogDir: Option[URI] = None
private var _eventLogCodec: Option[String] = None
private var _listenerBus: LiveListenerBus = _
private var _env: SparkEnv = _
private var _statusTracker: SparkStatusTracker = _
private var _progressBar: Option[ConsoleProgressBar] = None
private var _ui: Option[SparkUI] = None
private var _hadoopConfiguration: Configuration = _
private var _executorMemory: Int = _
private var _schedulerBackend: SchedulerBackend = _
private var _taskScheduler: TaskScheduler = _
private var _heartbeatReceiver: RpcEndpointRef = _
@volatile private var _dagScheduler: DAGScheduler = _
private var _applicationId: String = _
private var _applicationAttemptId: Option[String] = None
private var _eventLogger: Option[EventLoggingListener] = None
private var _driverLogger: Option[DriverLogger] = None
private var _executorAllocationManager: Option[ExecutorAllocationManager] = None
private var _cleaner: Option[ContextCleaner] = None
private var _listenerBusStarted: Boolean = false
private var _jars: Seq[String] = _
private var _files: Seq[String] = _
private var _archives: Seq[String] = _
private var _shutdownHookRef: AnyRef = _
private var _statusStore: AppStatusStore = _
private var _heartbeater: Heartbeater = _
private var _resources: immutable.Map[String, ResourceInformation] = _
private var _shuffleDriverComponents: ShuffleDriverComponents = _
private var _plugins: Option[PluginContainer] = None
private var _resourceProfileManager: ResourceProfileManager = _
/* ------------------------------------------------------------------------------------- *
| Accessors and public fields. These provide access to the internal state of the |
| context. |
* ------------------------------------------------------------------------------------- */
private[spark] def conf: SparkConf = _conf
/**
* Return a copy of this SparkContext's configuration. The configuration ''cannot'' be
* changed at runtime.
*/
def getConf: SparkConf = conf.clone()
def resources: Map[String, ResourceInformation] = _resources
def jars: Seq[String] = _jars
def files: Seq[String] = _files
def archives: Seq[String] = _archives
def master: String = _conf.get("spark.master")
def deployMode: String = _conf.get(SUBMIT_DEPLOY_MODE)
def appName: String = _conf.get("spark.app.name")
private[spark] def isEventLogEnabled: Boolean = _conf.get(EVENT_LOG_ENABLED)
private[spark] def eventLogDir: Option[URI] = _eventLogDir
private[spark] def eventLogCodec: Option[String] = _eventLogCodec
def isLocal: Boolean = Utils.isLocalMaster(_conf)
/**
* @return true if context is stopped or in the midst of stopping.
*/
def isStopped: Boolean = stopped.get()
private[spark] def statusStore: AppStatusStore = _statusStore
// An asynchronous listener bus for Spark events
private[spark] def listenerBus: LiveListenerBus = _listenerBus
// This function allows components created by SparkEnv to be mocked in unit tests:
private[spark] def createSparkEnv(
conf: SparkConf,
isLocal: Boolean,
listenerBus: LiveListenerBus): SparkEnv = {
SparkEnv.createDriverEnv(conf, isLocal, listenerBus, SparkContext.numDriverCores(master, conf))
}
private[spark] def env: SparkEnv = _env
// Used to store a URL for each static file/jar together with the file's local timestamp
private[spark] val addedFiles = new ConcurrentHashMap[String, Long]().asScala
private[spark] val addedArchives = new ConcurrentHashMap[String, Long]().asScala
private[spark] val addedJars = new ConcurrentHashMap[String, Long]().asScala
// Keeps track of all persisted RDDs
private[spark] val persistentRdds = {
val map: ConcurrentMap[Int, RDD[_]] = new MapMaker().weakValues().makeMap[Int, RDD[_]]()
map.asScala
}
def statusTracker: SparkStatusTracker = _statusTracker
private[spark] def progressBar: Option[ConsoleProgressBar] = _progressBar
private[spark] def ui: Option[SparkUI] = _ui
def uiWebUrl: Option[String] = _ui.map(_.webUrl)
/**
* A default Hadoop Configuration for the Hadoop code (e.g. file systems) that we reuse.
*
* @note As it will be reused in all Hadoop RDDs, it's better not to modify it unless you
* plan to set some global configurations for all Hadoop RDDs.
*/
def hadoopConfiguration: Configuration = _hadoopConfiguration
private[spark] def executorMemory: Int = _executorMemory
// Environment variables to pass to our executors.
private[spark] val executorEnvs = HashMap[String, String]()
// Set SPARK_USER for user who is running SparkContext.
val sparkUser = Utils.getCurrentUserName()
private[spark] def schedulerBackend: SchedulerBackend = _schedulerBackend
private[spark] def taskScheduler: TaskScheduler = _taskScheduler
private[spark] def taskScheduler_=(ts: TaskScheduler): Unit = {
_taskScheduler = ts
}
private[spark] def dagScheduler: DAGScheduler = _dagScheduler
private[spark] def dagScheduler_=(ds: DAGScheduler): Unit = {
_dagScheduler = ds
}
private[spark] def shuffleDriverComponents: ShuffleDriverComponents = _shuffleDriverComponents
/**
* A unique identifier for the Spark application.
* Its format depends on the scheduler implementation, for example:
*  - a local Spark app: something like 'local-1433865536131'
*  - YARN: something like 'application_1433865536131_34483'
*  - Mesos: something like 'driver-20170926223339-0001'
*/
def applicationId: String = _applicationId
def applicationAttemptId: Option[String] = _applicationAttemptId
private[spark] def eventLogger: Option[EventLoggingListener] = _eventLogger
private[spark] def executorAllocationManager: Option[ExecutorAllocationManager] =
_executorAllocationManager
private[spark] def resourceProfileManager: ResourceProfileManager = _resourceProfileManager
private[spark] def cleaner: Option[ContextCleaner] = _cleaner
private[spark] var checkpointDir: Option[String] = None
// Thread Local variable that can be used by users to pass information down the stack
protected[spark] val localProperties = new InheritableThreadLocal[Properties] {
override def childValue(parent: Properties): Properties = {
// Note: make a clone such that changes in the parent properties aren't reflected in
// those of the child threads, which would have confusing semantics (SPARK-10563).
Utils.cloneProperties(parent)
}
override protected def initialValue(): Properties = new Properties()
}
/* ------------------------------------------------------------------------------------- *
| Initialization. This code initializes the context in a manner that is exception-safe. |
| All internal fields holding state are initialized here, and any error prompts the |
| stop() method to be called. |
* ------------------------------------------------------------------------------------- */
private def warnSparkMem(value: String): String = {
logWarning("Using SPARK_MEM to set amount of memory to use per executor process is " +
"deprecated, please use spark.executor.memory instead.")
value
}
/** Control our logLevel. This overrides any user-defined log settings.
* @param logLevel The desired log level as a string.
* Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
*/
def setLogLevel(logLevel: String): Unit = {
// let's allow lowercase or mixed case too
val upperCased = logLevel.toUpperCase(Locale.ROOT)
require(SparkContext.VALID_LOG_LEVELS.contains(upperCased),
s"Supplied level $logLevel did not match one of:" +
s" ${SparkContext.VALID_LOG_LEVELS.mkString(",")}")
Utils.setLogLevel(Level.toLevel(upperCased))
}
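/*
 * Usage sketch (illustrative; assumes an existing SparkContext instance `sc`): quieting
 * driver-side logging for an interactive session.
 * {{{
 * sc.setLogLevel("warn")   // case-insensitive; must map to one of the valid log levels above
 * }}}
 */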
try {
_conf = config.clone()
_conf.validateSettings()
_conf.set("spark.app.startTime", startTime.toString)
if (!_conf.contains("spark.master")) {
throw new SparkException("A master URL must be set in your configuration")
}
if (!_conf.contains("spark.app.name")) {
throw new SparkException("An application name must be set in your configuration")
}
// This should be set as early as possible.
SparkContext.fillMissingMagicCommitterConfsIfNeeded(_conf)
SparkContext.supplementJavaModuleOptions(_conf)
_driverLogger = DriverLogger(_conf)
val resourcesFileOpt = conf.get(DRIVER_RESOURCES_FILE)
_resources = getOrDiscoverAllResources(_conf, SPARK_DRIVER_PREFIX, resourcesFileOpt)
logResourceInfo(SPARK_DRIVER_PREFIX, _resources)
// log out spark.app.name in the Spark driver logs
logInfo(s"Submitted application: $appName")
// System property spark.yarn.app.id must be set if user code is run by the AM on a YARN cluster
if (master == "yarn" && deployMode == "cluster" && !_conf.contains("spark.yarn.app.id")) {
throw new SparkException("Detected yarn cluster mode, but isn't running on a cluster. " +
"Deployment to YARN is not supported directly by SparkContext. Please use spark-submit.")
}
if (_conf.getBoolean("spark.logConf", false)) {
logInfo("Spark configuration:\\n" + _conf.toDebugString)
}
// Set Spark driver host and port system properties. This explicitly sets the configuration
// instead of relying on the default value of the config constant.
_conf.set(DRIVER_HOST_ADDRESS, _conf.get(DRIVER_HOST_ADDRESS))
_conf.setIfMissing(DRIVER_PORT, 0)
_conf.set(EXECUTOR_ID, SparkContext.DRIVER_IDENTIFIER)
_jars = Utils.getUserJars(_conf)
_files = _conf.getOption(FILES.key).map(_.split(",")).map(_.filter(_.nonEmpty))
.toSeq.flatten
_archives = _conf.getOption(ARCHIVES.key).map(Utils.stringToSeq).toSeq.flatten
_eventLogDir =
if (isEventLogEnabled) {
val unresolvedDir = conf.get(EVENT_LOG_DIR).stripSuffix("/")
Some(Utils.resolveURI(unresolvedDir))
} else {
None
}
_eventLogCodec = {
val compress = _conf.get(EVENT_LOG_COMPRESS)
if (compress && isEventLogEnabled) {
Some(_conf.get(EVENT_LOG_COMPRESSION_CODEC)).map(CompressionCodec.getShortName)
} else {
None
}
}
_listenerBus = new LiveListenerBus(_conf)
_resourceProfileManager = new ResourceProfileManager(_conf, _listenerBus)
// Initialize the app status store and listener before SparkEnv is created so that it gets
// all events.
val appStatusSource = AppStatusSource.createSource(conf)
_statusStore = AppStatusStore.createLiveStore(conf, appStatusSource)
listenerBus.addToStatusQueue(_statusStore.listener.get)
// Create the Spark execution environment (cache, map output tracker, etc)
_env = createSparkEnv(_conf, isLocal, listenerBus)
SparkEnv.set(_env)
// If running the REPL, register the repl's output dir with the file server.
_conf.getOption("spark.repl.class.outputDir").foreach { path =>
val replUri = _env.rpcEnv.fileServer.addDirectory("/classes", new File(path))
_conf.set("spark.repl.class.uri", replUri)
}
_statusTracker = new SparkStatusTracker(this, _statusStore)
_progressBar =
if (_conf.get(UI_SHOW_CONSOLE_PROGRESS)) {
Some(new ConsoleProgressBar(this))
} else {
None
}
_ui =
if (conf.get(UI_ENABLED)) {
Some(SparkUI.create(Some(this), _statusStore, _conf, _env.securityManager, appName, "",
startTime))
} else {
// For tests, do not enable the UI
None
}
// Bind the UI before starting the task scheduler to communicate
// the bound port to the cluster manager properly
_ui.foreach(_.bind())
_hadoopConfiguration = SparkHadoopUtil.get.newConfiguration(_conf)
// Performance optimization: this dummy call to .size() triggers eager evaluation of
// Configuration's internal `properties` field, guaranteeing that it will be computed and
// cached before SessionState.newHadoopConf() uses `sc.hadoopConfiguration` to create
// a new per-session Configuration. If `properties` has not been computed by that time
// then each newly-created Configuration will perform its own expensive IO and XML
// parsing to load configuration defaults and populate its own properties. By ensuring
// that we've pre-computed the parent's properties, the child Configuration will simply
// clone the parent's properties.
_hadoopConfiguration.size()
// Add each JAR given through the constructor
if (jars != null) {
jars.foreach(jar => addJar(jar, true))
if (addedJars.nonEmpty) {
_conf.set("spark.app.initial.jar.urls", addedJars.keys.toSeq.mkString(","))
}
}
if (files != null) {
files.foreach(file => addFile(file, false, true))
if (addedFiles.nonEmpty) {
_conf.set("spark.app.initial.file.urls", addedFiles.keys.toSeq.mkString(","))
}
}
if (archives != null) {
archives.foreach(file => addFile(file, false, true, isArchive = true))
if (addedArchives.nonEmpty) {
_conf.set("spark.app.initial.archive.urls", addedArchives.keys.toSeq.mkString(","))
}
}
_executorMemory = _conf.getOption(EXECUTOR_MEMORY.key)
.orElse(Option(System.getenv("SPARK_EXECUTOR_MEMORY")))
.orElse(Option(System.getenv("SPARK_MEM"))
.map(warnSparkMem))
.map(Utils.memoryStringToMb)
.getOrElse(1024)
// Convert Java options to env vars as a workaround
// since we can't set env vars directly in sbt.
for { (envKey, propKey) <- Seq(("SPARK_TESTING", IS_TESTING.key))
value <- Option(System.getenv(envKey)).orElse(Option(System.getProperty(propKey)))} {
executorEnvs(envKey) = value
}
Option(System.getenv("SPARK_PREPEND_CLASSES")).foreach { v =>
executorEnvs("SPARK_PREPEND_CLASSES") = v
}
// The Mesos scheduler backend relies on this environment variable to set executor memory.
// TODO: Set this only in the Mesos scheduler.
executorEnvs("SPARK_EXECUTOR_MEMORY") = executorMemory + "m"
executorEnvs ++= _conf.getExecutorEnv
executorEnvs("SPARK_USER") = sparkUser
_shuffleDriverComponents = ShuffleDataIOUtils.loadShuffleDataIO(config).driver()
_shuffleDriverComponents.initializeApplication().asScala.foreach { case (k, v) =>
_conf.set(ShuffleDataIOUtils.SHUFFLE_SPARK_CONF_PREFIX + k, v)
}
// We need to register "HeartbeatReceiver" before "createTaskScheduler" because Executor will
// retrieve "HeartbeatReceiver" in the constructor. (SPARK-6640)
_heartbeatReceiver = env.rpcEnv.setupEndpoint(
HeartbeatReceiver.ENDPOINT_NAME, new HeartbeatReceiver(this))
// Initialize any plugins before the task scheduler is initialized.
_plugins = PluginContainer(this, _resources.asJava)
// Create and start the scheduler
val (sched, ts) = SparkContext.createTaskScheduler(this, master, deployMode)
_schedulerBackend = sched
_taskScheduler = ts
_dagScheduler = new DAGScheduler(this)
_heartbeatReceiver.ask[Boolean](TaskSchedulerIsSet)
val _executorMetricsSource =
if (_conf.get(METRICS_EXECUTORMETRICS_SOURCE_ENABLED)) {
Some(new ExecutorMetricsSource)
} else {
None
}
// create and start the heartbeater for collecting memory metrics
_heartbeater = new Heartbeater(
() => SparkContext.this.reportHeartBeat(_executorMetricsSource),
"driver-heartbeater",
conf.get(EXECUTOR_HEARTBEAT_INTERVAL))
_heartbeater.start()
// Start the TaskScheduler only after the DAGScheduler's constructor has registered the
// DAGScheduler reference with the task scheduler.
_taskScheduler.start()
_applicationId = _taskScheduler.applicationId()
_applicationAttemptId = _taskScheduler.applicationAttemptId()
_conf.set("spark.app.id", _applicationId)
_applicationAttemptId.foreach { attemptId =>
_conf.set(APP_ATTEMPT_ID, attemptId)
_env.blockManager.blockStoreClient.setAppAttemptId(attemptId)
}
if (_conf.get(UI_REVERSE_PROXY)) {
val proxyUrl = _conf.get(UI_REVERSE_PROXY_URL.key, "").stripSuffix("/") +
"/proxy/" + _applicationId
System.setProperty("spark.ui.proxyBase", proxyUrl)
}
_ui.foreach(_.setAppId(_applicationId))
_env.blockManager.initialize(_applicationId)
FallbackStorage.registerBlockManagerIfNeeded(_env.blockManager.master, _conf)
// The driver's metrics system needs spark.app.id to be set to the application ID,
// so it should start only after we get the app ID from the task scheduler and set spark.app.id.
_env.metricsSystem.start(_conf.get(METRICS_STATIC_SOURCES_ENABLED))
_eventLogger =
if (isEventLogEnabled) {
val logger =
new EventLoggingListener(_applicationId, _applicationAttemptId, _eventLogDir.get,
_conf, _hadoopConfiguration)
logger.start()
listenerBus.addToEventLogQueue(logger)
Some(logger)
} else {
None
}
_cleaner =
if (_conf.get(CLEANER_REFERENCE_TRACKING)) {
Some(new ContextCleaner(this, _shuffleDriverComponents))
} else {
None
}
_cleaner.foreach(_.start())
val dynamicAllocationEnabled = Utils.isDynamicAllocationEnabled(_conf)
_executorAllocationManager =
if (dynamicAllocationEnabled) {
schedulerBackend match {
case b: ExecutorAllocationClient =>
Some(new ExecutorAllocationManager(
schedulerBackend.asInstanceOf[ExecutorAllocationClient], listenerBus, _conf,
cleaner = cleaner, resourceProfileManager = resourceProfileManager))
case _ =>
None
}
} else {
None
}
_executorAllocationManager.foreach(_.start())
setupAndStartListenerBus()
postEnvironmentUpdate()
postApplicationStart()
// After application started, attach handlers to started server and start handler.
_ui.foreach(_.attachAllHandler())
// Attach the driver metrics servlet handler to the web ui after the metrics system is started.
_env.metricsSystem.getServletHandlers.foreach(handler => ui.foreach(_.attachHandler(handler)))
// Make sure the context is stopped if the user forgets about it. This avoids leaving
// unfinished event logs around after the JVM exits cleanly. It doesn't help if the JVM
// is killed, though.
logDebug("Adding shutdown hook") // force eager creation of logger
_shutdownHookRef = ShutdownHookManager.addShutdownHook(
ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY) { () =>
logInfo("Invoking stop() from shutdown hook")
try {
stop()
} catch {
case e: Throwable =>
logWarning("Ignoring Exception while stopping SparkContext from shutdown hook", e)
}
}
// Post init
_taskScheduler.postStartHook()
if (isLocal) {
_env.metricsSystem.registerSource(Executor.executorSourceLocalModeOnly)
}
_env.metricsSystem.registerSource(_dagScheduler.metricsSource)
_env.metricsSystem.registerSource(new BlockManagerSource(_env.blockManager))
_env.metricsSystem.registerSource(new JVMCPUSource())
_executorMetricsSource.foreach(_.register(_env.metricsSystem))
_executorAllocationManager.foreach { e =>
_env.metricsSystem.registerSource(e.executorAllocationManagerSource)
}
appStatusSource.foreach(_env.metricsSystem.registerSource(_))
_plugins.foreach(_.registerMetrics(applicationId))
} catch {
case NonFatal(e) =>
logError("Error initializing SparkContext.", e)
try {
stop()
} catch {
case NonFatal(inner) =>
logError("Error stopping SparkContext after init error.", inner)
} finally {
throw e
}
}
/**
* Called by the web UI to obtain executor thread dumps. This method may be expensive.
* Logs an error and returns None if we failed to obtain a thread dump, which could occur due
* to an executor being dead or unresponsive or due to network issues while sending the thread
* dump message back to the driver.
*/
private[spark] def getExecutorThreadDump(executorId: String): Option[Array[ThreadStackTrace]] = {
try {
if (executorId == SparkContext.DRIVER_IDENTIFIER) {
Some(Utils.getThreadDump())
} else {
env.blockManager.master.getExecutorEndpointRef(executorId) match {
case Some(endpointRef) =>
Some(endpointRef.askSync[Array[ThreadStackTrace]](TriggerThreadDump))
case None =>
logWarning(s"Executor $executorId might already have stopped and " +
"can not request thread dump from it.")
None
}
}
} catch {
case e: Exception =>
logError(s"Exception getting thread dump from executor $executorId", e)
None
}
}
private[spark] def getLocalProperties: Properties = localProperties.get()
private[spark] def setLocalProperties(props: Properties): Unit = {
localProperties.set(props)
}
/**
* Set a local property that affects jobs submitted from this thread, such as the Spark fair
* scheduler pool. User-defined properties may also be set here. These properties are propagated
* through to worker tasks and can be accessed there via
* [[org.apache.spark.TaskContext#getLocalProperty]].
*
* These properties are inherited by child threads spawned from this thread. This
* may have unexpected consequences when working with thread pools. The standard Java
* implementation of thread pools has worker threads spawn other worker threads.
* As a result, local properties may propagate unpredictably.
*/
def setLocalProperty(key: String, value: String): Unit = {
if (value == null) {
localProperties.get.remove(key)
} else {
localProperties.get.setProperty(key, value)
}
}
/**
* Get a local property set in this thread, or null if it is missing. See
* `org.apache.spark.SparkContext.setLocalProperty`.
*/
def getLocalProperty(key: String): String =
Option(localProperties.get).map(_.getProperty(key)).orNull
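/*
 * Usage sketch (illustrative; the pool name is hypothetical): routing jobs submitted from this
 * thread to a specific fair-scheduler pool via a thread-local property, then clearing it.
 * {{{
 * sc.setLocalProperty("spark.scheduler.pool", "reporting")
 * sc.parallelize(1 to 1000, 4).count()              // runs in the "reporting" pool
 * assert(sc.getLocalProperty("spark.scheduler.pool") == "reporting")
 * sc.setLocalProperty("spark.scheduler.pool", null) // unset for subsequent jobs from this thread
 * }}}
 */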
/** Set a human readable description of the current job. */
def setJobDescription(value: String): Unit = {
setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, value)
}
/**
* Assigns a group ID to all the jobs started by this thread until the group ID is set to a
* different value or cleared.
*
* Often, a unit of execution in an application consists of multiple Spark actions or jobs.
* Application programmers can use this method to group all those jobs together and give a
* group description. Once set, the Spark web UI will associate such jobs with this group.
*
* The application can also use `org.apache.spark.SparkContext.cancelJobGroup` to cancel all
* running jobs in this group. For example,
* {{{
* // In the main thread:
* sc.setJobGroup("some_job_to_cancel", "some job description")
* sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count()
*
* // In a separate thread:
* sc.cancelJobGroup("some_job_to_cancel")
* }}}
*
* @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()`
* being called on the job's executor threads. This is useful to help ensure that the tasks
* are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS
* may respond to Thread.interrupt() by marking nodes as dead.
*/
def setJobGroup(groupId: String,
description: String, interruptOnCancel: Boolean = false): Unit = {
setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, description)
setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, groupId)
// Note: Specifying interruptOnCancel in setJobGroup (rather than cancelJobGroup) avoids
// changing several public APIs and allows Spark cancellations outside of the cancelJobGroup
// APIs to also take advantage of this property (e.g., internal job failures or canceling from
// JobProgressTab UI) on a per-job basis.
setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString)
}
/** Clear the current thread's job group ID and its description. */
def clearJobGroup(): Unit = {
setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, null)
setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, null)
setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, null)
}
/**
* Execute a block of code in a scope such that all new RDDs created in this body will
* be part of the same scope. For more detail, see {{org.apache.spark.rdd.RDDOperationScope}}.
*
* @note Return statements are NOT allowed in the given body.
*/
private[spark] def withScope[U](body: => U): U = RDDOperationScope.withScope[U](this)(body)
// Methods for creating RDDs
/** Distribute a local Scala collection to form an RDD.
*
* @note Parallelize acts lazily. If `seq` is a mutable collection and is altered after the call
* to parallelize and before the first action on the RDD, the resultant RDD will reflect the
* modified collection. Pass a copy of the argument to avoid this.
* @note avoid using `parallelize(Seq())` to create an empty `RDD`. Consider `emptyRDD` for an
* RDD with no partitions, or `parallelize(Seq[T]())` for an RDD of `T` with empty partitions.
* @param seq Scala collection to distribute
* @param numSlices number of partitions to divide the collection into
* @return RDD representing distributed collection
*/
def parallelize[T: ClassTag](
seq: Seq[T],
numSlices: Int = defaultParallelism): RDD[T] = withScope {
assertNotStopped()
new ParallelCollectionRDD[T](this, seq, numSlices, Map[Int, Seq[String]]())
}
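/*
 * Usage sketch (illustrative): distributing a small local collection and running an action.
 * Note the caveat above about mutable collections; pass an immutable or copied collection.
 * {{{
 * val rdd = sc.parallelize(Seq(1, 2, 3, 4, 5), numSlices = 2)
 * val total = rdd.reduce(_ + _)   // 15
 * }}}
 */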
/**
* Creates a new RDD[Long] containing elements from `start` to `end` (exclusive), incremented by
* `step` for each element.
*
* @note if you need to cache this RDD, make sure each partition does not exceed the size limit.
*
* @param start the start value.
* @param end the end value.
* @param step the incremental step
* @param numSlices number of partitions to divide the collection into
* @return RDD representing distributed range
*/
def range(
start: Long,
end: Long,
step: Long = 1,
numSlices: Int = defaultParallelism): RDD[Long] = withScope {
assertNotStopped()
// when step is 0, range will run infinitely
require(step != 0, "step cannot be 0")
val numElements: BigInt = {
val safeStart = BigInt(start)
val safeEnd = BigInt(end)
if ((safeEnd - safeStart) % step == 0 || (safeEnd > safeStart) != (step > 0)) {
(safeEnd - safeStart) / step
} else {
// the remainder has the same sign as the range, so add one more element
(safeEnd - safeStart) / step + 1
}
}
parallelize(0 until numSlices, numSlices).mapPartitionsWithIndex { (i, _) =>
val partitionStart = (i * numElements) / numSlices * step + start
val partitionEnd = (((i + 1) * numElements) / numSlices) * step + start
def getSafeMargin(bi: BigInt): Long =
if (bi.isValidLong) {
bi.toLong
} else if (bi > 0) {
Long.MaxValue
} else {
Long.MinValue
}
val safePartitionStart = getSafeMargin(partitionStart)
val safePartitionEnd = getSafeMargin(partitionEnd)
new Iterator[Long] {
private[this] var number: Long = safePartitionStart
private[this] var overflow: Boolean = false
override def hasNext =
if (!overflow) {
if (step > 0) {
number < safePartitionEnd
} else {
number > safePartitionEnd
}
} else false
override def next() = {
val ret = number
number += step
if (number < ret ^ step < 0) {
// we have Long.MaxValue + Long.MaxValue < Long.MaxValue
// and Long.MinValue + Long.MinValue > Long.MinValue, so iff the step causes a step
// back, we are pretty sure that we have an overflow.
overflow = true
}
ret
}
}
}
}
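/*
 * Usage sketch (illustrative): the even numbers below 100, spread over 4 partitions.
 * {{{
 * val evens = sc.range(0L, 100L, step = 2, numSlices = 4)
 * assert(evens.count() == 50)
 * }}}
 */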
/** Distribute a local Scala collection to form an RDD.
*
* This method is identical to `parallelize`.
* @param seq Scala collection to distribute
* @param numSlices number of partitions to divide the collection into
* @return RDD representing distributed collection
*/
def makeRDD[T: ClassTag](
seq: Seq[T],
numSlices: Int = defaultParallelism): RDD[T] = withScope {
parallelize(seq, numSlices)
}
/**
* Distribute a local Scala collection to form an RDD, with one or more
* location preferences (hostnames of Spark nodes) for each object.
* Create a new partition for each collection item.
* @param seq list of tuples of data and location preferences (hostnames of Spark nodes)
* @return RDD representing data partitioned according to location preferences
*/
def makeRDD[T: ClassTag](seq: Seq[(T, Seq[String])]): RDD[T] = withScope {
assertNotStopped()
val indexToPrefs = seq.zipWithIndex.map(t => (t._2, t._1._2)).toMap
new ParallelCollectionRDD[T](this, seq.map(_._1), math.max(seq.size, 1), indexToPrefs)
}
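/*
 * Usage sketch (hostnames are hypothetical): one partition per element, each with a preferred
 * location hint that the scheduler will use when possible.
 * {{{
 * val data = Seq(
 *   ("block-a", Seq("host1.example.com")),
 *   ("block-b", Seq("host2.example.com", "host3.example.com")))
 * val rdd = sc.makeRDD(data)   // 2 partitions, scheduled near the given hosts when possible
 * }}}
 */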
/**
* Read a text file from HDFS, a local file system (available on all nodes), or any
* Hadoop-supported file system URI, and return it as an RDD of Strings.
* The text files must be encoded as UTF-8.
*
* @param path path to the text file on a supported file system
* @param minPartitions suggested minimum number of partitions for the resulting RDD
* @return RDD of lines of the text file
*/
def textFile(
path: String,
minPartitions: Int = defaultMinPartitions): RDD[String] = withScope {
assertNotStopped()
hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text],
minPartitions).map(pair => pair._2.toString).setName(path)
}
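/*
 * Usage sketch (the path is hypothetical): counting non-empty lines of a UTF-8 text file.
 * {{{
 * val lines = sc.textFile("hdfs://namenode/data/events.log", minPartitions = 8)
 * val nonEmpty = lines.filter(_.nonEmpty).count()
 * }}}
 */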
/**
* Read a directory of text files from HDFS, a local file system (available on all nodes), or any
* Hadoop-supported file system URI. Each file is read as a single record and returned in a
* key-value pair, where the key is the path of each file and the value is the content of each file.
* The text files must be encoded as UTF-8.
*
* <p> For example, if you have the following files:
* {{{
* hdfs://a-hdfs-path/part-00000
* hdfs://a-hdfs-path/part-00001
* ...
* hdfs://a-hdfs-path/part-nnnnn
* }}}
*
* Do `val rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")`,
*
* <p> then `rdd` contains
* {{{
* (a-hdfs-path/part-00000, its content)
* (a-hdfs-path/part-00001, its content)
* ...
* (a-hdfs-path/part-nnnnn, its content)
* }}}
*
* @note Small files are preferred; large files are also allowed, but may cause poor performance.
* @note On some filesystems, `.../path/*` can be a more efficient way to read all files
* in a directory rather than `.../path/` or `.../path`
* @note Partitioning is determined by data locality. This may result in too few partitions
* by default.
*
* @param path Directory to the input data files, the path can be comma separated paths as the
* list of inputs.
* @param minPartitions A suggestion value of the minimal splitting number for input data.
* @return RDD representing tuples of file path and the corresponding file content
*/
def wholeTextFiles(
path: String,
minPartitions: Int = defaultMinPartitions): RDD[(String, String)] = withScope {
assertNotStopped()
val job = NewHadoopJob.getInstance(hadoopConfiguration)
// Use setInputPaths so that wholeTextFiles aligns with hadoopFile/textFile in taking
// comma separated files as input. (see SPARK-7155)
NewFileInputFormat.setInputPaths(job, path)
val updateConf = job.getConfiguration
new WholeTextFileRDD(
this,
classOf[WholeTextFileInputFormat],
classOf[Text],
classOf[Text],
updateConf,
minPartitions).map(record => (record._1.toString, record._2.toString)).setName(path)
}
/**
* Get an RDD for a Hadoop-readable dataset as PortableDataStream for each file
* (useful for binary data)
*
* For example, if you have the following files:
* {{{
* hdfs://a-hdfs-path/part-00000
* hdfs://a-hdfs-path/part-00001
* ...
* hdfs://a-hdfs-path/part-nnnnn
* }}}
*
* Do
* `val rdd = sparkContext.binaryFiles("hdfs://a-hdfs-path")`,
*
* then `rdd` contains
* {{{
* (a-hdfs-path/part-00000, its content)
* (a-hdfs-path/part-00001, its content)
* ...
* (a-hdfs-path/part-nnnnn, its content)
* }}}
*
* @note Small files are preferred; very large files may cause bad performance.
* @note On some filesystems, `.../path/*` can be a more efficient way to read all files
* in a directory rather than `.../path/` or `.../path`
* @note Partitioning is determined by data locality. This may result in too few partitions
* by default.
*
* @param path Directory to the input data files, the path can be comma separated paths as the
* list of inputs.
* @param minPartitions A suggestion value of the minimal splitting number for input data.
* @return RDD representing tuples of file path and corresponding file content
*/
def binaryFiles(
path: String,
minPartitions: Int = defaultMinPartitions): RDD[(String, PortableDataStream)] = withScope {
assertNotStopped()
val job = NewHadoopJob.getInstance(hadoopConfiguration)
// Use setInputPaths so that binaryFiles aligns with hadoopFile/textFile in taking
// comma separated files as input. (see SPARK-7155)
NewFileInputFormat.setInputPaths(job, path)
val updateConf = job.getConfiguration
new BinaryFileRDD(
this,
classOf[StreamInputFormat],
classOf[String],
classOf[PortableDataStream],
updateConf,
minPartitions).setName(path)
}
/**
* Load data from a flat binary file, assuming the length of each record is constant.
*
* @note We ensure that the byte array for each record in the resulting RDD
* has the provided record length.
*
* @param path Directory to the input data files, the path can be comma separated paths as the
* list of inputs.
* @param recordLength The length at which to split the records
* @param conf Configuration for setting up the dataset.
*
* @return An RDD of data with values, represented as byte arrays
*/
def binaryRecords(
path: String,
recordLength: Int,
conf: Configuration = hadoopConfiguration): RDD[Array[Byte]] = withScope {
assertNotStopped()
conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength)
val br = newAPIHadoopFile[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](path,
classOf[FixedLengthBinaryInputFormat],
classOf[LongWritable],
classOf[BytesWritable],
conf = conf)
br.map { case (k, v) =>
val bytes = v.copyBytes()
assert(bytes.length == recordLength, "Byte array does not have correct length")
bytes
}
}
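/*
 * Usage sketch (the path and record size are hypothetical): reading fixed-width 16-byte records,
 * e.g. two packed longs per record, and decoding the first long of each record.
 * {{{
 * val records = sc.binaryRecords("hdfs://namenode/data/fixed16.bin", recordLength = 16)
 * val firstLongs = records.map(bytes => java.nio.ByteBuffer.wrap(bytes).getLong)
 * }}}
 */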
/**
* Get an RDD for a Hadoop-readable dataset from a Hadoop JobConf given its InputFormat and other
* necessary info (e.g. file name for a filesystem-based dataset, table name for HyperTable),
* using the older MapReduce API (`org.apache.hadoop.mapred`).
*
* @param conf JobConf for setting up the dataset. Note: This will be put into a Broadcast.
* Therefore if you plan to reuse this conf to create multiple RDDs, you need to make
* sure you won't modify the conf. A safe approach is always creating a new conf for
* a new RDD.
* @param inputFormatClass storage format of the data to be read
* @param keyClass `Class` of the key associated with the `inputFormatClass` parameter
* @param valueClass `Class` of the value associated with the `inputFormatClass` parameter
* @param minPartitions Minimum number of Hadoop Splits to generate.
* @return RDD of tuples of key and corresponding value
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
*/
def hadoopRDD[K, V](
conf: JobConf,
inputFormatClass: Class[_ <: InputFormat[K, V]],
keyClass: Class[K],
valueClass: Class[V],
minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope {
assertNotStopped()
// This is a hack to enforce loading hdfs-site.xml.
// See SPARK-11227 for details.
FileSystem.getLocal(conf)
// Add necessary security credentials to the JobConf before broadcasting it.
SparkHadoopUtil.get.addCredentials(conf)
new HadoopRDD(this, conf, inputFormatClass, keyClass, valueClass, minPartitions)
}
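/*
 * Usage sketch (the input path is hypothetical): reading with the old MapReduce API through an
 * explicit JobConf, then copying values out of the re-used Writable before caching, per the
 * note above.
 * {{{
 * val jobConf = new JobConf(sc.hadoopConfiguration)
 * org.apache.hadoop.mapred.FileInputFormat.setInputPaths(jobConf, "hdfs://namenode/data/in")
 * val raw = sc.hadoopRDD(jobConf, classOf[TextInputFormat], classOf[LongWritable], classOf[Text])
 * val lines = raw.map { case (_, text) => text.toString }   // copies out of the shared Writable
 * lines.cache()
 * }}}
 */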
/** Get an RDD for a Hadoop file with an arbitrary InputFormat
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @param inputFormatClass storage format of the data to be read
* @param keyClass `Class` of the key associated with the `inputFormatClass` parameter
* @param valueClass `Class` of the value associated with the `inputFormatClass` parameter
* @param minPartitions suggested minimum number of partitions for the resulting RDD
* @return RDD of tuples of key and corresponding value
*/
def hadoopFile[K, V](
path: String,
inputFormatClass: Class[_ <: InputFormat[K, V]],
keyClass: Class[K],
valueClass: Class[V],
minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope {
assertNotStopped()
// This is a hack to enforce loading hdfs-site.xml.
// See SPARK-11227 for details.
FileSystem.getLocal(hadoopConfiguration)
// A Hadoop configuration can be about 10 KiB, which is pretty big, so broadcast it.
val confBroadcast = broadcast(new SerializableConfiguration(hadoopConfiguration))
val setInputPathsFunc = (jobConf: JobConf) => FileInputFormat.setInputPaths(jobConf, path)
new HadoopRDD(
this,
confBroadcast,
Some(setInputPathsFunc),
inputFormatClass,
keyClass,
valueClass,
minPartitions).setName(path)
}
/**
* Smarter version of hadoopFile() that uses class tags to figure out the classes of keys,
* values and the InputFormat so that users don't need to pass them directly. Instead, callers
* can just write, for example,
* {{{
* val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path, minPartitions)
* }}}
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @param minPartitions suggested minimum number of partitions for the resulting RDD
* @return RDD of tuples of key and corresponding value
*/
def hadoopFile[K, V, F <: InputFormat[K, V]]
(path: String, minPartitions: Int)
(implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope {
hadoopFile(path,
fm.runtimeClass.asInstanceOf[Class[F]],
km.runtimeClass.asInstanceOf[Class[K]],
vm.runtimeClass.asInstanceOf[Class[V]],
minPartitions)
}
/**
* Smarter version of hadoopFile() that uses class tags to figure out the classes of keys,
* values and the InputFormat so that users don't need to pass them directly. Instead, callers
* can just write, for example,
* {{{
* val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path)
* }}}
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths as
* a list of inputs
* @return RDD of tuples of key and corresponding value
*/
def hadoopFile[K, V, F <: InputFormat[K, V]](path: String)
(implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope {
hadoopFile[K, V, F](path, defaultMinPartitions)
}
/**
* Smarter version of `newAPIHadoopFile` that uses class tags to figure out the classes of keys,
* values and the `org.apache.hadoop.mapreduce.InputFormat` (new MapReduce API) so that users
* don't need to pass them directly. Instead, callers can just write, for example:
* {{{
* val file = sparkContext.newAPIHadoopFile[LongWritable, Text, TextInputFormat](path)
* }}}
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @return RDD of tuples of key and corresponding value
*/
def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]]
(path: String)
(implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope {
newAPIHadoopFile(
path,
fm.runtimeClass.asInstanceOf[Class[F]],
km.runtimeClass.asInstanceOf[Class[K]],
vm.runtimeClass.asInstanceOf[Class[V]])
}
/**
* Get an RDD for a given Hadoop file with an arbitrary new API InputFormat
* and extra configuration options to pass to the input format.
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @param fClass storage format of the data to be read
* @param kClass `Class` of the key associated with the `fClass` parameter
* @param vClass `Class` of the value associated with the `fClass` parameter
* @param conf Hadoop configuration
* @return RDD of tuples of key and corresponding value
*/
def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]](
path: String,
fClass: Class[F],
kClass: Class[K],
vClass: Class[V],
conf: Configuration = hadoopConfiguration): RDD[(K, V)] = withScope {
assertNotStopped()
// This is a hack to enforce loading hdfs-site.xml.
// See SPARK-11227 for details.
FileSystem.getLocal(hadoopConfiguration)
// The call to NewHadoopJob automatically adds security credentials to conf,
// so we don't need to explicitly add them ourselves
val job = NewHadoopJob.getInstance(conf)
// Use setInputPaths so that newAPIHadoopFile aligns with hadoopFile/textFile in taking
// comma separated files as input. (see SPARK-7155)
NewFileInputFormat.setInputPaths(job, path)
val updatedConf = job.getConfiguration
new NewHadoopRDD(this, fClass, kClass, vClass, updatedConf).setName(path)
}
/**
* Get an RDD for a given Hadoop file with an arbitrary new API InputFormat
* and extra configuration options to pass to the input format.
*
* @param conf Configuration for setting up the dataset. Note: This will be put into a Broadcast.
* Therefore if you plan to reuse this conf to create multiple RDDs, you need to make
* sure you won't modify the conf. A safe approach is always creating a new conf for
* a new RDD.
* @param fClass storage format of the data to be read
* @param kClass `Class` of the key associated with the `fClass` parameter
* @param vClass `Class` of the value associated with the `fClass` parameter
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
*/
def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]](
conf: Configuration = hadoopConfiguration,
fClass: Class[F],
kClass: Class[K],
vClass: Class[V]): RDD[(K, V)] = withScope {
assertNotStopped()
// This is a hack to enforce loading hdfs-site.xml.
// See SPARK-11227 for details.
FileSystem.getLocal(conf)
// Add necessary security credentials to the JobConf. Required to access secure HDFS.
val jconf = new JobConf(conf)
SparkHadoopUtil.get.addCredentials(jconf)
new NewHadoopRDD(this, fClass, kClass, vClass, jconf)
}
/**
* Get an RDD for a Hadoop SequenceFile with given key and value types.
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @param keyClass `Class` of the key associated with `SequenceFileInputFormat`
* @param valueClass `Class` of the value associated with `SequenceFileInputFormat`
* @param minPartitions suggested minimum number of partitions for the resulting RDD
* @return RDD of tuples of key and corresponding value
*/
def sequenceFile[K, V](path: String,
keyClass: Class[K],
valueClass: Class[V],
minPartitions: Int
): RDD[(K, V)] = withScope {
assertNotStopped()
val inputFormatClass = classOf[SequenceFileInputFormat[K, V]]
hadoopFile(path, inputFormatClass, keyClass, valueClass, minPartitions)
}
/**
* Get an RDD for a Hadoop SequenceFile with given key and value types.
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @param keyClass `Class` of the key associated with `SequenceFileInputFormat`
* @param valueClass `Class` of the value associated with `SequenceFileInputFormat`
* @return RDD of tuples of key and corresponding value
*/
def sequenceFile[K, V](
path: String,
keyClass: Class[K],
valueClass: Class[V]): RDD[(K, V)] = withScope {
assertNotStopped()
sequenceFile(path, keyClass, valueClass, defaultMinPartitions)
}
/**
* Version of sequenceFile() for types implicitly convertible to Writables through a
* WritableConverter. For example, to access a SequenceFile where the keys are Text and the
* values are IntWritable, you could simply write
* {{{
* sparkContext.sequenceFile[String, Int](path, ...)
* }}}
*
* WritableConverters are provided in a somewhat strange way (by an implicit function) to support
* both subclasses of Writable and types for which we define a converter (e.g. Int to
* IntWritable). The most natural thing would've been to have implicit objects for the
* converters, but then we couldn't have an object for every subclass of Writable (you can't
* have a parameterized singleton object). We use functions instead to create a new converter
* for the appropriate type. In addition, we pass the converter a ClassTag of its type to
* allow it to figure out the Writable class to use in the subclass case.
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @param minPartitions suggested minimum number of partitions for the resulting RDD
* @return RDD of tuples of key and corresponding value
*/
def sequenceFile[K, V]
(path: String, minPartitions: Int = defaultMinPartitions)
(implicit km: ClassTag[K], vm: ClassTag[V],
kcf: () => WritableConverter[K], vcf: () => WritableConverter[V]): RDD[(K, V)] = {
withScope {
assertNotStopped()
val kc = clean(kcf)()
val vc = clean(vcf)()
val format = classOf[SequenceFileInputFormat[Writable, Writable]]
val writables = hadoopFile(path, format,
kc.writableClass(km).asInstanceOf[Class[Writable]],
vc.writableClass(vm).asInstanceOf[Class[Writable]], minPartitions)
writables.map { case (k, v) => (kc.convert(k), vc.convert(v)) }
}
}
/**
* Load an RDD saved as a SequenceFile containing serialized objects, with NullWritable keys and
* BytesWritable values that contain a serialized partition. This is still an experimental
* storage format and may not be supported exactly as is in future Spark releases. It will also
* be pretty slow if you use the default serializer (Java serialization),
* though the nice thing about it is that there's very little effort required to save arbitrary
* objects.
*
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @param minPartitions suggested minimum number of partitions for the resulting RDD
* @return RDD representing deserialized data from the file(s)
*/
def objectFile[T: ClassTag](
path: String,
minPartitions: Int = defaultMinPartitions): RDD[T] = withScope {
assertNotStopped()
sequenceFile(path, classOf[NullWritable], classOf[BytesWritable], minPartitions)
.flatMap(x => Utils.deserialize[Array[T]](x._2.getBytes, Utils.getContextOrSparkClassLoader))
}
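/*
 * Usage sketch (the path is hypothetical): round-tripping an RDD through saveAsObjectFile and
 * objectFile using the default Java serializer.
 * {{{
 * sc.parallelize(Seq("a" -> 1, "b" -> 2)).saveAsObjectFile("hdfs://namenode/tmp/pairs.obj")
 * val restored = sc.objectFile[(String, Int)]("hdfs://namenode/tmp/pairs.obj")
 * }}}
 */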
protected[spark] def checkpointFile[T: ClassTag](path: String): RDD[T] = withScope {
new ReliableCheckpointRDD[T](this, path)
}
/** Build the union of a list of RDDs. */
def union[T: ClassTag](rdds: Seq[RDD[T]]): RDD[T] = withScope {
val nonEmptyRdds = rdds.filter(!_.partitions.isEmpty)
val partitioners = nonEmptyRdds.flatMap(_.partitioner).toSet
if (nonEmptyRdds.forall(_.partitioner.isDefined) && partitioners.size == 1) {
new PartitionerAwareUnionRDD(this, nonEmptyRdds)
} else {
new UnionRDD(this, nonEmptyRdds)
}
}
/** Build the union of a list of RDDs passed as variable-length arguments. */
def union[T: ClassTag](first: RDD[T], rest: RDD[T]*): RDD[T] = withScope {
union(Seq(first) ++ rest)
}
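/*
 * Usage sketch (illustrative): unioning several RDDs in one call rather than chaining
 * rdd1.union(rdd2).union(rdd3), which would build a deeper lineage.
 * {{{
 * val parts = (1 to 3).map(i => sc.parallelize(Seq(i, i * 10)))
 * val all = sc.union(parts)
 * assert(all.count() == 6)
 * }}}
 */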
/** Get an RDD that has no partitions or elements. */
def emptyRDD[T: ClassTag]: RDD[T] = new EmptyRDD[T](this)
// Methods for creating shared variables
/**
* Register the given accumulator.
*
* @note Accumulators must be registered before use, or an exception will be thrown.
*/
def register(acc: AccumulatorV2[_, _]): Unit = {
acc.register(this)
}
/**
* Register the given accumulator with the given name.
*
* @note Accumulators must be registered before use, or an exception will be thrown.
*/
def register(acc: AccumulatorV2[_, _], name: String): Unit = {
acc.register(this, name = Option(name))
}
/**
* Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`.
*/
def longAccumulator: LongAccumulator = {
val acc = new LongAccumulator
register(acc)
acc
}
/**
* Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`.
*/
def longAccumulator(name: String): LongAccumulator = {
val acc = new LongAccumulator
register(acc, name)
acc
}
/**
* Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`.
*/
def doubleAccumulator: DoubleAccumulator = {
val acc = new DoubleAccumulator
register(acc)
acc
}
/**
* Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`.
*/
def doubleAccumulator(name: String): DoubleAccumulator = {
val acc = new DoubleAccumulator
register(acc, name)
acc
}
/**
* Create and register a `CollectionAccumulator`, which starts with an empty list and accumulates
* inputs by adding them to the list.
*/
def collectionAccumulator[T]: CollectionAccumulator[T] = {
val acc = new CollectionAccumulator[T]
register(acc)
acc
}
/**
* Create and register a `CollectionAccumulator`, which starts with an empty list and accumulates
* inputs by adding them to the list.
*/
def collectionAccumulator[T](name: String): CollectionAccumulator[T] = {
val acc = new CollectionAccumulator[T]
register(acc, name)
acc
}
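/*
 * Usage sketch (illustrative): a named long accumulator updated from tasks and read back on the
 * driver; reading an accumulator's value inside tasks is not meaningful.
 * {{{
 * val badRecords = sc.longAccumulator("badRecords")
 * sc.parallelize(Seq("1", "x", "3")).foreach { s =>
 *   if (scala.util.Try(s.toInt).isFailure) badRecords.add(1L)
 * }
 * println(badRecords.value)   // 1, also visible in the web UI under the given name
 * }}}
 */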
/**
* Broadcast a read-only variable to the cluster, returning a
* [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions.
* The variable will be sent to each executor only once.
*
* @param value value to broadcast to the Spark nodes
* @return `Broadcast` object, a read-only variable cached on each machine
*/
def broadcast[T: ClassTag](value: T): Broadcast[T] = {
assertNotStopped()
require(!classOf[RDD[_]].isAssignableFrom(classTag[T].runtimeClass),
"Can not directly broadcast RDDs; instead, call collect() and broadcast the result.")
val bc = env.broadcastManager.newBroadcast[T](value, isLocal)
val callSite = getCallSite
logInfo("Created broadcast " + bc.id + " from " + callSite.shortForm)
cleaner.foreach(_.registerBroadcastForCleanup(bc))
bc
}
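/*
 * Usage sketch (illustrative): shipping a small lookup table to executors once and reading it
 * inside tasks through `.value`.
 * {{{
 * val countryNames = sc.broadcast(Map("NL" -> "Netherlands", "DE" -> "Germany"))
 * val expanded = sc.parallelize(Seq("NL", "DE", "NL"))
 *   .map(code => countryNames.value.getOrElse(code, "unknown"))
 * }}}
 */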
/**
* Add a file to be downloaded with this Spark job on every node.
*
* If a file is added during execution, it will not be available until the next TaskSet starts.
*
* @param path can be either a local file, a file in HDFS (or other Hadoop-supported
* filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs,
* use `SparkFiles.get(fileName)` to find its download location.
*
* @note A path can be added only once. Subsequent additions of the same path are ignored.
*/
def addFile(path: String): Unit = {
addFile(path, false, false)
}
/**
* Returns a list of file paths that are added to resources.
*/
def listFiles(): Seq[String] = addedFiles.keySet.toSeq
/**
* :: Experimental ::
* Add an archive to be downloaded and unpacked with this Spark job on every node.
*
* If an archive is added during execution, it will not be available until the next TaskSet
* starts.
*
* @param path can be either a local file, a file in HDFS (or other Hadoop-supported
* filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs,
* use `SparkFiles.get(paths-to-files)` to find its download/unpacked location.
* The given path should be one of .zip, .tar, .tar.gz, .tgz and .jar.
*
* @note A path can be added only once. Subsequent additions of the same path are ignored.
*
* @since 3.1.0
*/
@Experimental
def addArchive(path: String): Unit = {
addFile(path, false, false, isArchive = true)
}
/**
* :: Experimental ::
* Returns a list of archive paths that are added to resources.
*
* @since 3.1.0
*/
@Experimental
def listArchives(): Seq[String] = addedArchives.keySet.toSeq
/**
* Add a file to be downloaded with this Spark job on every node.
*
* If a file is added during execution, it will not be available until the next TaskSet starts.
*
* @param path can be either a local file, a file in HDFS (or other Hadoop-supported
* filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs,
* use `SparkFiles.get(fileName)` to find its download location.
* @param recursive if true, a directory can be given in `path`. Currently directories are
* only supported for Hadoop-supported filesystems.
*
* @note A path can be added only once. Subsequent additions of the same path are ignored.
*/
def addFile(path: String, recursive: Boolean): Unit = {
addFile(path, recursive, false)
}
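/*
 * Usage sketch (paths are hypothetical): distributing a small lookup file to every node and
 * opening it from tasks (or the driver) via SparkFiles.
 * {{{
 * sc.addFile("hdfs://namenode/config/stopwords.txt")
 * val stopwordsPath = org.apache.spark.SparkFiles.get("stopwords.txt")
 * val stopwords = scala.io.Source.fromFile(stopwordsPath).getLines().toSet
 * }}}
 */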
private def addFile(
path: String, recursive: Boolean, addedOnSubmit: Boolean, isArchive: Boolean = false
): Unit = {
val uri = Utils.resolveURI(path)
val schemeCorrectedURI = uri.getScheme match {
case null => new File(path).getCanonicalFile.toURI
case "local" =>
logWarning(s"File with 'local' scheme $path is not supported to add to file server, " +
s"since it is already available on every node.")
return
case _ => uri
}
val hadoopPath = new Path(schemeCorrectedURI)
val scheme = schemeCorrectedURI.getScheme
if (!Array("http", "https", "ftp").contains(scheme) && !isArchive) {
val fs = hadoopPath.getFileSystem(hadoopConfiguration)
val isDir = fs.getFileStatus(hadoopPath).isDirectory
if (!isLocal && scheme == "file" && isDir) {
throw new SparkException(s"addFile does not support local directories when not running " +
"local mode.")
}
if (!recursive && isDir) {
throw new SparkException(s"Added file $hadoopPath is a directory and recursive is not " +
"turned on.")
}
} else {
// SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies
Utils.validateURL(uri)
}
val key = if (!isLocal && scheme == "file") {
env.rpcEnv.fileServer.addFile(new File(uri.getPath))
} else if (uri.getScheme == null) {
schemeCorrectedURI.toString
} else {
uri.toString
}
val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis
if (!isArchive && addedFiles.putIfAbsent(key, timestamp).isEmpty) {
logInfo(s"Added file $path at $key with timestamp $timestamp")
// Fetch the file locally so that closures which are run on the driver can still use the
// SparkFiles API to access files.
Utils.fetchFile(uri.toString, new File(SparkFiles.getRootDirectory()), conf,
hadoopConfiguration, timestamp, useCache = false)
postEnvironmentUpdate()
} else if (
isArchive &&
addedArchives.putIfAbsent(
UriBuilder.fromUri(new URI(key)).fragment(uri.getFragment).build().toString,
timestamp).isEmpty) {
logInfo(s"Added archive $path at $key with timestamp $timestamp")
// If the scheme is file, use URI to simply copy instead of downloading.
val uriToUse = if (!isLocal && scheme == "file") uri else new URI(key)
val uriToDownload = UriBuilder.fromUri(uriToUse).fragment(null).build()
val source = Utils.fetchFile(uriToDownload.toString, Utils.createTempDir(), conf,
hadoopConfiguration, timestamp, useCache = false, shouldUntar = false)
val dest = new File(
SparkFiles.getRootDirectory(),
if (uri.getFragment != null) uri.getFragment else source.getName)
logInfo(
s"Unpacking an archive $path from ${source.getAbsolutePath} to ${dest.getAbsolutePath}")
Utils.deleteRecursively(dest)
Utils.unpack(source, dest)
postEnvironmentUpdate()
} else {
logWarning(s"The path $path has been added already. Overwriting of added paths " +
"is not supported in the current version.")
}
}
/**
* :: DeveloperApi ::
* Register a listener to receive up-calls from events that happen during execution.
*/
@DeveloperApi
def addSparkListener(listener: SparkListenerInterface): Unit = {
listenerBus.addToSharedQueue(listener)
}
/**
* :: DeveloperApi ::
* Deregister the listener from Spark's listener bus.
*/
@DeveloperApi
def removeSparkListener(listener: SparkListenerInterface): Unit = {
listenerBus.removeListener(listener)
}
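  // Illustrative usage sketch (editor's addition): a minimal listener that reports job
  // completions. SparkListener and SparkListenerJobEnd come from org.apache.spark.scheduler.
  //   sc.addSparkListener(new SparkListener {
  //     override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit =
  //       println(s"Job ${jobEnd.jobId} finished with result ${jobEnd.jobResult}")
  //   })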
private[spark] def getExecutorIds(): Seq[String] = {
schedulerBackend match {
case b: ExecutorAllocationClient =>
b.getExecutorIds()
case _ =>
logWarning("Requesting executors is not supported by current scheduler.")
Nil
}
}
/**
   * Get the max number of tasks that can be launched concurrently, based on the resources
   * the given ResourceProfile could use, even if some of them are in use at the moment.
   * Note that you should not cache the value returned by this method, because the number can
   * change when executors are added or removed.
*
* @param rp ResourceProfile which to use to calculate max concurrent tasks.
* @return The max number of tasks that can be concurrent launched currently.
*/
private[spark] def maxNumConcurrentTasks(rp: ResourceProfile): Int = {
schedulerBackend.maxNumConcurrentTasks(rp)
}
/**
* Update the cluster manager on our scheduling needs. Three bits of information are included
* to help it make decisions. This applies to the default ResourceProfile.
* @param numExecutors The total number of executors we'd like to have. The cluster manager
* shouldn't kill any running executor to reach this number, but,
* if all existing executors were to die, this is the number of executors
* we'd want to be allocated.
   * @param localityAwareTasks The number of tasks in all active stages that have locality
   *                           preferences. This includes running, pending, and completed tasks.
   * @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages
   *                             that would like to run on that host.
* This includes running, pending, and completed tasks.
* @return whether the request is acknowledged by the cluster manager.
*/
@DeveloperApi
def requestTotalExecutors(
numExecutors: Int,
localityAwareTasks: Int,
hostToLocalTaskCount: immutable.Map[String, Int]
): Boolean = {
schedulerBackend match {
case b: ExecutorAllocationClient =>
// this is being applied to the default resource profile, would need to add api to support
// others
val defaultProfId = resourceProfileManager.defaultResourceProfile.id
        b.requestTotalExecutors(immutable.Map(defaultProfId -> numExecutors),
immutable.Map(localityAwareTasks -> defaultProfId),
immutable.Map(defaultProfId -> hostToLocalTaskCount))
case _ =>
logWarning("Requesting executors is not supported by current scheduler.")
false
}
}
/**
* :: DeveloperApi ::
* Request an additional number of executors from the cluster manager.
* @return whether the request is received.
*/
@DeveloperApi
def requestExecutors(numAdditionalExecutors: Int): Boolean = {
schedulerBackend match {
case b: ExecutorAllocationClient =>
b.requestExecutors(numAdditionalExecutors)
case _ =>
logWarning("Requesting executors is not supported by current scheduler.")
false
}
}
/**
* :: DeveloperApi ::
* Request that the cluster manager kill the specified executors.
*
* This is not supported when dynamic allocation is turned on.
*
* @note This is an indication to the cluster manager that the application wishes to adjust
* its resource usage downwards. If the application wishes to replace the executors it kills
* through this method with new ones, it should follow up explicitly with a call to
* {{SparkContext#requestExecutors}}.
*
* @return whether the request is received.
*/
@DeveloperApi
def killExecutors(executorIds: Seq[String]): Boolean = {
schedulerBackend match {
case b: ExecutorAllocationClient =>
require(executorAllocationManager.isEmpty,
"killExecutors() unsupported with Dynamic Allocation turned on")
b.killExecutors(executorIds, adjustTargetNumExecutors = true, countFailures = false,
force = true).nonEmpty
case _ =>
logWarning("Killing executors is not supported by current scheduler.")
false
}
}
/**
* :: DeveloperApi ::
* Request that the cluster manager kill the specified executor.
*
* @note This is an indication to the cluster manager that the application wishes to adjust
* its resource usage downwards. If the application wishes to replace the executor it kills
* through this method with a new one, it should follow up explicitly with a call to
* {{SparkContext#requestExecutors}}.
*
* @return whether the request is received.
*/
@DeveloperApi
def killExecutor(executorId: String): Boolean = killExecutors(Seq(executorId))
/**
* Request that the cluster manager kill the specified executor without adjusting the
* application resource requirements.
*
* The effect is that a new executor will be launched in place of the one killed by
* this request. This assumes the cluster manager will automatically and eventually
* fulfill all missing application resource requests.
*
   * @note The replacement is by no means guaranteed; another application on the same cluster
   * can steal the window of opportunity and acquire this application's resources in the
   * meantime.
*
* @return whether the request is received.
*/
private[spark] def killAndReplaceExecutor(executorId: String): Boolean = {
schedulerBackend match {
case b: ExecutorAllocationClient =>
b.killExecutors(Seq(executorId), adjustTargetNumExecutors = false, countFailures = true,
force = true).nonEmpty
case _ =>
logWarning("Killing executors is not supported by current scheduler.")
false
}
}
/** The version of Spark on which this application is running. */
def version: String = SPARK_VERSION
/**
* Return a map from the block manager to the max memory available for caching and the remaining
* memory available for caching.
*/
def getExecutorMemoryStatus: Map[String, (Long, Long)] = {
assertNotStopped()
env.blockManager.master.getMemoryStatus.map { case(blockManagerId, mem) =>
(blockManagerId.host + ":" + blockManagerId.port, mem)
}
}
/**
* :: DeveloperApi ::
   * Return information about which RDDs are cached, whether they are in memory or on disk,
   * how much space they take, etc.
*/
@DeveloperApi
def getRDDStorageInfo: Array[RDDInfo] = {
getRDDStorageInfo(_ => true)
}
private[spark] def getRDDStorageInfo(filter: RDD[_] => Boolean): Array[RDDInfo] = {
assertNotStopped()
val rddInfos = persistentRdds.values.filter(filter).map(RDDInfo.fromRdd).toArray
rddInfos.foreach { rddInfo =>
val rddId = rddInfo.id
val rddStorageInfo = statusStore.asOption(statusStore.rdd(rddId))
rddInfo.numCachedPartitions = rddStorageInfo.map(_.numCachedPartitions).getOrElse(0)
rddInfo.memSize = rddStorageInfo.map(_.memoryUsed).getOrElse(0L)
rddInfo.diskSize = rddStorageInfo.map(_.diskUsed).getOrElse(0L)
}
rddInfos.filter(_.isCached)
}
/**
   * Returns an immutable map of RDDs that have marked themselves as persistent via a cache() call.
*
* @note This does not necessarily mean the caching or computation was successful.
*/
def getPersistentRDDs: Map[Int, RDD[_]] = persistentRdds.toMap
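  // Illustrative usage sketch (editor's addition): caching an RDD registers it in this map,
  // and unpersisting removes it again. Values are placeholders.
  //   val nums = sc.parallelize(1 to 100).cache()
  //   nums.count()                              // materializes the cached blocks
  //   val cachedIds = sc.getPersistentRDDs.keys // contains nums.id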
/**
* :: DeveloperApi ::
   * Return pools for the fair scheduler
*/
@DeveloperApi
def getAllPools: Seq[Schedulable] = {
assertNotStopped()
// TODO(xiajunluan): We should take nested pools into account
taskScheduler.rootPool.schedulableQueue.asScala.toSeq
}
/**
* :: DeveloperApi ::
* Return the pool associated with the given name, if one exists
*/
@DeveloperApi
def getPoolForName(pool: String): Option[Schedulable] = {
assertNotStopped()
Option(taskScheduler.rootPool.schedulableNameToSchedulable.get(pool))
}
/**
* Return current scheduling mode
*/
def getSchedulingMode: SchedulingMode.SchedulingMode = {
assertNotStopped()
taskScheduler.schedulingMode
}
/**
* Gets the locality information associated with the partition in a particular rdd
* @param rdd of interest
* @param partition to be looked up for locality
* @return list of preferred locations for the partition
*/
private [spark] def getPreferredLocs(rdd: RDD[_], partition: Int): Seq[TaskLocation] = {
dagScheduler.getPreferredLocs(rdd, partition)
}
/**
* Register an RDD to be persisted in memory and/or disk storage
*/
private[spark] def persistRDD(rdd: RDD[_]): Unit = {
persistentRdds(rdd.id) = rdd
}
/**
* Unpersist an RDD from memory and/or disk storage
*/
private[spark] def unpersistRDD(rddId: Int, blocking: Boolean): Unit = {
env.blockManager.master.removeRdd(rddId, blocking)
persistentRdds.remove(rddId)
listenerBus.post(SparkListenerUnpersistRDD(rddId))
}
/**
* Adds a JAR dependency for all tasks to be executed on this `SparkContext` in the future.
*
* If a jar is added during execution, it will not be available until the next TaskSet starts.
*
* @param path can be either a local file, a file in HDFS (or other Hadoop-supported filesystems),
* an HTTP, HTTPS or FTP URI, or local:/path for a file on every worker node.
*
* @note A path can be added only once. Subsequent additions of the same path are ignored.
*/
def addJar(path: String): Unit = {
addJar(path, false)
}
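  // Illustrative usage sketch (editor's addition): the jar locations below are placeholders;
  // the "local:" scheme marks a jar that is already present on every worker node.
  //   sc.addJar("hdfs:///libs/udfs.jar")
  //   sc.addJar("local:/opt/spark/extra/metrics.jar")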
private def addJar(path: String, addedOnSubmit: Boolean): Unit = {
def addLocalJarFile(file: File): Seq[String] = {
try {
if (!file.exists()) {
throw new FileNotFoundException(s"Jar ${file.getAbsolutePath} not found")
}
if (file.isDirectory) {
throw new IllegalArgumentException(
s"Directory ${file.getAbsoluteFile} is not allowed for addJar")
}
Seq(env.rpcEnv.fileServer.addJar(file))
} catch {
case NonFatal(e) =>
logError(s"Failed to add $path to Spark environment", e)
Nil
}
}
def checkRemoteJarFile(path: String): Seq[String] = {
val hadoopPath = new Path(path)
val scheme = hadoopPath.toUri.getScheme
if (!Array("http", "https", "ftp").contains(scheme)) {
try {
val fs = hadoopPath.getFileSystem(hadoopConfiguration)
if (!fs.exists(hadoopPath)) {
throw new FileNotFoundException(s"Jar ${path} not found")
}
if (fs.getFileStatus(hadoopPath).isDirectory) {
throw new IllegalArgumentException(
s"Directory ${path} is not allowed for addJar")
}
Seq(path)
} catch {
case NonFatal(e) =>
logError(s"Failed to add $path to Spark environment", e)
Nil
}
} else {
Seq(path)
}
}
if (path == null || path.isEmpty) {
logWarning("null or empty path specified as parameter to addJar")
} else {
      val (keys, scheme) = if (path.contains("\\") && Utils.isWindows) {
// For local paths with backslashes on Windows, URI throws an exception
(addLocalJarFile(new File(path)), "local")
} else {
val uri = Utils.resolveURI(path)
// SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies
Utils.validateURL(uri)
val uriScheme = uri.getScheme
val jarPaths = uriScheme match {
// A JAR file which exists only on the driver node
case null =>
// SPARK-22585 path without schema is not url encoded
addLocalJarFile(new File(uri.getPath))
// A JAR file which exists only on the driver node
case "file" => addLocalJarFile(new File(uri.getPath))
// A JAR file which exists locally on every worker node
case "local" => Seq("file:" + uri.getPath)
case "ivy" =>
// Since `new Path(path).toUri` will lose query information,
// so here we use `URI.create(path)`
DependencyUtils.resolveMavenDependencies(URI.create(path))
.flatMap(jar => addLocalJarFile(new File(jar)))
case _ => checkRemoteJarFile(path)
}
(jarPaths, uriScheme)
}
if (keys.nonEmpty) {
val timestamp = if (addedOnSubmit) startTime else System.currentTimeMillis
val (added, existed) = keys.partition(addedJars.putIfAbsent(_, timestamp).isEmpty)
if (added.nonEmpty) {
val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI"
logInfo(s"Added $jarMessage $path at ${added.mkString(",")} with timestamp $timestamp")
postEnvironmentUpdate()
}
if (existed.nonEmpty) {
val jarMessage = if (scheme != "ivy") "JAR" else "dependency jars of Ivy URI"
logInfo(s"The $jarMessage $path at ${existed.mkString(",")} has been added already." +
" Overwriting of added jar is not supported in the current version.")
}
}
}
}
/**
* Returns a list of jar files that are added to resources.
*/
def listJars(): Seq[String] = addedJars.keySet.toSeq
/**
   * When stopping SparkContext inside Spark components, it is easy to cause a deadlock, since
   * Spark may wait for some internal threads to finish. It's better to use this method to stop
* SparkContext instead.
*/
private[spark] def stopInNewThread(): Unit = {
new Thread("stop-spark-context") {
setDaemon(true)
override def run(): Unit = {
try {
SparkContext.this.stop()
} catch {
case e: Throwable =>
logError(e.getMessage, e)
throw e
}
}
}.start()
}
/**
* Shut down the SparkContext.
*/
def stop(): Unit = {
if (LiveListenerBus.withinListenerThread.value) {
throw new SparkException(s"Cannot stop SparkContext within listener bus thread.")
}
// Use the stopping variable to ensure no contention for the stop scenario.
// Still track the stopped variable for use elsewhere in the code.
if (!stopped.compareAndSet(false, true)) {
logInfo("SparkContext already stopped.")
return
}
if (_shutdownHookRef != null) {
ShutdownHookManager.removeShutdownHook(_shutdownHookRef)
}
if (listenerBus != null) {
Utils.tryLogNonFatalError {
postApplicationEnd()
}
}
Utils.tryLogNonFatalError {
_driverLogger.foreach(_.stop())
}
Utils.tryLogNonFatalError {
_ui.foreach(_.stop())
}
Utils.tryLogNonFatalError {
_cleaner.foreach(_.stop())
}
Utils.tryLogNonFatalError {
_executorAllocationManager.foreach(_.stop())
}
if (_dagScheduler != null) {
Utils.tryLogNonFatalError {
_dagScheduler.stop()
}
_dagScheduler = null
}
if (_listenerBusStarted) {
Utils.tryLogNonFatalError {
listenerBus.stop()
_listenerBusStarted = false
}
}
if (env != null) {
Utils.tryLogNonFatalError {
env.metricsSystem.report()
}
}
Utils.tryLogNonFatalError {
_plugins.foreach(_.shutdown())
}
FallbackStorage.cleanUp(_conf, _hadoopConfiguration)
Utils.tryLogNonFatalError {
_eventLogger.foreach(_.stop())
}
if (_heartbeater != null) {
Utils.tryLogNonFatalError {
_heartbeater.stop()
}
_heartbeater = null
}
if (_shuffleDriverComponents != null) {
Utils.tryLogNonFatalError {
_shuffleDriverComponents.cleanupApplication()
}
}
if (env != null && _heartbeatReceiver != null) {
Utils.tryLogNonFatalError {
env.rpcEnv.stop(_heartbeatReceiver)
}
}
Utils.tryLogNonFatalError {
_progressBar.foreach(_.stop())
}
_taskScheduler = null
// TODO: Cache.stop()?
if (_env != null) {
Utils.tryLogNonFatalError {
_env.stop()
}
SparkEnv.set(null)
}
if (_statusStore != null) {
_statusStore.close()
}
    // Clear this `InheritableThreadLocal`, or it will still be inherited in child threads even
    // after this `SparkContext` is stopped.
localProperties.remove()
ResourceProfile.clearDefaultProfile()
// Unset YARN mode system env variable, to allow switching between cluster types.
SparkContext.clearActiveContext()
logInfo("Successfully stopped SparkContext")
}
/**
* Get Spark's home location from either a value set through the constructor,
* or the spark.home Java property, or the SPARK_HOME environment variable
   * (in that order of preference). If none of these is set, return None.
*/
private[spark] def getSparkHome(): Option[String] = {
conf.getOption("spark.home").orElse(Option(System.getenv("SPARK_HOME")))
}
/**
* Set the thread-local property for overriding the call sites
* of actions and RDDs.
*/
def setCallSite(shortCallSite: String): Unit = {
setLocalProperty(CallSite.SHORT_FORM, shortCallSite)
}
/**
* Set the thread-local property for overriding the call sites
* of actions and RDDs.
*/
private[spark] def setCallSite(callSite: CallSite): Unit = {
setLocalProperty(CallSite.SHORT_FORM, callSite.shortForm)
setLocalProperty(CallSite.LONG_FORM, callSite.longForm)
}
/**
* Clear the thread-local property for overriding the call sites
* of actions and RDDs.
*/
def clearCallSite(): Unit = {
setLocalProperty(CallSite.SHORT_FORM, null)
setLocalProperty(CallSite.LONG_FORM, null)
}
/**
* Capture the current user callsite and return a formatted version for printing. If the user
* has overridden the call site using `setCallSite()`, this will return the user's version.
*/
private[spark] def getCallSite(): CallSite = {
lazy val callSite = Utils.getCallSite()
CallSite(
Option(getLocalProperty(CallSite.SHORT_FORM)).getOrElse(callSite.shortForm),
Option(getLocalProperty(CallSite.LONG_FORM)).getOrElse(callSite.longForm)
)
}
/**
* Run a function on a given set of partitions in an RDD and pass the results to the given
* handler function. This is the main entry point for all actions in Spark.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @param partitions set of partitions to run on; some jobs may not want to compute on all
* partitions of the target RDD, e.g. for operations like `first()`
* @param resultHandler callback to pass each result to
*/
def runJob[T, U: ClassTag](
rdd: RDD[T],
func: (TaskContext, Iterator[T]) => U,
partitions: Seq[Int],
resultHandler: (Int, U) => Unit): Unit = {
if (stopped.get()) {
throw new IllegalStateException("SparkContext has been shutdown")
}
val callSite = getCallSite
val cleanedFunc = clean(func)
logInfo("Starting job: " + callSite.shortForm)
if (conf.getBoolean("spark.logLineage", false)) {
logInfo("RDD's recursive dependencies:\\n" + rdd.toDebugString)
}
dagScheduler.runJob(rdd, cleanedFunc, partitions, callSite, resultHandler, localProperties.get)
progressBar.foreach(_.finishAll())
rdd.doCheckpoint()
}
/**
* Run a function on a given set of partitions in an RDD and return the results as an array.
* The function that is run against each partition additionally takes `TaskContext` argument.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @param partitions set of partitions to run on; some jobs may not want to compute on all
* partitions of the target RDD, e.g. for operations like `first()`
* @return in-memory collection with a result of the job (each collection element will contain
* a result from one partition)
*/
def runJob[T, U: ClassTag](
rdd: RDD[T],
func: (TaskContext, Iterator[T]) => U,
partitions: Seq[Int]): Array[U] = {
val results = new Array[U](partitions.size)
runJob[T, U](rdd, func, partitions, (index, res) => results(index) = res)
results
}
/**
* Run a function on a given set of partitions in an RDD and return the results as an array.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @param partitions set of partitions to run on; some jobs may not want to compute on all
* partitions of the target RDD, e.g. for operations like `first()`
* @return in-memory collection with a result of the job (each collection element will contain
* a result from one partition)
*/
def runJob[T, U: ClassTag](
rdd: RDD[T],
func: Iterator[T] => U,
partitions: Seq[Int]): Array[U] = {
val cleanedFunc = clean(func)
runJob(rdd, (ctx: TaskContext, it: Iterator[T]) => cleanedFunc(it), partitions)
}
/**
* Run a job on all partitions in an RDD and return the results in an array. The function
* that is run against each partition additionally takes `TaskContext` argument.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @return in-memory collection with a result of the job (each collection element will contain
* a result from one partition)
*/
def runJob[T, U: ClassTag](rdd: RDD[T], func: (TaskContext, Iterator[T]) => U): Array[U] = {
runJob(rdd, func, 0 until rdd.partitions.length)
}
/**
* Run a job on all partitions in an RDD and return the results in an array.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @return in-memory collection with a result of the job (each collection element will contain
* a result from one partition)
*/
def runJob[T, U: ClassTag](rdd: RDD[T], func: Iterator[T] => U): Array[U] = {
runJob(rdd, func, 0 until rdd.partitions.length)
}
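  // Illustrative usage sketch (editor's addition): compute per-partition sums; the result
  // array has one element per partition of the (placeholder) RDD.
  //   val rdd = sc.parallelize(1 to 10, numSlices = 4)
  //   val partitionSums: Array[Int] = sc.runJob(rdd, (it: Iterator[Int]) => it.sum)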
/**
* Run a job on all partitions in an RDD and pass the results to a handler function. The function
* that is run against each partition additionally takes `TaskContext` argument.
*
* @param rdd target RDD to run tasks on
* @param processPartition a function to run on each partition of the RDD
* @param resultHandler callback to pass each result to
*/
def runJob[T, U: ClassTag](
rdd: RDD[T],
processPartition: (TaskContext, Iterator[T]) => U,
resultHandler: (Int, U) => Unit): Unit = {
runJob[T, U](rdd, processPartition, 0 until rdd.partitions.length, resultHandler)
}
/**
* Run a job on all partitions in an RDD and pass the results to a handler function.
*
* @param rdd target RDD to run tasks on
* @param processPartition a function to run on each partition of the RDD
* @param resultHandler callback to pass each result to
*/
def runJob[T, U: ClassTag](
rdd: RDD[T],
processPartition: Iterator[T] => U,
resultHandler: (Int, U) => Unit): Unit = {
val processFunc = (context: TaskContext, iter: Iterator[T]) => processPartition(iter)
runJob[T, U](rdd, processFunc, 0 until rdd.partitions.length, resultHandler)
}
/**
* :: DeveloperApi ::
* Run a job that can return approximate results.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @param evaluator `ApproximateEvaluator` to receive the partial results
* @param timeout maximum time to wait for the job, in milliseconds
* @return partial result (how partial depends on whether the job was finished before or
* after timeout)
*/
@DeveloperApi
def runApproximateJob[T, U, R](
rdd: RDD[T],
func: (TaskContext, Iterator[T]) => U,
evaluator: ApproximateEvaluator[U, R],
timeout: Long): PartialResult[R] = {
assertNotStopped()
val callSite = getCallSite
logInfo("Starting job: " + callSite.shortForm)
val start = System.nanoTime
val cleanedFunc = clean(func)
val result = dagScheduler.runApproximateJob(rdd, cleanedFunc, evaluator, callSite, timeout,
localProperties.get)
logInfo(
"Job finished: " + callSite.shortForm + ", took " + (System.nanoTime - start) / 1e9 + " s")
result
}
/**
   * Submit a job for execution and return a `SimpleFutureAction` holding the result.
*
* @param rdd target RDD to run tasks on
* @param processPartition a function to run on each partition of the RDD
* @param partitions set of partitions to run on; some jobs may not want to compute on all
* partitions of the target RDD, e.g. for operations like `first()`
* @param resultHandler callback to pass each result to
* @param resultFunc function to be executed when the result is ready
*/
def submitJob[T, U, R](
rdd: RDD[T],
processPartition: Iterator[T] => U,
partitions: Seq[Int],
resultHandler: (Int, U) => Unit,
resultFunc: => R): SimpleFutureAction[R] =
{
assertNotStopped()
val cleanF = clean(processPartition)
val callSite = getCallSite
val waiter = dagScheduler.submitJob(
rdd,
(context: TaskContext, iter: Iterator[T]) => cleanF(iter),
partitions,
callSite,
resultHandler,
localProperties.get)
new SimpleFutureAction(waiter, resultFunc)
}
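  // Illustrative usage sketch (editor's addition): submit a job asynchronously; the handler
  // fills a local buffer and the returned future completes with `resultFunc`. All values are
  // placeholders.
  //   val rdd = sc.parallelize(1 to 8, 2)
  //   val sums = new Array[Int](2)
  //   val future = sc.submitJob[Int, Int, Seq[Int]](
  //     rdd, _.sum, Seq(0, 1), (idx, s) => sums(idx) = s, sums.toSeq)
  //   // `future` is a scala.concurrent.Future; await it or register a callback as needed.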
/**
* Submit a map stage for execution. This is currently an internal API only, but might be
* promoted to DeveloperApi in the future.
*/
private[spark] def submitMapStage[K, V, C](dependency: ShuffleDependency[K, V, C])
: SimpleFutureAction[MapOutputStatistics] = {
assertNotStopped()
val callSite = getCallSite()
var result: MapOutputStatistics = null
val waiter = dagScheduler.submitMapStage(
dependency,
(r: MapOutputStatistics) => { result = r },
callSite,
localProperties.get)
new SimpleFutureAction[MapOutputStatistics](waiter, result)
}
/**
* Cancel active jobs for the specified group. See `org.apache.spark.SparkContext.setJobGroup`
* for more information.
*/
def cancelJobGroup(groupId: String): Unit = {
assertNotStopped()
dagScheduler.cancelJobGroup(groupId)
}
/** Cancel all jobs that have been scheduled or are running. */
def cancelAllJobs(): Unit = {
assertNotStopped()
dagScheduler.cancelAllJobs()
}
/**
* Cancel a given job if it's scheduled or running.
*
* @param jobId the job ID to cancel
* @param reason optional reason for cancellation
* @note Throws `InterruptedException` if the cancel message cannot be sent
*/
def cancelJob(jobId: Int, reason: String): Unit = {
dagScheduler.cancelJob(jobId, Option(reason))
}
/**
* Cancel a given job if it's scheduled or running.
*
* @param jobId the job ID to cancel
* @note Throws `InterruptedException` if the cancel message cannot be sent
*/
def cancelJob(jobId: Int): Unit = {
dagScheduler.cancelJob(jobId, None)
}
/**
* Cancel a given stage and all jobs associated with it.
*
* @param stageId the stage ID to cancel
* @param reason reason for cancellation
* @note Throws `InterruptedException` if the cancel message cannot be sent
*/
def cancelStage(stageId: Int, reason: String): Unit = {
dagScheduler.cancelStage(stageId, Option(reason))
}
/**
* Cancel a given stage and all jobs associated with it.
*
* @param stageId the stage ID to cancel
* @note Throws `InterruptedException` if the cancel message cannot be sent
*/
def cancelStage(stageId: Int): Unit = {
dagScheduler.cancelStage(stageId, None)
}
/**
* Kill and reschedule the given task attempt. Task ids can be obtained from the Spark UI
* or through SparkListener.onTaskStart.
*
* @param taskId the task ID to kill. This id uniquely identifies the task attempt.
* @param interruptThread whether to interrupt the thread running the task.
* @param reason the reason for killing the task, which should be a short string. If a task
* is killed multiple times with different reasons, only one reason will be reported.
*
* @return Whether the task was successfully killed.
*/
def killTaskAttempt(
taskId: Long,
interruptThread: Boolean = true,
reason: String = "killed via SparkContext.killTaskAttempt"): Boolean = {
dagScheduler.killTaskAttempt(taskId, interruptThread, reason)
}
/**
* Clean a closure to make it ready to be serialized and sent to tasks
* (removes unreferenced variables in $outer's, updates REPL variables)
* If <tt>checkSerializable</tt> is set, <tt>clean</tt> will also proactively
* check to see if <tt>f</tt> is serializable and throw a <tt>SparkException</tt>
* if not.
*
* @param f the closure to clean
* @param checkSerializable whether or not to immediately check <tt>f</tt> for serializability
* @throws SparkException if <tt>checkSerializable</tt> is set but <tt>f</tt> is not
* serializable
* @return the cleaned closure
*/
private[spark] def clean[F <: AnyRef](f: F, checkSerializable: Boolean = true): F = {
ClosureCleaner.clean(f, checkSerializable)
f
}
/**
* Set the directory under which RDDs are going to be checkpointed.
* @param directory path to the directory where checkpoint files will be stored
   * (must be an HDFS path if running on a cluster)
*/
def setCheckpointDir(directory: String): Unit = {
// If we are running on a cluster, log a warning if the directory is local.
// Otherwise, the driver may attempt to reconstruct the checkpointed RDD from
// its own local file system, which is incorrect because the checkpoint files
// are actually on the executor machines.
if (!isLocal && Utils.nonLocalPaths(directory).isEmpty) {
logWarning("Spark is not running in local mode, therefore the checkpoint directory " +
s"must not be on the local filesystem. Directory '$directory' " +
"appears to be on the local filesystem.")
}
checkpointDir = Option(directory).map { dir =>
val path = new Path(dir, UUID.randomUUID().toString)
val fs = path.getFileSystem(hadoopConfiguration)
fs.mkdirs(path)
fs.getFileStatus(path).getPath.toString
}
}
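  // Illustrative usage sketch (editor's addition): the checkpoint directory is a placeholder;
  // checkpoint() must be called before the first action on the RDD.
  //   sc.setCheckpointDir("hdfs:///tmp/spark-checkpoints")
  //   val rdd = sc.parallelize(1 to 100).map(_ * 2)
  //   rdd.checkpoint()
  //   rdd.count() // the checkpoint data is written once a job has run on the RDD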
def getCheckpointDir: Option[String] = checkpointDir
/** Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD). */
def defaultParallelism: Int = {
assertNotStopped()
taskScheduler.defaultParallelism
}
/**
* Default min number of partitions for Hadoop RDDs when not given by user
* Notice that we use math.min so the "defaultMinPartitions" cannot be higher than 2.
* The reasons for this are discussed in https://github.com/mesos/spark/pull/718
*/
def defaultMinPartitions: Int = math.min(defaultParallelism, 2)
private val nextShuffleId = new AtomicInteger(0)
private[spark] def newShuffleId(): Int = nextShuffleId.getAndIncrement()
private val nextRddId = new AtomicInteger(0)
/** Register a new RDD, returning its RDD ID */
private[spark] def newRddId(): Int = nextRddId.getAndIncrement()
/**
* Registers listeners specified in spark.extraListeners, then starts the listener bus.
* This should be called after all internal listeners have been registered with the listener bus
* (e.g. after the web UI and event logging listeners have been registered).
*/
private def setupAndStartListenerBus(): Unit = {
try {
conf.get(EXTRA_LISTENERS).foreach { classNames =>
val listeners = Utils.loadExtensions(classOf[SparkListenerInterface], classNames, conf)
listeners.foreach { listener =>
listenerBus.addToSharedQueue(listener)
logInfo(s"Registered listener ${listener.getClass().getName()}")
}
}
} catch {
case e: Exception =>
try {
stop()
} finally {
throw new SparkException(s"Exception when registering SparkListener", e)
}
}
listenerBus.start(this, _env.metricsSystem)
_listenerBusStarted = true
}
/** Post the application start event */
private def postApplicationStart(): Unit = {
// Note: this code assumes that the task scheduler has been initialized and has contacted
// the cluster manager to get an application ID (in case the cluster manager provides one).
listenerBus.post(SparkListenerApplicationStart(appName, Some(applicationId),
startTime, sparkUser, applicationAttemptId, schedulerBackend.getDriverLogUrls,
schedulerBackend.getDriverAttributes))
_driverLogger.foreach(_.startSync(_hadoopConfiguration))
}
/** Post the application end event */
private def postApplicationEnd(): Unit = {
listenerBus.post(SparkListenerApplicationEnd(System.currentTimeMillis))
}
/** Post the environment update event once the task scheduler is ready */
private def postEnvironmentUpdate(): Unit = {
if (taskScheduler != null) {
val schedulingMode = getSchedulingMode.toString
val addedJarPaths = addedJars.keys.toSeq
val addedFilePaths = addedFiles.keys.toSeq
val addedArchivePaths = addedArchives.keys.toSeq
val environmentDetails = SparkEnv.environmentDetails(conf, hadoopConfiguration,
schedulingMode, addedJarPaths, addedFilePaths, addedArchivePaths)
val environmentUpdate = SparkListenerEnvironmentUpdate(environmentDetails)
listenerBus.post(environmentUpdate)
}
}
/** Reports heartbeat metrics for the driver. */
private def reportHeartBeat(executorMetricsSource: Option[ExecutorMetricsSource]): Unit = {
val currentMetrics = ExecutorMetrics.getCurrentMetrics(env.memoryManager)
executorMetricsSource.foreach(_.updateMetricsSnapshot(currentMetrics))
val driverUpdates = new HashMap[(Int, Int), ExecutorMetrics]
// In the driver, we do not track per-stage metrics, so use a dummy stage for the key
driverUpdates.put(EventLoggingListener.DRIVER_STAGE_KEY, new ExecutorMetrics(currentMetrics))
val accumUpdates = new Array[(Long, Int, Int, Seq[AccumulableInfo])](0)
listenerBus.post(SparkListenerExecutorMetricsUpdate("driver", accumUpdates,
driverUpdates))
}
// In order to prevent multiple SparkContexts from being active at the same time, mark this
// context as having finished construction.
// NOTE: this must be placed at the end of the SparkContext constructor.
SparkContext.setActiveContext(this)
}
/**
* The SparkContext object contains a number of implicit conversions and parameters for use with
* various Spark features.
*/
object SparkContext extends Logging {
private val VALID_LOG_LEVELS =
Set("ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN")
/**
* Lock that guards access to global variables that track SparkContext construction.
*/
private val SPARK_CONTEXT_CONSTRUCTOR_LOCK = new Object()
/**
* The active, fully-constructed SparkContext. If no SparkContext is active, then this is `null`.
*
* Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`.
*/
private val activeContext: AtomicReference[SparkContext] =
new AtomicReference[SparkContext](null)
/**
* Points to a partially-constructed SparkContext if another thread is in the SparkContext
* constructor, or `None` if no SparkContext is being constructed.
*
* Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`.
*/
private var contextBeingConstructed: Option[SparkContext] = None
/**
* Called to ensure that no other SparkContext is running in this JVM.
*
* Throws an exception if a running context is detected and logs a warning if another thread is
* constructing a SparkContext. This warning is necessary because the current locking scheme
* prevents us from reliably distinguishing between cases where another context is being
* constructed and cases where another constructor threw an exception.
*/
private def assertNoOtherContextIsRunning(sc: SparkContext): Unit = {
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
Option(activeContext.get()).filter(_ ne sc).foreach { ctx =>
val errMsg = "Only one SparkContext should be running in this JVM (see SPARK-2243)." +
s"The currently running SparkContext was created at:\\n${ctx.creationSite.longForm}"
throw new SparkException(errMsg)
}
contextBeingConstructed.filter(_ ne sc).foreach { otherContext =>
// Since otherContext might point to a partially-constructed context, guard against
// its creationSite field being null:
val otherContextCreationSite =
Option(otherContext.creationSite).map(_.longForm).getOrElse("unknown location")
val warnMsg = "Another SparkContext is being constructed (or threw an exception in its" +
" constructor). This may indicate an error, since only one SparkContext should be" +
" running in this JVM (see SPARK-2243)." +
s" The other SparkContext was created at:\\n$otherContextCreationSite"
logWarning(warnMsg)
}
}
}
/**
* Called to ensure that SparkContext is created or accessed only on the Driver.
*
* Throws an exception if a SparkContext is about to be created in executors.
*/
private def assertOnDriver(): Unit = {
if (Utils.isInRunningSparkTask) {
// we're accessing it during task execution, fail.
throw new IllegalStateException(
"SparkContext should only be created and accessed on the driver.")
}
}
/**
* This function may be used to get or instantiate a SparkContext and register it as a
* singleton object. Because we can only have one active SparkContext per JVM,
* this is useful when applications may wish to share a SparkContext.
*
   * @param config `SparkConf` that will be used for initialisation of the `SparkContext`
* @return current `SparkContext` (or a new one if it wasn't created before the function call)
*/
def getOrCreate(config: SparkConf): SparkContext = {
// Synchronize to ensure that multiple create requests don't trigger an exception
// from assertNoOtherContextIsRunning within setActiveContext
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
if (activeContext.get() == null) {
setActiveContext(new SparkContext(config))
} else {
if (config.getAll.nonEmpty) {
logWarning("Using an existing SparkContext; some configuration may not take effect.")
}
}
activeContext.get()
}
}
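  // Illustrative usage sketch (editor's addition): reuse the active context or create one from
  // the given conf; the app name and master below are placeholders.
  //   val conf = new SparkConf().setAppName("example").setMaster("local[2]")
  //   val sc = SparkContext.getOrCreate(conf)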
/**
* This function may be used to get or instantiate a SparkContext and register it as a
* singleton object. Because we can only have one active SparkContext per JVM,
* this is useful when applications may wish to share a SparkContext.
*
* This method allows not passing a SparkConf (useful if just retrieving).
*
   * @return current `SparkContext` (or a new one if one wasn't created before the function call)
*/
def getOrCreate(): SparkContext = {
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
if (activeContext.get() == null) {
setActiveContext(new SparkContext())
}
activeContext.get()
}
}
/** Return the current active [[SparkContext]] if any. */
private[spark] def getActive: Option[SparkContext] = {
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
Option(activeContext.get())
}
}
/**
* Called at the beginning of the SparkContext constructor to ensure that no SparkContext is
* running. Throws an exception if a running context is detected and logs a warning if another
* thread is constructing a SparkContext. This warning is necessary because the current locking
* scheme prevents us from reliably distinguishing between cases where another context is being
* constructed and cases where another constructor threw an exception.
*/
private[spark] def markPartiallyConstructed(sc: SparkContext): Unit = {
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
assertNoOtherContextIsRunning(sc)
contextBeingConstructed = Some(sc)
}
}
/**
* Called at the end of the SparkContext constructor to ensure that no other SparkContext has
* raced with this constructor and started.
*/
private[spark] def setActiveContext(sc: SparkContext): Unit = {
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
assertNoOtherContextIsRunning(sc)
contextBeingConstructed = None
activeContext.set(sc)
}
}
/**
* Clears the active SparkContext metadata. This is called by `SparkContext#stop()`. It's
* also called in unit tests to prevent a flood of warnings from test suites that don't / can't
* properly clean up their SparkContexts.
*/
private[spark] def clearActiveContext(): Unit = {
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
activeContext.set(null)
}
}
private[spark] val SPARK_JOB_DESCRIPTION = "spark.job.description"
private[spark] val SPARK_JOB_GROUP_ID = "spark.jobGroup.id"
private[spark] val SPARK_JOB_INTERRUPT_ON_CANCEL = "spark.job.interruptOnCancel"
private[spark] val SPARK_SCHEDULER_POOL = "spark.scheduler.pool"
private[spark] val RDD_SCOPE_KEY = "spark.rdd.scope"
private[spark] val RDD_SCOPE_NO_OVERRIDE_KEY = "spark.rdd.scope.noOverride"
/**
* Executor id for the driver. In earlier versions of Spark, this was `<driver>`, but this was
* changed to `driver` because the angle brackets caused escaping issues in URLs and XML (see
* SPARK-6716 for more details).
*/
private[spark] val DRIVER_IDENTIFIER = "driver"
private implicit def arrayToArrayWritable[T <: Writable : ClassTag](arr: Iterable[T])
: ArrayWritable = {
def anyToWritable[U <: Writable](u: U): Writable = u
new ArrayWritable(classTag[T].runtimeClass.asInstanceOf[Class[Writable]],
arr.map(x => anyToWritable(x)).toArray)
}
/**
* Find the JAR from which a given class was loaded, to make it easy for users to pass
* their JARs to SparkContext.
*
* @param cls class that should be inside of the jar
* @return jar that contains the Class, `None` if not found
*/
def jarOfClass(cls: Class[_]): Option[String] = {
val uri = cls.getResource("/" + cls.getName.replace('.', '/') + ".class")
if (uri != null) {
val uriStr = uri.toString
if (uriStr.startsWith("jar:file:")) {
// URI will be of the form "jar:file:/path/foo.jar!/package/cls.class",
// so pull out the /path/foo.jar
Some(uriStr.substring("jar:file:".length, uriStr.indexOf('!')))
} else {
None
}
} else {
None
}
}
/**
* Find the JAR that contains the class of a particular object, to make it easy for users
* to pass their JARs to SparkContext. In most cases you can call jarOfObject(this) in
* your driver program.
*
* @param obj reference to an instance which class should be inside of the jar
* @return jar that contains the class of the instance, `None` if not found
*/
def jarOfObject(obj: AnyRef): Option[String] = jarOfClass(obj.getClass)
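  // Illustrative usage sketch (editor's addition): locate the application jar from the driver
  // program so it can be shipped to executors, e.g. via addJar. `sc` is an assumed SparkContext.
  //   val appJar: Option[String] = SparkContext.jarOfObject(this)
  //   appJar.foreach(sc.addJar)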
/**
* Creates a modified version of a SparkConf with the parameters that can be passed separately
* to SparkContext, to make it easier to write SparkContext's constructors. This ignores
* parameters that are passed as the default value of null, instead of throwing an exception
* like SparkConf would.
*/
private[spark] def updatedConf(
conf: SparkConf,
master: String,
appName: String,
sparkHome: String = null,
jars: Seq[String] = Nil,
environment: Map[String, String] = Map()): SparkConf =
{
val res = conf.clone()
res.setMaster(master)
res.setAppName(appName)
if (sparkHome != null) {
res.setSparkHome(sparkHome)
}
if (jars != null && !jars.isEmpty) {
res.setJars(jars)
}
res.setExecutorEnv(environment.toSeq)
res
}
/**
* The number of cores available to the driver to use for tasks such as I/O with Netty
*/
private[spark] def numDriverCores(master: String): Int = {
numDriverCores(master, null)
}
/**
* The number of cores available to the driver to use for tasks such as I/O with Netty
*/
private[spark] def numDriverCores(master: String, conf: SparkConf): Int = {
def convertToInt(threads: String): Int = {
if (threads == "*") Runtime.getRuntime.availableProcessors() else threads.toInt
}
master match {
case "local" => 1
case SparkMasterRegex.LOCAL_N_REGEX(threads) => convertToInt(threads)
case SparkMasterRegex.LOCAL_N_FAILURES_REGEX(threads, _) => convertToInt(threads)
case "yarn" | SparkMasterRegex.KUBERNETES_REGEX(_) =>
if (conf != null && conf.get(SUBMIT_DEPLOY_MODE) == "cluster") {
conf.getInt(DRIVER_CORES.key, 0)
} else {
0
}
case _ => 0 // Either driver is not being used, or its core count will be interpolated later
}
}
/**
* Create a task scheduler based on a given master URL.
* Return a 2-tuple of the scheduler backend and the task scheduler.
*/
private def createTaskScheduler(
sc: SparkContext,
master: String,
deployMode: String): (SchedulerBackend, TaskScheduler) = {
import SparkMasterRegex._
// When running locally, don't try to re-execute tasks on failure.
val MAX_LOCAL_TASK_FAILURES = 1
    // Ensure that the default executor's resources satisfy one or more tasks' requirements.
    // This function is for cluster managers that don't set the executor cores config; for
    // others it is checked in ResourceProfile.
def checkResourcesPerTask(executorCores: Int): Unit = {
val taskCores = sc.conf.get(CPUS_PER_TASK)
if (!sc.conf.get(SKIP_VALIDATE_CORES_TESTING)) {
validateTaskCpusLargeEnough(sc.conf, executorCores, taskCores)
}
val defaultProf = sc.resourceProfileManager.defaultResourceProfile
ResourceUtils.warnOnWastedResources(defaultProf, sc.conf, Some(executorCores))
}
master match {
case "local" =>
checkResourcesPerTask(1)
val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true)
val backend = new LocalSchedulerBackend(sc.getConf, scheduler, 1)
scheduler.initialize(backend)
(backend, scheduler)
case LOCAL_N_REGEX(threads) =>
def localCpuCount: Int = Runtime.getRuntime.availableProcessors()
// local[*] estimates the number of cores on the machine; local[N] uses exactly N threads.
val threadCount = if (threads == "*") localCpuCount else threads.toInt
if (threadCount <= 0) {
throw new SparkException(s"Asked to run locally with $threadCount threads")
}
checkResourcesPerTask(threadCount)
val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true)
val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount)
scheduler.initialize(backend)
(backend, scheduler)
case LOCAL_N_FAILURES_REGEX(threads, maxFailures) =>
def localCpuCount: Int = Runtime.getRuntime.availableProcessors()
// local[*, M] means the number of cores on the computer with M failures
// local[N, M] means exactly N threads with M failures
val threadCount = if (threads == "*") localCpuCount else threads.toInt
checkResourcesPerTask(threadCount)
val scheduler = new TaskSchedulerImpl(sc, maxFailures.toInt, isLocal = true)
val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount)
scheduler.initialize(backend)
(backend, scheduler)
case SPARK_REGEX(sparkUrl) =>
val scheduler = new TaskSchedulerImpl(sc)
val masterUrls = sparkUrl.split(",").map("spark://" + _)
val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls)
scheduler.initialize(backend)
(backend, scheduler)
case LOCAL_CLUSTER_REGEX(numWorkers, coresPerWorker, memoryPerWorker) =>
checkResourcesPerTask(coresPerWorker.toInt)
// Check to make sure memory requested <= memoryPerWorker. Otherwise Spark will just hang.
val memoryPerWorkerInt = memoryPerWorker.toInt
if (sc.executorMemory > memoryPerWorkerInt) {
throw new SparkException(
"Asked to launch cluster with %d MiB/worker but requested %d MiB/executor".format(
memoryPerWorkerInt, sc.executorMemory))
}
// For host local mode setting the default of SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED
// to false because this mode is intended to be used for testing and in this case all the
// executors are running on the same host. So if host local reading was enabled here then
// testing of the remote fetching would be secondary as setting this config explicitly to
        // false would be required in most of the unit tests (despite the fact that remote fetching
// is much more frequent in production).
sc.conf.setIfMissing(SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED, false)
val scheduler = new TaskSchedulerImpl(sc)
val localCluster = LocalSparkCluster(
numWorkers.toInt, coresPerWorker.toInt, memoryPerWorkerInt, sc.conf)
val masterUrls = localCluster.start()
val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls)
scheduler.initialize(backend)
backend.shutdownCallback = (backend: StandaloneSchedulerBackend) => {
localCluster.stop()
}
(backend, scheduler)
case masterUrl =>
val cm = getClusterManager(masterUrl) match {
case Some(clusterMgr) => clusterMgr
case None => throw new SparkException("Could not parse Master URL: '" + master + "'")
}
try {
val scheduler = cm.createTaskScheduler(sc, masterUrl)
val backend = cm.createSchedulerBackend(sc, masterUrl, scheduler)
cm.initialize(scheduler, backend)
(backend, scheduler)
} catch {
case se: SparkException => throw se
case NonFatal(e) =>
throw new SparkException("External scheduler cannot be instantiated", e)
}
}
}
private def getClusterManager(url: String): Option[ExternalClusterManager] = {
val loader = Utils.getContextOrSparkClassLoader
val serviceLoaders =
ServiceLoader.load(classOf[ExternalClusterManager], loader).asScala.filter(_.canCreate(url))
if (serviceLoaders.size > 1) {
throw new SparkException(
s"Multiple external cluster managers registered for the url $url: $serviceLoaders")
}
serviceLoaders.headOption
}
/**
* This is a helper function to complete the missing S3A magic committer configurations
* based on a single conf: `spark.hadoop.fs.s3a.bucket.<bucket>.committer.magic.enabled`
*/
private def fillMissingMagicCommitterConfsIfNeeded(conf: SparkConf): Unit = {
val magicCommitterConfs = conf
.getAllWithPrefix("spark.hadoop.fs.s3a.bucket.")
.filter(_._1.endsWith(".committer.magic.enabled"))
.filter(_._2.equalsIgnoreCase("true"))
if (magicCommitterConfs.nonEmpty) {
// Try to enable S3 magic committer if missing
conf.setIfMissing("spark.hadoop.fs.s3a.committer.magic.enabled", "true")
if (conf.get("spark.hadoop.fs.s3a.committer.magic.enabled").equals("true")) {
conf.setIfMissing("spark.hadoop.fs.s3a.committer.name", "magic")
conf.setIfMissing("spark.hadoop.mapreduce.outputcommitter.factory.scheme.s3a",
"org.apache.hadoop.fs.s3a.commit.S3ACommitterFactory")
conf.setIfMissing("spark.sql.parquet.output.committer.class",
"org.apache.spark.internal.io.cloud.BindingParquetOutputCommitter")
conf.setIfMissing("spark.sql.sources.commitProtocolClass",
"org.apache.spark.internal.io.cloud.PathOutputCommitProtocol")
}
}
}
/**
* SPARK-36796: This is a helper function to supplement `--add-opens` options to
* `spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions`.
*/
private def supplementJavaModuleOptions(conf: SparkConf): Unit = {
def supplement(key: OptionalConfigEntry[String]): Unit = {
val v = conf.get(key) match {
case Some(opts) => s"${JavaModuleOptions.defaultModuleOptions()} $opts"
case None => JavaModuleOptions.defaultModuleOptions()
}
conf.set(key.key, v)
}
supplement(DRIVER_JAVA_OPTIONS)
supplement(EXECUTOR_JAVA_OPTIONS)
}
}
/**
* A collection of regexes for extracting information from the master string.
*/
private object SparkMasterRegex {
// Regular expression used for local[N] and local[*] master formats
val LOCAL_N_REGEX = """local\\[([0-9]+|\\*)\\]""".r
// Regular expression for local[N, maxRetries], used in tests with failing tasks
val LOCAL_N_FAILURES_REGEX = """local\\[([0-9]+|\\*)\\s*,\\s*([0-9]+)\\]""".r
// Regular expression for simulating a Spark cluster of [N, cores, memory] locally
val LOCAL_CLUSTER_REGEX = """local-cluster\\[\\s*([0-9]+)\\s*,\\s*([0-9]+)\\s*,\\s*([0-9]+)\\s*]""".r
// Regular expression for connecting to Spark deploy clusters
val SPARK_REGEX = """spark://(.*)""".r
// Regular expression for connecting to kubernetes clusters
val KUBERNETES_REGEX = """k8s://(.*)""".r
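  // Illustrative examples (editor's addition) of master strings these patterns accept:
  //   "local[4]"                    -> LOCAL_N_REGEX
  //   "local[*, 3]"                 -> LOCAL_N_FAILURES_REGEX
  //   "local-cluster[2, 1, 1024]"   -> LOCAL_CLUSTER_REGEX
  //   "spark://host:7077"           -> SPARK_REGEX
  //   "k8s://https://host:443"      -> KUBERNETES_REGEX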
}
/**
* A class encapsulating how to convert some type `T` from `Writable`. It stores both the `Writable`
* class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the
* conversion.
* The getter for the writable class takes a `ClassTag[T]` in case this is a generic object
* that doesn't know the type of `T` when it is created. This sounds strange but is necessary to
* support converting subclasses of `Writable` to themselves (`writableWritableConverter()`).
*/
private[spark] class WritableConverter[T](
val writableClass: ClassTag[T] => Class[_ <: Writable],
val convert: Writable => T)
extends Serializable
object WritableConverter {
// Helper objects for converting common types to Writable
private[spark] def simpleWritableConverter[T, W <: Writable: ClassTag](convert: W => T)
: WritableConverter[T] = {
val wClass = classTag[W].runtimeClass.asInstanceOf[Class[W]]
new WritableConverter[T](_ => wClass, x => convert(x.asInstanceOf[W]))
}
// The following implicit functions were in SparkContext before 1.3 and users had to
// `import SparkContext._` to enable them. Now we move them here to make the compiler find
// them automatically. However, we still keep the old functions in SparkContext for backward
// compatibility and forward to the following functions directly.
// The following implicit declarations have been added on top of the very similar ones
// below in order to enable compatibility with Scala 2.12. Scala 2.12 deprecates eta
// expansion of zero-arg methods and thus won't match a no-arg method where it expects
// an implicit that is a function of no args.
implicit val intWritableConverterFn: () => WritableConverter[Int] =
() => simpleWritableConverter[Int, IntWritable](_.get)
implicit val longWritableConverterFn: () => WritableConverter[Long] =
() => simpleWritableConverter[Long, LongWritable](_.get)
implicit val doubleWritableConverterFn: () => WritableConverter[Double] =
() => simpleWritableConverter[Double, DoubleWritable](_.get)
implicit val floatWritableConverterFn: () => WritableConverter[Float] =
() => simpleWritableConverter[Float, FloatWritable](_.get)
implicit val booleanWritableConverterFn: () => WritableConverter[Boolean] =
() => simpleWritableConverter[Boolean, BooleanWritable](_.get)
implicit val bytesWritableConverterFn: () => WritableConverter[Array[Byte]] = {
() => simpleWritableConverter[Array[Byte], BytesWritable] { bw =>
      // getBytes returns an array that is longer than the data to be returned
Arrays.copyOfRange(bw.getBytes, 0, bw.getLength)
}
}
implicit val stringWritableConverterFn: () => WritableConverter[String] =
() => simpleWritableConverter[String, Text](_.toString)
implicit def writableWritableConverterFn[T <: Writable : ClassTag]: () => WritableConverter[T] =
() => new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T])
// These implicits remain included for backwards-compatibility. They fulfill the
// same role as those above.
implicit def intWritableConverter(): WritableConverter[Int] =
simpleWritableConverter[Int, IntWritable](_.get)
implicit def longWritableConverter(): WritableConverter[Long] =
simpleWritableConverter[Long, LongWritable](_.get)
implicit def doubleWritableConverter(): WritableConverter[Double] =
simpleWritableConverter[Double, DoubleWritable](_.get)
implicit def floatWritableConverter(): WritableConverter[Float] =
simpleWritableConverter[Float, FloatWritable](_.get)
implicit def booleanWritableConverter(): WritableConverter[Boolean] =
simpleWritableConverter[Boolean, BooleanWritable](_.get)
implicit def bytesWritableConverter(): WritableConverter[Array[Byte]] = {
simpleWritableConverter[Array[Byte], BytesWritable] { bw =>
      // getBytes returns an array that is longer than the data to be returned
Arrays.copyOfRange(bw.getBytes, 0, bw.getLength)
}
}
implicit def stringWritableConverter(): WritableConverter[String] =
simpleWritableConverter[String, Text](_.toString)
implicit def writableWritableConverter[T <: Writable](): WritableConverter[T] =
new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T])
}
/**
* A class encapsulating how to convert some type `T` to `Writable`. It stores both the `Writable`
* class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the
* conversion.
* The `Writable` class will be used in `SequenceFileRDDFunctions`.
*/
private[spark] class WritableFactory[T](
val writableClass: ClassTag[T] => Class[_ <: Writable],
val convert: T => Writable) extends Serializable
object WritableFactory {
private[spark] def simpleWritableFactory[T: ClassTag, W <: Writable : ClassTag](convert: T => W)
: WritableFactory[T] = {
val writableClass = implicitly[ClassTag[W]].runtimeClass.asInstanceOf[Class[W]]
new WritableFactory[T](_ => writableClass, convert)
}
implicit def intWritableFactory: WritableFactory[Int] =
simpleWritableFactory(new IntWritable(_))
implicit def longWritableFactory: WritableFactory[Long] =
simpleWritableFactory(new LongWritable(_))
implicit def floatWritableFactory: WritableFactory[Float] =
simpleWritableFactory(new FloatWritable(_))
implicit def doubleWritableFactory: WritableFactory[Double] =
simpleWritableFactory(new DoubleWritable(_))
implicit def booleanWritableFactory: WritableFactory[Boolean] =
simpleWritableFactory(new BooleanWritable(_))
implicit def bytesWritableFactory: WritableFactory[Array[Byte]] =
simpleWritableFactory(new BytesWritable(_))
implicit def stringWritableFactory: WritableFactory[String] =
simpleWritableFactory(new Text(_))
implicit def writableWritableFactory[T <: Writable: ClassTag]: WritableFactory[T] =
simpleWritableFactory(w => w)
}
| shaneknapp/spark | core/src/main/scala/org/apache/spark/SparkContext.scala | Scala | apache-2.0 | 131,087 |
package scalaz.stream
import scalaz._
import scalaz.syntax.equal._
import scalaz.syntax.foldable._
import scalaz.std.anyVal._
import scalaz.std.list._
import scalaz.std.list.listSyntax._
import scalaz.std.string._
import org.scalacheck._
import Prop.{extendedAny => _, _}
import Arbitrary.arbitrary
import scalaz.concurrent.Strategy
import scala.concurrent
import scalaz.\/-
import scalaz.\/._
object ProcessSpec extends Properties("Process1") {
import Process._
import process1._
implicit val S = Strategy.DefaultStrategy
// Subtyping of various Process types:
// * Process1 is a Tee that only read from the left (Process1[I,O] <: Tee[I,Any,O])
// * Tee is a Wye that never requests Both (Tee[I,I2,O] <: Wye[I,I2,O])
// This 'test' is just ensuring that this typechecks
object Subtyping {
def asTee[I,O](p1: Process1[I,O]): Tee[I,Any,O] = p1
def asWye[I,I2,O](t: Tee[I,I2,O]): Wye[I,I2,O] = t
}
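  // Illustrative sketch (editor's addition): any Process1, e.g. one built with process1.lift,
  // can be used where a Tee or Wye is expected, as the conversions above typecheck.
  //   val inc: Process1[Int, Int] = process1.lift(_ + 1)
  //   val asT: Tee[Int, Any, Int] = Subtyping.asTee(inc)
  //   val asW: Wye[Int, Any, Int] = Subtyping.asWye(asT)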
implicit def EqualProcess[A:Equal]: Equal[Process0[A]] = new Equal[Process0[A]] {
def equal(a: Process0[A], b: Process0[A]): Boolean =
a.toList == b.toList
}
implicit def ArbProcess0[A:Arbitrary]: Arbitrary[Process0[A]] =
Arbitrary(Arbitrary.arbitrary[List[A]].map(a => Process(a: _*)))
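  // "basic" checks that each Process1 combinator agrees with the corresponding List
  // operation on arbitrary inputs, e.g. p.take(n).toList == p.toList.take(n).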
property("basic") = forAll { (p: Process0[Int], p2: Process0[String], n: Int) =>
val f = (x: Int) => List.range(1, x.min(100))
val g = (x: Int) => x % 7 == 0
val pf : PartialFunction[Int,Int] = { case x : Int if x % 2 == 0 => x}
val sm = Monoid[String]
("id" |: {
((p |> id) === p) && ((id |> p) === p)
}) &&
("map" |: {
(p.toList.map(_ + 1) === p.map(_ + 1).toList) &&
(p.map(_ + 1) === p.pipe(lift(_ + 1)))
}) &&
("flatMap" |: {
(p.toList.flatMap(f) === p.flatMap(f andThen Process.emitAll).toList)
}) &&
("filter" |: {
(p.toList.filter(g) === p.filter(g).toList)
}) &&
("take" |: {
(p.toList.take(n) === p.take(n).toList)
}) &&
("takeWhile" |: {
(p.toList.takeWhile(g) === p.takeWhile(g).toList)
}) &&
("drop" |: {
(p.toList.drop(n) === p.drop(n).toList)
}) &&
("dropLast" |: {
p.dropLast.toList === p.toList.dropRight(1)
}) &&
("dropLastIf" |: {
val pred = (_: Int) % 2 == 0
val pl = p.toList
val n = if (pl.lastOption.map(pred).getOrElse(false)) 1 else 0
p.dropLastIf(pred).toList === pl.dropRight(n) &&
p.dropLastIf(_ => false).toList === p.toList
}) &&
("dropWhile" |: {
(p.toList.dropWhile(g) === p.dropWhile(g).toList)
}) &&
("exists" |: {
(List(p.toList.exists(g)) === p.exists(g).toList)
}) &&
("forall" |: {
(List(p.toList.forall(g)) === p.forall(g).toList)
}) &&
("lastOr" |: {
p.pipe(lastOr(42)).toList === p.toList.lastOption.orElse(Some(42)).toList
}) &&
("maximum" |: {
p.maximum.toList === p.toList.maximum.toList
}) &&
("maximumBy" |: {
// enable when switching to scalaz 7.1
//p2.maximumBy(_.length).toList === p2.toList.maximumBy(_.length).toList
true
}) &&
("maximumOf" |: {
p2.maximumOf(_.length).toList === p2.toList.map(_.length).maximum.toList
}) &&
("minimum" |: {
p.minimum.toList === p.toList.minimum.toList
}) &&
("minimumBy" |: {
// enable when switching to scalaz 7.1
//p2.minimumBy(_.length).toList === p2.toList.minimumBy(_.length).toList
true
}) &&
("minimumOf" |: {
p2.minimumOf(_.length).toList === p2.toList.map(_.length).minimum.toList
}) &&
("zip" |: {
(p.toList.zip(p2.toList) === p.zip(p2).toList)
}) &&
("yip" |: {
val l = p.toList.zip(p2.toList)
val r = p.toSource.yip(p2.toSource).runLog.timed(3000).run.toList
(l === r)
}) &&
("scan" |: {
p.toList.scan(0)(_ - _) ===
p.toSource.scan(0)(_ - _).runLog.timed(3000).run.toList
}) &&
("scan1" |: {
p.toList.scan(0)(_ + _).tail ===
p.toSource.scan1(_ + _).runLog.timed(3000).run.toList
}) &&
("shiftRight" |: {
p.pipe(shiftRight(1, 2)).toList === List(1, 2) ++ p.toList
}) &&
("splitWith" |: {
p.splitWith(_ < n).toList.map(_.toList) === p.toList.splitWith(_ < n)
}) &&
("sum" |: {
p.toList.sum[Int] ===
p.toSource.pipe(process1.sum).runLastOr(0).timed(3000).run
}) &&
("intersperse" |: {
p.intersperse(0).toList == p.toList.intersperse(0)
}) &&
("collect" |: {
p.collect(pf).toList == p.toList.collect(pf)
}) &&
("collectFirst" |: {
p.collectFirst(pf).toList == p.toList.collectFirst(pf).toList
}) &&
("fold" |: {
p.fold(0)(_ + _).toList == List(p.toList.fold(0)(_ + _))
}) &&
("foldMap" |: {
p.foldMap(_.toString).toList.lastOption.toList == List(p.toList.map(_.toString).fold(sm.zero)(sm.append(_,_)))
}) &&
("reduce" |: {
(p.reduce(_ + _).toList == (if (p.toList.nonEmpty) List(p.toList.reduce(_ + _)) else List()))
}) &&
("find" |: {
(p.find(_ % 2 == 0).toList == p.toList.find(_ % 2 == 0).toList)
})
}
property("awaitOption") = secure {
Process().pipe(awaitOption).toList == List(None) &&
Process(1, 2).pipe(awaitOption).toList == List(Some(1))
}
property("chunk") = secure {
Process(0, 1, 2, 3, 4).chunk(2).toList == List(Vector(0, 1), Vector(2, 3), Vector(4))
}
property("chunkBy") = secure {
emitSeq("foo bar baz").chunkBy(_ != ' ').toList.map(_.mkString) ==
List("foo ", "bar ", "baz")
}
property("fill") = forAll(Gen.choose(0,30).map2(Gen.choose(0,50))((_,_))) {
case (n,chunkSize) => Process.fill(n)(42, chunkSize).runLog.run.toList == List.fill(n)(42)
}
property("iterate") = secure {
Process.iterate(0)(_ + 1).take(100).runLog.run.toList == List.iterate(0, 100)(_ + 1)
}
property("repartition") = secure {
Process("Lore", "m ip", "sum dolo", "r sit amet").repartition(_.split(" ")).toList ==
List("Lorem", "ipsum", "dolor", "sit", "amet") &&
Process("hel", "l", "o Wor", "ld").repartition(_.grouped(2).toVector).toList ==
List("he", "ll", "o ", "Wo", "rl", "d") &&
Process(1, 2, 3, 4, 5).repartition(i => Vector(i, i)).toList ==
List(1, 3, 6, 10, 15, 15) &&
Process[String]().repartition(_ => Vector()).toList == List() &&
Process("hello").repartition(_ => Vector()).toList == List()
}
property("repartition2") = secure {
Process("he", "ll", "o").repartition2(s => (Some(s), None)).toList ===
List("he", "ll", "o") &&
Process("he", "ll", "o").repartition2(s => (None, Some(s))).toList ===
List("hello") &&
Process("he", "ll", "o").repartition2 {
s => (Some(s.take(1)), Some(s.drop(1)))
}.toList === List("h", "e", "l", "lo")
}
property("stripNone") = secure {
Process(None, Some(1), None, Some(2), None).pipe(stripNone).toList === List(1, 2)
}
property("terminated") = secure {
Process(1, 2, 3).terminated.toList == List(Some(1), Some(2), Some(3), None)
}
property("unfold") = secure {
Process.unfold((0, 1)) {
case (f1, f2) => if (f1 <= 13) Some(((f1, f2), (f2, f1 + f2))) else None
}.map(_._1).runLog.run.toList == List(0, 1, 1, 2, 3, 5, 8, 13)
}
property("window") = secure {
def window(n: Int) = Process.range(0, 5).window(n).runLog.run.toList
window(1) == List(Vector(0), Vector(1), Vector(2), Vector(3), Vector(4), Vector()) &&
window(2) == List(Vector(0, 1), Vector(1, 2), Vector(2, 3), Vector(3, 4), Vector(4)) &&
window(3) == List(Vector(0, 1, 2), Vector(1, 2, 3), Vector(2, 3, 4), Vector(3, 4))
}
import scalaz.concurrent.Task
property("enqueue") = secure {
val tasks = Process.range(0,1000).map(i => Task { Thread.sleep(1); 1 })
tasks.sequence(50).pipe(processes.sum[Int].last).runLog.run.head == 1000 &&
tasks.gather(50).pipe(processes.sum[Int].last).runLog.run.head == 1000
}
// ensure that wye terminates
property("wye one side infinite") = secure {
import ReceiveY._
def whileBoth[A,B]: Wye[A,B,Nothing] = {
def go: Wye[A,B,Nothing] = receiveBoth[A,B,Nothing] {
case HaltL(_) | HaltR(_) => halt
case _ => go
}
go
}
val inf = Process.constant(0)
val one = eval(Task.now(1))
val empty = Process[Int]()
    inf.wye(empty)(whileBoth).run.timed(800).attempt.run == \/-(()) &&
      empty.wye(inf)(whileBoth).run.timed(800).attempt.run == \/-(()) &&
      inf.wye(one)(whileBoth).run.timed(800).attempt.run == \/-(()) &&
      one.wye(inf)(whileBoth).run.timed(800).attempt.run == \/-(())
}
property("wye runs cleanup for both sides") = secure {
import ReceiveY._
import java.util.concurrent.atomic.AtomicBoolean
    def eitherWhileBoth[A,B]: Wye[A,B,A \/ B] = {
      def go: Wye[A,B,A \/ B] = receiveBoth[A,B,A \/ B] {
        case HaltL(_) | HaltR(_) => halt
        case ReceiveL(i) => emit(-\/(i)) fby go
        case ReceiveR(i) => emit(\/-(i)) fby go
}
go
}
val completed = new AtomicBoolean(false)
val (_, qProc) = async.queue[Unit]
val left = qProc.onComplete(eval(Task.delay { completed.set(true) }))
val right = Process[Int](1)
left.wye(right)(eitherWhileBoth).run.run
completed.get
}
property("wye runs cleanup from last evaluated await") = secure {
import ReceiveY._
import java.util.concurrent.atomic.AtomicInteger
def whileBoth[A,B]: Wye[A,B,Nothing] = {
def go: Wye[A,B,Nothing] = receiveBoth[A,B,Nothing] {
case HaltL(_) | HaltR(_) => halt
case _ => go
}
go
}
val openComplete = new concurrent.SyncVar[Unit]
val nOpened = new AtomicInteger
val open: Task[Unit] = Task.delay { nOpened.incrementAndGet(); openComplete.put(()) }
val close: Task[Unit] = Task.delay { nOpened.decrementAndGet() }
val (q, qProc) = async.queue[Unit]
val (_, block) = async.queue[Unit]
val resourceProc = await(open)(_ => block, halt, halt).onComplete(eval_(close))
val complexProc = Process.suspend(resourceProc)
Task { openComplete.get; q.close }.runAsync(_ => ())
// Left side opens the resource and blocks, right side terminates. Resource must be closed.
complexProc.wye(qProc)(whileBoth).run.run
nOpened.get == 0
}
// ensure that zipping terminates when the smaller stream runs out
property("zip one side infinite") = secure {
val ones = Process.eval(Task.now(1)).repeat
val p = Process(1,2,3)
ones.zip(p).runLog.run == IndexedSeq(1 -> 1, 1 -> 2, 1 -> 3) &&
p.zip(ones).runLog.run == IndexedSeq(1 -> 1, 2 -> 1, 3 -> 1)
}
property("merge") = secure {
import scala.concurrent.duration._
val sleepsL = Process.awakeEvery(1 seconds).take(3)
val sleepsR = Process.awakeEvery(100 milliseconds).take(30)
val sleeps = sleepsL merge sleepsR
val p = sleeps.toTask
val tasks = List.fill(10)(p.timed(500).attemptRun)
tasks.forall(_.isRight)
}
property("forwardFill") = secure {
import scala.concurrent.duration._
val t2 = Process.awakeEvery(2 seconds).forwardFill.zip {
Process.awakeEvery(100 milliseconds).take(100)
}.run.timed(15000).run
true
}
property("range") = secure {
Process.range(0, 100).runLog.run == IndexedSeq.range(0, 100) &&
Process.range(0, 1).runLog.run == IndexedSeq.range(0, 1) &&
Process.range(0, 0).runLog.run == IndexedSeq.range(0, 0)
}
property("ranges") = forAll(Gen.choose(1, 101)) { size =>
Process.ranges(0, 100, size).flatMap { case (i,j) => emitSeq(i until j) }.runLog.run ==
IndexedSeq.range(0, 100)
}
property("liftL") = secure {
import scalaz.\\/._
val s = Process.range(0, 100000)
val p = s.map(left) pipe process1.id[Int].liftL
true
}
property("feedL") = secure {
val w = wye.feedL(List.fill(10)(1))(process1.id)
val x = Process.range(0,100).wye(halt)(w).runLog.run
x.toList == (List.fill(10)(1) ++ List.range(0,100))
}
property("feedR") = secure {
val w = wye.feedR(List.fill(10)(1))(wye.merge[Int])
val x = Process.range(0,100).wye(halt)(w).runLog.run
x.toList == (List.fill(10)(1) ++ List.range(0,100))
}
property("either") = secure {
val w = wye.either[Int,Int]
val s = Process.constant(1).take(1)
s.wye(s)(w).runLog.run.map(_.fold(identity, identity)).toList == List(1,1)
}
property("last") = secure {
var i = 0
Process.range(0,10).last.map(_ => i += 1).runLog.run
i =? 1
}
property("state") = secure {
val s = Process.state((0, 1))
val fib = Process(0, 1) ++ s.flatMap { case (get, set) =>
val (prev0, prev1) = get
val next = prev0 + prev1
eval(set((prev1, next))).drain ++ emit(next)
}
val l = fib.take(10).runLog.run.toList
l === List(0, 1, 1, 2, 3, 5, 8, 13, 21, 34)
}
property("chunkBy2") = secure {
val s = Process(3, 5, 4, 3, 1, 2, 6)
s.chunkBy2(_ < _).toList == List(Vector(3, 5), Vector(4), Vector(3), Vector(1, 2, 6)) &&
s.chunkBy2(_ > _).toList == List(Vector(3), Vector(5, 4, 3, 1), Vector(2), Vector(6))
}
property("duration") = {
val firstValueDiscrepancy = duration.take(1).runLast.run.get
val reasonableError = 200 * 1000000 // 200 millis
(firstValueDiscrepancy.toNanos < reasonableError) :| "duration is near zero at first access"
}
implicit def arbVec[A:Arbitrary]: Arbitrary[IndexedSeq[A]] =
Arbitrary(Gen.listOf(arbitrary[A]).map(_.toIndexedSeq))
property("zipAll") = forAll((l: IndexedSeq[Int], l2: IndexedSeq[Int]) => {
val a = Process.range(0,l.length).map(l(_))
val b = Process.range(0,l2.length).map(l2(_))
val r = a.tee(b)(tee.zipAll(-1, 1)).runLog.run.toList
r.toString |: (r == l.zipAll(l2, -1, 1).toList)
})
property("passL/R") = secure {
val a = Process.range(0,10)
val b: Process[Task,Int] = halt
a.tee(b)(tee.passL[Int]).runLog.run == List.range(0,10) &&
b.tee(a)(tee.passR[Int]).runLog.run == List.range(0,10)
}
property("cleanup") = secure {
val a = Process(false).toSource |> await1[Boolean]
val b = a.orElse(Process.emit(false), Process.emit(true))
b.cleanup.runLastOr(false).run
}
property("onFailure") = secure {
@volatile var i: Int = 0
val p = eval(Task.delay(sys.error("FAIL"))) onFailure (Process.emit(1)) map (j => i = j)
try { p.run.run; false }
catch { case e: Throwable =>
e.getMessage == "FAIL" && i == 1
}
}
property("interrupt") = secure {
val p1 = Process(1,2,3,4,6).toSource
val i1 = repeatEval(Task.now(false))
val v = i1.wye(p1)(wye.interrupt).runLog.run.toList
v == List(1,2,3,4,6)
}
import scala.concurrent.duration._
val smallDelay = Gen.choose(10, 300) map {_.millis}
property("every") =
forAll(smallDelay) { delay: Duration =>
type BD = (Boolean, Duration)
val durationSinceLastTrue: Process1[BD, BD] = {
def go(lastTrue: Duration): Process1[BD,BD] = {
await1 flatMap { pair:(Boolean, Duration) => pair match {
case (true , d) => emit((true , d - lastTrue)) fby go(d)
case (false, d) => emit((false, d - lastTrue)) fby go(lastTrue)
} }
}
go(0.seconds)
}
val draws = (600.millis / delay) min 10 // don't take forever
val durationsSinceSpike = every(delay).
tee(duration)(tee zipWith {(a,b) => (a,b)}).
take(draws.toInt) |>
durationSinceLastTrue
val result = durationsSinceSpike.runLog.run.toList
val (head :: tail) = result
head._1 :| "every always emits true first" &&
tail.filter (_._1).map(_._2).forall { _ >= delay } :| "true means the delay has passed" &&
tail.filterNot(_._1).map(_._2).forall { _ <= delay } :| "false means the delay has not passed"
}
property("pipeIn") = secure {
val q = async.boundedQueue[String]()
val sink = q.enqueue.pipeIn(process1.lift[Int,String](_.toString))
(Process.range(0,10) to sink).run.run
val res = q.dequeue.take(10).runLog.run.toList
q.close.run
res === (0 until 10).map(_.toString).toList
}
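  // Drives a process one step at a time via runStep, accumulating emitted values and the
  // errors raised along the way, and checks that the onFailure/onComplete fallbacks appear
  // in the collected output.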
property("runStep") = secure {
    def go(p:Process[Task,Int], acc:Seq[Throwable \/ Int]) : Throwable \/ Seq[Throwable \/ Int] = {
      p.runStep.run match {
        case Step(-\/(e),Halt(_),Halt(_)) => \/-(acc)
        case Step(-\/(e),Halt(_), c) => go(c,acc :+ -\/(e))
        case Step(-\/(e),t,_) => go(t,acc :+ -\/(e))
        case Step(\/-(a),t,_) => go(t,acc ++ a.map(\/-(_)))
}
}
val ex = new java.lang.Exception("pure")
val p1 = Process.range(10,12)
val p2 = Process.range(20,22) ++ Process.suspend(eval(Task.fail(ex))) onFailure(Process(100).toSource)
val p3 = Process.await(Task.delay(1))(i=> throw ex,halt,emit(200)) //throws exception in `pure` code
go((p1 ++ p2) onComplete p3, Vector()) match {
      case -\/(e) => false
      case \/-(c) =>
c == List(
right(10),right(11)
, right(20),right(21),left(ex),right(100)
, left(ex), right(200)
)
}
}
property("runStep.stackSafety") = secure {
def go(p:Process[Task,Int], acc:Int) : Int = {
p.runStep.run match {
        case Step(-\/(e),Halt(_),_) => acc
        case Step(-\/(e),t,_) => go(t,acc)
        case Step(\/-(a),t,_) => go(t,acc + a.sum)
}
}
val s = 1 until 10000
val p1 = s.foldLeft[Process[Task,Int]](halt)({case (p,n)=>Emit(Vector(n),p)})
go(p1,0) == s.sum
}
property("affine") = secure {
var cnt = 0
(affine(eval_(Task.delay{ cnt = cnt + 1})) fby
eval(Task.delay(cnt))).repeat.take(100)
.run.run
cnt == 1
}
}
| doctau/scalaz-stream | src/test/scala/scalaz/stream/ProcessSpec.scala | Scala | mit | 17,660 |
package play.core
import scala.concurrent.ExecutionContext
import java.util.concurrent.Executors
private[play] object Execution {
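  // Sized from the application's `internal-threadpool-size` setting when present, otherwise
  // one thread per available processor; worker threads are named "play-internal-execution-context".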
lazy val internalContext: scala.concurrent.ExecutionContext = {
val numberOfThreads = play.api.Play.maybeApplication.map(_.configuration.getInt("internal-threadpool-size")).flatten.getOrElse(Runtime.getRuntime.availableProcessors)
ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(numberOfThreads, NamedThreadFactory("play-internal-execution-context")))
}
}
| noel-yap/setter-for-catan | play-2.1.1/framework/src/play/src/main/scala/play/core/system/Execution.scala | Scala | apache-2.0 | 523 |
package org.jetbrains.plugins.scala
package debugger
import java.util
import java.util.Collections
import com.intellij.debugger.engine._
import com.intellij.debugger.jdi.VirtualMachineProxyImpl
import com.intellij.debugger.requests.ClassPrepareRequestor
import com.intellij.debugger.{MultiRequestPositionManager, NoDataException, PositionManager, SourcePosition}
import com.intellij.openapi.editor.Document
import com.intellij.openapi.project.{DumbService, Project}
import com.intellij.openapi.roots.impl.DirectoryIndex
import com.intellij.openapi.util.Ref
import com.intellij.openapi.vfs.VirtualFile
import com.intellij.psi._
import com.intellij.psi.search.{FilenameIndex, GlobalSearchScope}
import com.intellij.psi.util.CachedValueProvider.Result
import com.intellij.psi.util.{CachedValueProvider, CachedValuesManager, PsiTreeUtil}
import com.intellij.util.{Processor, Query}
import com.sun.jdi._
import com.sun.jdi.request.ClassPrepareRequest
import org.jetbrains.annotations.{NotNull, Nullable}
import org.jetbrains.plugins.scala.caches.ScalaShortNamesCacheManager
import org.jetbrains.plugins.scala.debugger.ScalaPositionManager._
import org.jetbrains.plugins.scala.debugger.evaluation.ScalaEvaluatorBuilderUtil
import org.jetbrains.plugins.scala.debugger.evaluation.evaluator.ScalaCompilingEvaluator
import org.jetbrains.plugins.scala.debugger.evaluation.util.DebuggerUtil
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.{ScBindingPattern, ScConstructorPattern, ScInfixPattern}
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.api.statements._
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.{ScParameter, ScParameters}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef._
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiManager
import org.jetbrains.plugins.scala.lang.psi.types.ValueClassType
import org.jetbrains.plugins.scala.lang.refactoring.util.ScalaNamesUtil
import org.jetbrains.plugins.scala.util.macroDebug.ScalaMacroDebuggingUtil
import scala.annotation.tailrec
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.reflect.NameTransformer
import scala.util.Try
/**
* @author ilyas
*/
class ScalaPositionManager(val debugProcess: DebugProcess) extends PositionManager with MultiRequestPositionManager with LocationLineManager {
protected[debugger] val caches = new ScalaPositionManagerCaches(debugProcess)
import caches._
ScalaPositionManager.cacheInstance(this)
@Nullable
def getSourcePosition(@Nullable location: Location): SourcePosition = {
if (shouldSkip(location)) return null
val position =
for {
loc <- location.toOption
psiFile <- getPsiFileByReferenceType(debugProcess.getProject, loc.declaringType).toOption
lineNumber = exactLineNumber(location)
if lineNumber >= 0
} yield {
calcPosition(psiFile, location, lineNumber).getOrElse {
SourcePosition.createFromLine(psiFile, lineNumber)
}
}
position match {
case Some(p) => p
case None => throw NoDataException.INSTANCE
}
}
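  // Collects every loaded ReferenceType that may correspond to the given source position:
  // exact JVM names for stable type definitions (plus their $class/delayedInit companions)
  // and name-pattern matches for lambdas and other generated classes.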
@NotNull
def getAllClasses(@NotNull position: SourcePosition): util.List[ReferenceType] = {
val file = position.getFile
throwIfNotScalaFile(file)
val generatedClassName = file.getUserData(ScalaCompilingEvaluator.classNameKey)
def hasLocations(refType: ReferenceType, position: SourcePosition): Boolean = {
try {
val generated = generatedClassName != null && refType.name().contains(generatedClassName)
lazy val sameFile = getPsiFileByReferenceType(file.getProject, refType) == file
generated || sameFile && locationsOfLine(refType, position).size > 0
} catch {
case _: NoDataException | _: AbsentInformationException | _: ClassNotPreparedException | _: ObjectCollectedException => false
}
}
val possiblePositions = positionsOnLine(file, position.getLine)
val exactClasses = ArrayBuffer[ReferenceType]()
val namePatterns = mutable.Set[NamePattern]()
inReadAction {
val onTheLine = possiblePositions.map(findGeneratingClassOrMethodParent)
if (onTheLine.isEmpty) return Collections.emptyList()
val nonLambdaParent =
if (isCompiledWithIndyLambdas(file)) {
val nonStrictParents = onTheLine.head.withParentsInFile
nonStrictParents.find(p => ScalaEvaluatorBuilderUtil.isGenerateNonAnonfunClass(p))
} else None
def addExactClasses(name: String) = {
exactClasses ++= debugProcess.getVirtualMachineProxy.classesByName(name).asScala
}
val sourceImages = onTheLine ++ nonLambdaParent
sourceImages.foreach {
case null =>
case tr: ScTrait if !DebuggerUtil.isLocalClass(tr) =>
val traitImplName = getSpecificNameForDebugger(tr)
val simpleName = traitImplName.stripSuffix("$class")
Seq(simpleName, traitImplName).foreach(addExactClasses)
case td: ScTypeDefinition if !DebuggerUtil.isLocalClass(td) =>
val qName = getSpecificNameForDebugger(td)
val delayedBodyName = if (isDelayedInit(td)) Seq(s"$qName$delayedInitBody") else Nil
(qName +: delayedBodyName).foreach(addExactClasses)
case elem =>
val namePattern = NamePattern.forElement(elem)
namePatterns ++= Option(namePattern)
}
}
val packageName: Option[String] = Option(inReadAction(file.asInstanceOf[ScalaFile].getPackageName))
val foundWithPattern =
if (namePatterns.isEmpty) Nil
else filterAllClasses(c => hasLocations(c, position) && namePatterns.exists(_.matches(c)), packageName)
(exactClasses ++ foundWithPattern).distinct.asJava
}
@NotNull
def locationsOfLine(@NotNull refType: ReferenceType, @NotNull position: SourcePosition): util.List[Location] = {
throwIfNotScalaFile(position.getFile)
checkForIndyLambdas(refType)
try {
val line: Int = position.getLine
locationsOfLine(refType, line).asJava
}
catch {
case _: AbsentInformationException => Collections.emptyList()
}
}
def createPrepareRequest(@NotNull requestor: ClassPrepareRequestor, @NotNull position: SourcePosition): ClassPrepareRequest = {
throw new IllegalStateException("This class implements MultiRequestPositionManager, corresponding createPrepareRequests version should be used")
}
override def createPrepareRequests(requestor: ClassPrepareRequestor, position: SourcePosition): util.List[ClassPrepareRequest] = {
def isLocalOrUnderDelayedInit(definition: PsiClass): Boolean = {
DebuggerUtil.isLocalClass(definition) || isDelayedInit(definition)
}
def findEnclosingTypeDefinition: Option[ScTypeDefinition] = {
@tailrec
def notLocalEnclosingTypeDefinition(element: PsiElement): Option[ScTypeDefinition] = {
PsiTreeUtil.getParentOfType(element, classOf[ScTypeDefinition]) match {
case null => None
case td if DebuggerUtil.isLocalClass(td) => notLocalEnclosingTypeDefinition(td.getParent)
case td => Some(td)
}
}
val element = nonWhitespaceElement(position)
notLocalEnclosingTypeDefinition(element)
}
def createPrepareRequest(position: SourcePosition): ClassPrepareRequest = {
val qName = new Ref[String](null)
val waitRequestor = new Ref[ClassPrepareRequestor](null)
inReadAction {
val sourceImage = findReferenceTypeSourceImage(position)
val insideMacro: Boolean = isInsideMacro(nonWhitespaceElement(position))
sourceImage match {
case cl: ScClass if ValueClassType.isValueClass(cl) =>
            // value classes are never instantiated at runtime; their methods are called on the companion object
qName.set(getSpecificNameForDebugger(cl) + "$")
case tr: ScTrait if !DebuggerUtil.isLocalClass(tr) =>
            // match both encodings of trait methods (the interface and its $class companion)
qName.set(tr.getQualifiedNameForDebugger + "*")
case typeDef: ScTypeDefinition if !isLocalOrUnderDelayedInit(typeDef) =>
val specificName = getSpecificNameForDebugger(typeDef)
qName.set(if (insideMacro) specificName + "*" else specificName)
case _ =>
findEnclosingTypeDefinition.foreach(typeDef => qName.set(typeDef.getQualifiedNameForDebugger + "*"))
}
        // No enclosing type definition was found; fall back to the script holder class
if (qName.get == null) {
qName.set(SCRIPT_HOLDER_CLASS_NAME + "*")
}
waitRequestor.set(new ScalaPositionManager.MyClassPrepareRequestor(position, requestor))
}
debugProcess.getRequestsManager.createClassPrepareRequest(waitRequestor.get, qName.get)
}
val file = position.getFile
throwIfNotScalaFile(file)
val possiblePositions = inReadAction {
positionsOnLine(file, position.getLine).map(SourcePosition.createFromElement)
}
possiblePositions.map(createPrepareRequest).asJava
}
private def throwIfNotScalaFile(file: PsiFile): Unit = {
if (!checkScalaFile(file)) throw NoDataException.INSTANCE
}
private def checkScalaFile(file: PsiFile): Boolean = file match {
case sf: ScalaFile => !sf.isCompiled
case _ => false
}
private def filterAllClasses(condition: ReferenceType => Boolean, packageName: Option[String]): Seq[ReferenceType] = {
def samePackage(refType: ReferenceType) = {
val name = refType.name()
val lastDot = name.lastIndexOf('.')
val refTypePackageName = if (lastDot < 0) "" else name.substring(0, lastDot)
packageName.isEmpty || packageName.contains(refTypePackageName)
}
def isAppropriate(refType: ReferenceType) = {
Try(samePackage(refType) && refType.isInitialized && condition(refType)).getOrElse(false)
}
import scala.collection.JavaConverters._
for {
refType <- debugProcess.getVirtualMachineProxy.allClasses.asScala
if isAppropriate(refType)
} yield {
refType
}
}
@Nullable
private def findReferenceTypeSourceImage(@NotNull position: SourcePosition): PsiElement = {
val element = nonWhitespaceElement(position)
findGeneratingClassOrMethodParent(element)
}
protected def nonWhitespaceElement(@NotNull position: SourcePosition): PsiElement = {
val file = position.getFile
@tailrec
def nonWhitespaceInner(element: PsiElement, document: Document): PsiElement = {
element match {
case null => null
case _: PsiWhiteSpace if document.getLineNumber(element.getTextRange.getEndOffset) == position.getLine =>
val nextElement = file.findElementAt(element.getTextRange.getEndOffset)
nonWhitespaceInner(nextElement, document)
case _ => element
}
}
if (!file.isInstanceOf[ScalaFile]) null
else {
val firstElement = file.findElementAt(position.getOffset)
try {
val document = PsiDocumentManager.getInstance(file.getProject).getDocument(file)
nonWhitespaceInner(firstElement, document)
}
catch {
case _: Throwable => firstElement
}
}
}
private def calcPosition(file: PsiFile, location: Location, lineNumber: Int): Option[SourcePosition] = {
throwIfNotScalaFile(file)
def isDefaultArgument(method: Method) = {
val methodName = method.name()
val lastDollar = methodName.lastIndexOf("$")
if (lastDollar >= 0) {
val (start, index) = methodName.splitAt(lastDollar + 1)
(start.endsWith("$default$"), index)
}
else (false, "")
}
def findDefaultArg(possiblePositions: Seq[PsiElement], defaultArgIndex: String) : Option[PsiElement] = {
try {
val paramNumber = defaultArgIndex.toInt - 1
possiblePositions.find {
case e =>
val scParameters = PsiTreeUtil.getParentOfType(e, classOf[ScParameters])
if (scParameters != null) {
val param = scParameters.params(paramNumber)
param.isDefaultParam && param.isAncestorOf(e)
}
else false
}
} catch {
case _: Exception => None
}
}
def calcElement(): Option[PsiElement] = {
val possiblePositions = positionsOnLine(file, lineNumber)
val currentMethod = location.method()
lazy val (isDefaultArg, defaultArgIndex) = isDefaultArgument(currentMethod)
def findPsiElementForIndyLambda(): Option[PsiElement] = {
val lambdas = lambdasOnLine(file, lineNumber)
val methods = indyLambdaMethodsOnLine(location.declaringType(), lineNumber)
val methodsToLambdas = methods.zip(lambdas).toMap
methodsToLambdas.get(currentMethod)
}
if (possiblePositions.size <= 1) {
possiblePositions.headOption
}
else if (isIndyLambda(currentMethod)) {
findPsiElementForIndyLambda()
}
else if (isDefaultArg) {
findDefaultArg(possiblePositions, defaultArgIndex)
}
else if (!isAnonfun(currentMethod)) {
possiblePositions.find {
case e: PsiElement if isLambda(e) => false
case (_: ScExpression) childOf (_: ScParameter) => false
case _ => true
}
}
else {
val generatingPsiElem = findElementByReferenceType(location.declaringType())
possiblePositions.find(p => generatingPsiElem.contains(findGeneratingClassOrMethodParent(p)))
}
}
calcElement().map(SourcePosition.createFromElement)
}
private def findScriptFile(refType: ReferenceType): Option[PsiFile] = {
try {
val name = refType.name()
if (name.startsWith(SCRIPT_HOLDER_CLASS_NAME)) {
cachedSourceName(refType) match {
case Some(srcName) =>
val files = FilenameIndex.getFilesByName(debugProcess.getProject, srcName, debugProcess.getSearchScope)
files.headOption
case _ => None
}
}
else None
}
catch {
case _: AbsentInformationException => None
}
}
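  // Resolves the PsiFile for a JVM ReferenceType: script holders by source name, macro
  // debugging via a package-directory search, everything else through the decoded qualified
  // name; the result (and whether the file was compiled with indy lambdas) is cached.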
@Nullable
private def getPsiFileByReferenceType(project: Project, refType: ReferenceType): PsiFile = {
if (refType == null) return null
if (refTypeToFileCache.contains(refType)) return refTypeToFileCache(refType)
def searchForMacroDebugging(qName: String): PsiFile = {
val directoryIndex: DirectoryIndex = DirectoryIndex.getInstance(project)
val dotIndex = qName.lastIndexOf(".")
val packageName = if (dotIndex > 0) qName.substring(0, dotIndex) else ""
val query: Query[VirtualFile] = directoryIndex.getDirectoriesByPackageName(packageName, true)
val fileNameWithoutExtension = if (dotIndex > 0) qName.substring(dotIndex + 1) else qName
val fileNames: util.Set[String] = new util.HashSet[String]
import scala.collection.JavaConversions._
      for (extension <- ScalaLoader.SCALA_EXTENSIONS) {
        fileNames.add(fileNameWithoutExtension + "." + extension)
}
val result = new Ref[PsiFile]
query.forEach(new Processor[VirtualFile] {
override def process(vDir: VirtualFile): Boolean = {
var isFound = false
for {
fileName <- fileNames
if !isFound
vFile <- vDir.findChild(fileName).toOption
} {
val psiFile: PsiFile = PsiManager.getInstance(project).findFile(vFile)
val debugFile: PsiFile = ScalaMacroDebuggingUtil.loadCode(psiFile, force = false)
if (debugFile != null) {
result.set(debugFile)
isFound = true
}
else if (psiFile.isInstanceOf[ScalaFile]) {
result.set(psiFile)
isFound = true
}
}
!isFound
}
})
result.get
}
def findFile() = {
def withDollarTestName(originalQName: String): Option[String] = {
val dollarTestSuffix = "$Test" //See SCL-9340
if (originalQName.endsWith(dollarTestSuffix)) Some(originalQName)
else if (originalQName.contains(dollarTestSuffix + "$")) {
val index = originalQName.indexOf(dollarTestSuffix) + dollarTestSuffix.length
Some(originalQName.take(index))
}
else None
}
def topLevelClassName(originalQName: String): String = {
if (originalQName.endsWith(packageSuffix)) originalQName
else originalQName.replace(packageSuffix, ".").takeWhile(_ != '$')
}
def tryToFindClass(name: String) = {
findClassByQualName(name, isScalaObject = false)
.orElse(findClassByQualName(name, isScalaObject = true))
}
val scriptFile = findScriptFile(refType)
val file = scriptFile.getOrElse {
val originalQName = NameTransformer.decode(refType.name)
if (!ScalaMacroDebuggingUtil.isEnabled) {
val clazz = withDollarTestName(originalQName).flatMap(tryToFindClass)
.orElse(tryToFindClass(topLevelClassName(originalQName)))
clazz.map(_.getNavigationElement.getContainingFile).orNull
}
else
searchForMacroDebugging(topLevelClassName(originalQName))
}
file
}
val file = inReadAction(findFile())
if (file != null && refType.methods().asScala.exists(isIndyLambda)) {
isCompiledWithIndyLambdasCache.put(file, true)
}
refTypeToFileCache.put(refType, file)
file
}
private def nameMatches(elem: PsiElement, refType: ReferenceType): Boolean = {
val pattern = NamePattern.forElement(elem)
pattern != null && pattern.matches(refType)
}
private def checkForIndyLambdas(refType: ReferenceType) = {
if (!refTypeToFileCache.contains(refType)) {
getPsiFileByReferenceType(debugProcess.getProject, refType)
}
}
def findElementByReferenceType(refType: ReferenceType): Option[PsiElement] = {
def createPointer(elem: PsiElement) =
SmartPointerManager.getInstance(debugProcess.getProject).createSmartPsiElementPointer(elem)
refTypeToElementCache.get(refType) match {
case Some(Some(p)) if p.getElement != null => Some(p.getElement)
case Some(Some(_)) | None =>
val found = findElementByReferenceTypeInner(refType)
refTypeToElementCache.update(refType, found.map(createPointer))
found
case Some(None) => None
}
}
private def findElementByReferenceTypeInner(refType: ReferenceType): Option[PsiElement] = {
val byName = findByQualName(refType) orElse findByShortName(refType)
if (byName.isDefined) return byName
val project = debugProcess.getProject
val allLocations = Try(refType.allLineLocations().asScala).getOrElse(Seq.empty)
val refTypeLineNumbers = allLocations.map(checkedLineNumber).filter(_ > 0)
if (refTypeLineNumbers.isEmpty) return None
val firstRefTypeLine = refTypeLineNumbers.min
val lastRefTypeLine = refTypeLineNumbers.max
val refTypeLines = firstRefTypeLine to lastRefTypeLine
val file = getPsiFileByReferenceType(project, refType)
if (!checkScalaFile(file)) return None
val document = PsiDocumentManager.getInstance(project).getDocument(file)
if (document == null) return None
def elementLineRange(elem: PsiElement, document: Document) = {
val startLine = document.getLineNumber(elem.getTextRange.getStartOffset)
val endLine = document.getLineNumber(elem.getTextRange.getEndOffset)
startLine to endLine
}
def checkLines(elem: PsiElement, document: Document) = {
val lineRange = elementLineRange(elem, document)
      // Loose intersection check: the first line of an <init> method sometimes falls outside the class's own line range
firstRefTypeLine <= lineRange.end && lastRefTypeLine >= lineRange.start
}
def isAppropriateCandidate(elem: PsiElement) = {
checkLines(elem, document) && ScalaEvaluatorBuilderUtil.isGenerateClass(elem) && nameMatches(elem, refType)
}
def findCandidates(): Seq[PsiElement] = {
def findAt(offset: Int): Option[PsiElement] = {
val startElem = file.findElementAt(offset)
startElem.parentsInFile.find(isAppropriateCandidate)
}
if (lastRefTypeLine - firstRefTypeLine >= 2) {
val offsetsInTheMiddle = Seq(
document.getLineEndOffset(firstRefTypeLine),
document.getLineEndOffset(firstRefTypeLine + 1)
)
offsetsInTheMiddle.flatMap(findAt).distinct
}
else {
val firstLinePositions = positionsOnLine(file, firstRefTypeLine)
val allPositions =
if (firstRefTypeLine == lastRefTypeLine) firstLinePositions
else firstLinePositions ++ positionsOnLine(file, lastRefTypeLine)
allPositions.distinct.filter(isAppropriateCandidate)
}
}
def filterWithSignature(candidates: Seq[PsiElement]) = {
val applySignature = refType.methodsByName("apply").asScala.find(m => !m.isSynthetic).map(_.signature())
if (applySignature.isEmpty) candidates
else {
candidates.filter(l => applySignature == DebuggerUtil.lambdaJVMSignature(l))
}
}
val candidates = findCandidates()
if (candidates.size <= 1) return candidates.headOption
if (refTypeLines.size > 1) {
val withExactlySameLines = candidates.filter(elementLineRange(_, document) == refTypeLines)
if (withExactlySameLines.size == 1) return withExactlySameLines.headOption
}
if (candidates.exists(!isLambda(_))) return candidates.headOption
val filteredWithSignature = filterWithSignature(candidates)
if (filteredWithSignature.size == 1) return filteredWithSignature.headOption
val byContainingClasses = filteredWithSignature.groupBy(c => findGeneratingClassOrMethodParent(c.getParent))
if (byContainingClasses.size > 1) {
findContainingClass(refType) match {
case Some(e) => return byContainingClasses.get(e).flatMap(_.headOption)
case None =>
}
}
filteredWithSignature.headOption
}
private def findClassByQualName(qName: String, isScalaObject: Boolean): Option[PsiClass] = {
val project = debugProcess.getProject
val cacheManager = ScalaShortNamesCacheManager.getInstance(project)
val classes =
if (qName.endsWith(packageSuffix))
Option(cacheManager.getPackageObjectByName(qName.stripSuffix(packageSuffix), GlobalSearchScope.allScope(project))).toSeq
else
cacheManager.getClassesByFQName(qName.replace(packageSuffix, "."), debugProcess.getSearchScope)
val clazz =
if (classes.length == 1) classes.headOption
else if (classes.length >= 2) {
if (isScalaObject) classes.find(_.isInstanceOf[ScObject])
else classes.find(!_.isInstanceOf[ScObject])
}
else None
clazz.filter(_.isValid)
}
private def findByQualName(refType: ReferenceType): Option[PsiClass] = {
val originalQName = NameTransformer.decode(refType.name)
val endsWithPackageSuffix = originalQName.endsWith(packageSuffix)
val withoutSuffix =
if (endsWithPackageSuffix) originalQName.stripSuffix(packageSuffix)
else originalQName.stripSuffix("$").stripSuffix("$class")
val withDots = withoutSuffix.replace(packageSuffix, ".").replace('$', '.')
val transformed = if (endsWithPackageSuffix) withDots + packageSuffix else withDots
findClassByQualName(transformed, originalQName.endsWith("$"))
}
private def findByShortName(refType: ReferenceType): Option[PsiClass] = {
val project = debugProcess.getProject
if (DumbService.getInstance(project).isDumb) return None
lazy val sourceName = cachedSourceName(refType).getOrElse("")
def sameFileName(elem: PsiElement) = {
val containingFile = elem.getContainingFile
containingFile != null && containingFile.name == sourceName
}
val originalQName = NameTransformer.decode(refType.name)
val withoutSuffix =
if (originalQName.endsWith(packageSuffix)) originalQName
else originalQName.replace(packageSuffix, ".").stripSuffix("$").stripSuffix("$class")
val lastDollar = withoutSuffix.lastIndexOf('$')
val lastDot = withoutSuffix.lastIndexOf('.')
val index = Seq(lastDollar, lastDot, 0).max + 1
val name = withoutSuffix.drop(index)
val isScalaObject = originalQName.endsWith("$")
val cacheManager = ScalaShortNamesCacheManager.getInstance(project)
val classes = cacheManager.getClassesByName(name, GlobalSearchScope.allScope(project))
val inSameFile = classes.filter(c => c.isValid && sameFileName(c))
    if (inSameFile.length == 1) inSameFile.headOption
else if (inSameFile.length >= 2) {
if (isScalaObject) inSameFile.find(_.isInstanceOf[ScObject])
else inSameFile.find(!_.isInstanceOf[ScObject])
}
else None
}
private def findContainingClass(refType: ReferenceType): Option[PsiElement] = {
def classesByName(s: String) = {
val vm = debugProcess.getVirtualMachineProxy
vm.classesByName(s).asScala
}
val name = NameTransformer.decode(refType.name())
val index = name.lastIndexOf("$$")
if (index < 0) return None
val containingName = NameTransformer.encode(name.substring(0, index))
classesByName(containingName).headOption.flatMap(findElementByReferenceType)
}
}
object ScalaPositionManager {
private val SCRIPT_HOLDER_CLASS_NAME: String = "Main$$anon$1"
private val packageSuffix = ".package$"
private val delayedInitBody = "delayedInit$body"
private val isCompiledWithIndyLambdasCache = mutable.HashMap[PsiFile, Boolean]()
private val instances = mutable.HashMap[DebugProcess, ScalaPositionManager]()
private def cacheInstance(scPosManager: ScalaPositionManager) = {
val debugProcess = scPosManager.debugProcess
instances.put(debugProcess, scPosManager)
debugProcess.addDebugProcessListener(new DebugProcessAdapter {
override def processDetached(process: DebugProcess, closedByUser: Boolean): Unit = {
ScalaPositionManager.instances.remove(process)
debugProcess.removeDebugProcessListener(this)
}
})
}
def instance(vm: VirtualMachine): Option[ScalaPositionManager] = instances.collectFirst {
case (process, manager) if getVM(process).contains(vm) => manager
}
def instance(debugProcess: DebugProcess): Option[ScalaPositionManager] = instances.get(debugProcess)
def instance(mirror: Mirror): Option[ScalaPositionManager] = instance(mirror.virtualMachine())
private def getVM(debugProcess: DebugProcess) = {
if (!DebuggerManagerThreadImpl.isManagerThread) None
else {
debugProcess.getVirtualMachineProxy match {
case impl: VirtualMachineProxyImpl => Option(impl.getVirtualMachine)
case _ => None
}
}
}
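  // Returns the distinct PSI elements on the given line that can generate their own JVM
  // classes or methods (lambdas, anonymous classes, expressions, type definitions); results
  // are cached per file and line.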
def positionsOnLine(file: PsiFile, lineNumber: Int): Seq[PsiElement] = {
if (lineNumber < 0) return Seq.empty
val scFile = file match {
case sf: ScalaFile => sf
case _ => return Seq.empty
}
val cacheProvider = new CachedValueProvider[mutable.HashMap[Int, Seq[PsiElement]]] {
override def compute(): Result[mutable.HashMap[Int, Seq[PsiElement]]] = Result.create(mutable.HashMap[Int, Seq[PsiElement]](), file)
}
CachedValuesManager.getCachedValue(file, cacheProvider).getOrElseUpdate(lineNumber, positionsOnLineInner(scFile, lineNumber))
}
def checkedLineNumber(location: Location): Int =
try location.lineNumber() - 1
catch {case _: InternalError => -1}
def cachedSourceName(refType: ReferenceType): Option[String] = {
ScalaPositionManager.instance(refType).map(_.caches).flatMap(_.cachedSourceName(refType))
}
private def positionsOnLineInner(file: ScalaFile, lineNumber: Int): Seq[PsiElement] = {
inReadAction {
val document = PsiDocumentManager.getInstance(file.getProject).getDocument(file)
if (document == null || lineNumber >= document.getLineCount) return Seq.empty
val startLine = document.getLineStartOffset(lineNumber)
val endLine = document.getLineEndOffset(lineNumber)
def elementsOnTheLine(file: ScalaFile, lineNumber: Int): Seq[PsiElement] = {
val result = ArrayBuffer[PsiElement]()
var elem = file.findElementAt(startLine)
while (elem != null && elem.getTextOffset <= endLine) {
elem match {
case ChildOf(_: ScUnitExpr) | ChildOf(ScBlock()) =>
result += elem
case ElementType(t) if ScalaTokenTypes.WHITES_SPACES_AND_COMMENTS_TOKEN_SET.contains(t) ||
ScalaTokenTypes.BRACES_TOKEN_SET.contains(t) =>
case _ =>
result += elem
}
elem = PsiTreeUtil.nextLeaf(elem, true)
}
result
}
def findParent(element: PsiElement): Option[PsiElement] = {
val parentsOnTheLine = element.withParentsInFile.takeWhile(e => e.getTextOffset > startLine).toIndexedSeq
val anon = parentsOnTheLine.collectFirst {
case e if isLambda(e) => e
case newTd: ScNewTemplateDefinition if DebuggerUtil.generatesAnonClass(newTd) => newTd
}
val filteredParents = parentsOnTheLine.reverse.filter {
case _: ScExpression => true
case _: ScConstructorPattern | _: ScInfixPattern | _: ScBindingPattern => true
case callRefId childOf ((ref: ScReferenceExpression) childOf (_: ScMethodCall))
if ref.nameId == callRefId && ref.getTextRange.getStartOffset < startLine => true
case _: ScTypeDefinition => true
case _ => false
}
val maxExpressionPatternOrTypeDef =
filteredParents.find(!_.isInstanceOf[ScBlock]).orElse(filteredParents.headOption)
Seq(anon, maxExpressionPatternOrTypeDef).flatten.sortBy(_.getTextLength).headOption
}
elementsOnTheLine(file, lineNumber).flatMap(findParent).distinct
}
}
def isLambda(element: PsiElement): Boolean = {
ScalaEvaluatorBuilderUtil.isGenerateAnonfun(element) && !isInsideMacro(element)
}
def lambdasOnLine(file: PsiFile, lineNumber: Int): Seq[PsiElement] = {
positionsOnLine(file, lineNumber).filter(isLambda)
}
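  // Recognises both compiler naming schemes for lambdas compiled to methods via
  // invokedynamic: `owner$anonfun$N` (older scheme) and `$anonfun$...` (newer scheme),
  // excluding the generated `$adapted` bridge methods.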
def isIndyLambda(m: Method): Boolean = {
val name = m.name()
def isBeforeM5indyLambda = {
val lastDollar = name.lastIndexOf('$')
lastDollar > 0 && name.substring(0, lastDollar).endsWith("$anonfun")
}
def isAfterM5indyLambda = name.startsWith("$anonfun$") && !name.endsWith("$adapted")
isAfterM5indyLambda || isBeforeM5indyLambda
}
def isAnonfunType(refType: ReferenceType): Boolean = {
refType match {
case ct: ClassType =>
val supClass = ct.superclass()
supClass != null && supClass.name().startsWith("scala.runtime.AbstractFunction")
case _ => false
}
}
def isAnonfun(m: Method): Boolean = {
isIndyLambda(m) || m.name.startsWith("apply") && isAnonfunType(m.declaringType())
}
def indyLambdaMethodsOnLine(refType: ReferenceType, lineNumber: Int): Seq[Method] = {
def ordinal(m: Method) = {
val name = m.name()
val lastDollar = name.lastIndexOf('$')
Try(name.substring(lastDollar + 1).toInt).getOrElse(-1)
}
val all = refType.methods().asScala.filter(isIndyLambda)
val onLine = all.filter(m => Try(!m.locationsOfLine(lineNumber + 1).isEmpty).getOrElse(false))
onLine.sortBy(ordinal)
}
def isCompiledWithIndyLambdas(file: PsiFile): Boolean = {
if (file == null) false
else {
val originalFile = Option(file.getUserData(ScalaCompilingEvaluator.originalFileKey)).getOrElse(file)
isCompiledWithIndyLambdasCache.getOrElse(originalFile, false)
}
}
@tailrec
def findGeneratingClassOrMethodParent(element: PsiElement): PsiElement = {
element match {
case null => null
case elem if ScalaEvaluatorBuilderUtil.isGenerateClass(elem) || isLambda(elem) => elem
case elem if isMacroCall(elem) => elem
case elem => findGeneratingClassOrMethodParent(elem.getParent)
}
}
private object MacroDef {
val macroImpl = "scala.reflect.macros.internal.macroImpl"
def unapply(fun: ScFunction): Option[ScFunction] = {
fun match {
case m: ScMacroDefinition => Some(m)
case _ if fun.annotations.map(_.constructor.typeElement.getText).contains(macroImpl) => Some(fun)
case _ => None
}
}
}
private object InsideMacro {
def unapply(elem: PsiElement): Option[ScMethodCall] = {
elem.parentsInFile.collectFirst {
case mc: ScMethodCall if isMacroCall(mc) => mc
}
}
}
def isInsideMacro(elem: PsiElement): Boolean = elem.parentsInFile.exists(isMacroCall)
private def isMacroCall(elem: PsiElement): Boolean = elem match {
case ScMethodCall(ResolvesTo(MacroDef(_)), _) => true
case _ => false
}
object InsideAsync {
def unapply(elem: PsiElement): Option[ScMethodCall] = elem match {
case InsideMacro(call @ ScMethodCall(ref: ScReferenceExpression, _)) if ref.refName == "async" => Some(call)
case _ => None
}
}
def shouldSkip(location: Location, debugProcess: DebugProcess): Boolean = {
ScalaPositionManager.instance(debugProcess).forall(_.shouldSkip(location))
}
private def getSpecificNameForDebugger(td: ScTypeDefinition): String = {
val name = td.getQualifiedNameForDebugger
td match {
case _: ScObject => s"$name$$"
case _: ScTrait => s"$name$$class"
case _ => name
}
}
def isDelayedInit(cl: PsiClass): Boolean = cl match {
case obj: ScObject =>
val manager: ScalaPsiManager = ScalaPsiManager.instance(obj.getProject)
val clazz: PsiClass =
manager.getCachedClass(obj.getResolveScope, "scala.DelayedInit").orNull
clazz != null && obj.isInheritor(clazz, deep = true)
case _ => false
}
private class MyClassPrepareRequestor(position: SourcePosition, requestor: ClassPrepareRequestor) extends ClassPrepareRequestor {
private val sourceFile = position.getFile
private val sourceName = sourceFile.getName
private def sourceNameOf(refType: ReferenceType): Option[String] = ScalaPositionManager.cachedSourceName(refType)
def processClassPrepare(debuggerProcess: DebugProcess, referenceType: ReferenceType) {
val positionManager: CompoundPositionManager = debuggerProcess.asInstanceOf[DebugProcessImpl].getPositionManager
if (!sourceNameOf(referenceType).contains(sourceName)) return
if (positionManager.locationsOfLine(referenceType, position).size > 0) {
requestor.processClassPrepare(debuggerProcess, referenceType)
}
else {
val positionClasses: util.List[ReferenceType] = positionManager.getAllClasses(position)
if (positionClasses.contains(referenceType)) {
requestor.processClassPrepare(debuggerProcess, referenceType)
}
}
}
}
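  // Builds the expected fragments of a generated JVM class name for a PSI element
  // (enclosing type definitions, $anonfun/$anon parts) and matches candidate ReferenceTypes
  // against them, recomputing the parts if the file's lambda encoding changes.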
private class NamePattern(elem: PsiElement) {
private val containingFile = elem.getContainingFile
private val sourceName = containingFile.getName
private val isGeneratedForCompilingEvaluator = containingFile.getUserData(ScalaCompilingEvaluator.classNameKey) != null
private var compiledWithIndyLambdas = isCompiledWithIndyLambdas(containingFile)
private val exactName: Option[String] = {
elem match {
case td: ScTypeDefinition if !DebuggerUtil.isLocalClass(td) =>
Some(getSpecificNameForDebugger(td))
case _ => None
}
}
private var classJVMNameParts: Seq[String] = null
private def computeClassJVMNameParts(elem: PsiElement): Seq[String] = {
if (exactName.isDefined) Seq.empty
else inReadAction {
elem match {
case InsideMacro(call) => computeClassJVMNameParts(call.getParent)
case _ =>
val parts = elem.withParentsInFile.flatMap(partsFor)
parts.toSeq.reverse
}
}
}
private def partsFor(elem: PsiElement): Seq[String] = {
elem match {
case td: ScTypeDefinition => Seq(ScalaNamesUtil.toJavaName(td.name))
case newTd: ScNewTemplateDefinition if DebuggerUtil.generatesAnonClass(newTd) => Seq("$anon")
case e if ScalaEvaluatorBuilderUtil.isGenerateClass(e) => partsForAnonfun(e)
case _ => Seq.empty
}
}
private def partsForAnonfun(elem: PsiElement): Seq[String] = {
val anonfunCount = ScalaEvaluatorBuilderUtil.anonClassCount(elem)
val lastParts = Seq.fill(anonfunCount - 1)(Seq("$apply", "$anonfun")).flatten
val containingClass = findGeneratingClassOrMethodParent(elem.getParent)
val owner = PsiTreeUtil.getParentOfType(elem, classOf[ScFunctionDefinition], classOf[ScTypeDefinition],
classOf[ScPatternDefinition], classOf[ScVariableDefinition])
val firstParts =
if (PsiTreeUtil.isAncestor(owner, containingClass, true)) Seq("$anonfun")
else owner match {
case fun: ScFunctionDefinition =>
val name = if (fun.name == "this") JVMNameUtil.CONSTRUCTOR_NAME else fun.name
val encoded = NameTransformer.encode(name)
Seq(s"$$$encoded", "$anonfun")
case _ => Seq("$anonfun")
}
lastParts ++ firstParts
}
private def checkParts(name: String): Boolean = {
var nameTail = name
updateParts()
for (part <- classJVMNameParts) {
val index = nameTail.indexOf(part)
if (index >= 0) {
nameTail = nameTail.substring(index + part.length)
}
else return false
}
nameTail.indexOf("$anon") == -1
}
def updateParts(): Unit = {
val newValue = isCompiledWithIndyLambdas(containingFile)
if (newValue != compiledWithIndyLambdas || classJVMNameParts == null) {
compiledWithIndyLambdas = newValue
classJVMNameParts = computeClassJVMNameParts(elem)
}
}
def matches(refType: ReferenceType): Boolean = {
val refTypeSourceName = cachedSourceName(refType).getOrElse("")
if (refTypeSourceName != sourceName && !isGeneratedForCompilingEvaluator) return false
val name = refType.name()
exactName match {
case Some(qName) => qName == name || qName.stripSuffix("$class") == name
case None => checkParts(name)
}
}
}
private object NamePattern {
def forElement(elem: PsiElement): NamePattern = {
if (elem == null || !ScalaEvaluatorBuilderUtil.isGenerateClass(elem)) return null
val cacheProvider = new CachedValueProvider[NamePattern] {
override def compute(): Result[NamePattern] = Result.create(new NamePattern(elem), elem)
}
CachedValuesManager.getCachedValue(elem, cacheProvider)
}
}
private[debugger] class ScalaPositionManagerCaches(debugProcess: DebugProcess) {
debugProcess.addDebugProcessListener(new DebugProcessAdapter {
override def processDetached(process: DebugProcess, closedByUser: Boolean): Unit = {
clear()
process.removeDebugProcessListener(this)
}
})
val refTypeToFileCache = mutable.HashMap[ReferenceType, PsiFile]()
val refTypeToElementCache = mutable.HashMap[ReferenceType, Option[SmartPsiElementPointer[PsiElement]]]()
val customizedLocationsCache = mutable.HashMap[Location, Int]()
val lineToCustomizedLocationCache = mutable.HashMap[(ReferenceType, Int), Seq[Location]]()
val seenRefTypes = mutable.Set[ReferenceType]()
val sourceNames = mutable.HashMap[ReferenceType, Option[String]]()
def cachedSourceName(refType: ReferenceType): Option[String] =
sourceNames.getOrElseUpdate(refType, Try(refType.sourceName()).toOption)
def clear(): Unit = {
isCompiledWithIndyLambdasCache.clear()
refTypeToFileCache.clear()
refTypeToElementCache.clear()
customizedLocationsCache.clear()
lineToCustomizedLocationCache.clear()
seenRefTypes.clear()
sourceNames.clear()
}
}
}
| ilinum/intellij-scala | src/org/jetbrains/plugins/scala/debugger/ScalaPositionManager.scala | Scala | apache-2.0 | 40,034 |
import java.util.Timer
import java.util.TimerTask
import com.typesafe.scalalogging.LazyLogging
import org.apache.commons.daemon.Daemon
import org.slf4j.MarkerFactory
object JsvcDemo extends App {
new JsvcDemoDaemon().start
}
class JsvcDemoDaemon extends Daemon with LazyLogging {
val timer:Timer = new Timer()
val loggerTask = new JsvcDemo()
val shell = new JsvcSsh(5222, () => new ExceptionShell(loggerTask))
def destroy() { logger.debug("destroy") }
def init(context: org.apache.commons.daemon.DaemonContext) {
logger.debug("init")
logger.debug(context.toString)
}
def start() {
logger.debug("start")
timer.schedule(new RobustTimerTask() {
def work() {
logger.info(" running ...")
}
}, 0, 3000)
timer.schedule(loggerTask, 0, 500)
shell.start()
}
def stop() {
shell.stop()
timer.cancel()
logger.debug("stopped")
}
}
class JsvcDemo extends RobustTimerTask {
val some = MarkerFactory.getMarker("some")
var status:Seq[String] = Nil
def work() {
logger.info(some, " hello ...")
if (status.nonEmpty) {
val currentState = status.head
status = status.takeRight(status.size -1)
currentState match {
case "re" => throw new IllegalStateException("re")
case "ex" => throw new Exception("ex")
case "er" => throw new Error("er")
case _ => throw new Throwable("th")
}
}
}
}
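// Shell adapter for the SSH console: each word typed ("re", "ex", "er" or anything else) is
// queued on the demo task, which then throws the matching RuntimeException, Exception, Error
// or Throwable on its next run; `help`, `quit` and `exit` are handled specially.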
class ExceptionShell(task:JsvcDemo) extends JsvcSsh.ShellAdapter {
val SHELL_CMD_QUIT = "quit"
val SHELL_CMD_EXIT = "exit"
val SHELL_CMD_HELP = "help"
def handleUserInput(line:String) {
exitOn(line, Seq(SHELL_CMD_QUIT, SHELL_CMD_EXIT))
if (line.equalsIgnoreCase(SHELL_CMD_HELP)) {
writeln("Possible values are: " + Seq("re", "ex", "er", "th"))
} else {
writeln("=> \"" + line + "\"")
task.status = line.split(" ").toSeq
}
}
def prompt() {
val params = Seq(SHELL_CMD_QUIT, SHELL_CMD_EXIT, SHELL_CMD_HELP)
initPrompt(prompt = "eX> ", completer = params)
writeln("""
|*******************************
|* Welcome to Exception Shell. *
|*******************************""".stripMargin.trim)
handle((line) => handleUserInput(line))
}
}
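// A TimerTask that catches and logs every Throwable thrown by work(), so a single failing
// run cannot kill the shared java.util.Timer thread.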
trait RobustTimerTask extends TimerTask with LazyLogging {
def run() {
try {
work()
} catch {
case e:RuntimeException => logger.error("", e)
case e:Exception => logger.error("", e)
case e:Error => logger.error("", e)
case e:Throwable => logger.error("", e)
}
}
def work()
}
| ThStock/jsvc-wrapper | src/main/scala/JsvcDemo.scala | Scala | apache-2.0 | 2,573 |
package com.github.simonthecat.eventdrivenorders.orderservice
import org.apache.kafka.clients.consumer.KafkaConsumer
import org.apache.kafka.clients.producer.KafkaProducer
import scala.io.StdIn
/**
* Created by simon on 04.08.16.
*/
object OrderApp extends App {
val confirmationService = new ConfirmationService(
confirmationConsumer = new KafkaConsumer[String, String](kafka.storeConfirmationConsumer),
confirmationTopic = "order.confirmation",
replyProducer = new KafkaProducer[String, String](kafka.producerCfg),
replyTopic = "api.reply"
)
val orderService = new OrderProcessingService(
new KafkaConsumer[String, String](kafka.orderConsumerCfg),
"order.order",
new KafkaProducer[String, String](kafka.producerCfg),
"store.update"
)
confirmationService.start()
orderService.start()
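  // Block until the operator presses Enter, then shut both Kafka-backed services down.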
StdIn.readLine()
confirmationService.stop()
orderService.stop()
}
| simonthecat/event-driven-orders | order-service/src/main/scala/com/github/simonthecat/eventdrivenorders/orderservice/OrderApp.scala | Scala | mit | 912 |
/*
* Copyright 2010 LinkedIn
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.message
/**
* Indicates that a message failed its checksum and is corrupt
*/
class InvalidMessageException extends RuntimeException
| jinfei21/kafka | src/kafka/message/InvalidMessageException.scala | Scala | apache-2.0 | 743 |
package com.twitter.server.util
import com.twitter.concurrent.AsyncStream
import com.twitter.io.{Buf, Reader}
import com.twitter.util.events.{Event, Sink}
import java.util.logging.LogRecord
/**
* A utility to serialize the [[com.twitter.util.events.Sink]] in a format
* readable by [[http://goo.gl/iN9ozV Trace Viewer]].
*
* Add the following to the `onExit` hook to export the Sink to a file when the
* server exits, or place it behind an endpoint to be triggered by request.
*
* {{{
* val trace = Writer.fromOutputStream(new FileOutputStream("sink.trace.json"))
* val done = Reader.copy(TraceEventSink.serialize(Sink.default), trace) ensure trace.close()
* Await.result(done, 3.seconds)
* }}}
*/
object TraceEventSink {
private val comma = Buf.Utf8(",")
  private val nl = Buf.Utf8("\n")
private val leftBracket = Buf.Utf8("[")
private val sp = Buf.Utf8(" ")
private def showObject(o: Object): String = o match {
case r: LogRecord => s"${r.getLevel.toString} ${r.getMessage}"
case _ => o.toString
}
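  // Maps a util Event onto a Trace Viewer "instant" event ("ph" -> "i"): the timestamp is
  // converted to microseconds, the trace/span ids stand in for pid/tid, and any long, object
  // or double payload is carried in "args".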
private def asTraceEvent(e: Event): Buf = Buf.Utf8(
Json.serialize(Map(
"name" -> e.etype.id,
"cat" -> "",
"ph" -> "i",
"ts" -> (e.when.inMillis * 1000).toString,
"pid" -> e.getTraceId.getOrElse(0),
"tid" -> e.getSpanId.getOrElse(0),
"args" -> Map(
Seq(
"longVal" -> e.getLong,
"objectVal" -> e.getObject.map(showObject),
"doubleVal" -> e.getDouble
).filterNot(_._2.isEmpty):_*
)
))
)
/**
* Serialize a sink into the [[http://goo.gl/iN9ozV Trace Event]] format.
*/
def serialize(sink: Sink): Reader = {
val delim = nl.concat(comma).concat(sp)
val events: Seq[Buf] = sink.events.toSeq.map(asTraceEvent)
// Note: we leave out the "]" from the JSON array since it's optional. See:
// http://goo.gl/iN9ozV#heading=h.f2f0yd51wi15.
if (events.isEmpty) Reader.fromBuf(leftBracket) else Reader.concat(
Reader.fromBuf(leftBracket.concat(events.head)) +::
AsyncStream.fromSeq(events.tail.map { buf =>
Reader.fromBuf(delim.concat(buf))
})
)
}
}
| BuoyantIO/twitter-server | src/main/scala/com/twitter/server/util/TraceEventSink.scala | Scala | apache-2.0 | 2,145 |
package org.rplsd.condalang.command
import com.mongodb.casbah.commons.MongoDBObject
import org.rplsd.condalang.data.{BahanBaku, Kuantitas, Recipe}
import org.rplsd.condalang.util.DBConnection
import com.mongodb.casbah.Imports._
/**
* Created by Luqman on 11/27/2015.
*/
object read {
def recipe(s:String)(implicit dBConn: DBConnection): Option[Recipe] = {
val check1 = MongoDBObject(Recipe.nama_resep -> s)
val result = dBConn.recipeColl.findOne(check1)
if (result.isEmpty) {
println("Resep tidak ada di database")
None
}
else {
val obj = result.get
val bahan = obj.as[MongoDBObject](Recipe.bahan)
val keys = bahan.keys
val bahans = keys.map (key => key -> (bahan.as[BasicDBList](key))).toMap
.mapValues(l => (l.as[Double](0),l.as[String](1))).mapValues(t => Kuantitas(t._1,t._2))
val r = Recipe(obj.as[String](Recipe.nama_resep),
Some(bahans),
Some(obj.as[Double](Recipe.harga)))
Some(r)
}
}
def bahan_baku (b:String)(implicit dBConn: DBConnection): Option[BahanBaku] = {
val query = MongoDBObject(BahanBaku.nama -> b)
val result = dBConn.bahanBakuColl.findOne(query)
result.map( obj => {
val basicList = obj.as[BasicDBList](BahanBaku.kuantitas)
val kuantitas = Kuantitas(basicList.as[Double](0),basicList.as[String](1))
BahanBaku(b,kuantitas)
})
}
def all_bahan_baku (implicit dBConn: DBConnection): List[BahanBaku] = {
dBConn.bahanBakuColl.find().toIterator.map( obj => {
val basicList = obj.as[BasicDBList](BahanBaku.kuantitas)
val kuantitas = Kuantitas(basicList.as[Double](0),basicList.as[String](1))
BahanBaku(obj.as[String](BahanBaku.nama),kuantitas)
}).toList
}
def all_recipe (implicit dBConn: DBConnection): List[Recipe] = {
dBConn.recipeColl.find().toIterator.map ( obj => {
val bahan = obj.as[MongoDBObject](Recipe.bahan)
val keys = bahan.keys
val bahans = keys.map(key => key -> (bahan.as[BasicDBList](key))).toMap
.mapValues(l => (l.as[Double](0), l.as[String](1))).mapValues(t => Kuantitas(t._1, t._2))
val r = Recipe(obj.as[String](Recipe.nama_resep),
Some(bahans),
Some(obj.as[Double](Recipe.harga)))
r
}).toList
}
}
| luqmankusnadi/Tugas-RPLSD-DSL | src/main/scala/org/rplsd/condalang/command/read.scala | Scala | mit | 2,282 |
package actors.routing.minutes
import actors.routing.minutes.MinutesActorLike.MinutesLookup
import drt.shared.CrunchApi.{CrunchMinute, MillisSinceEpoch, MinutesContainer, StaffMinute}
import drt.shared.Terminals.Terminal
import drt.shared._
import drt.shared.dates.UtcDate
import services.SDate
import scala.concurrent.{ExecutionContextExecutor, Future}
object MockMinutesLookup {
implicit val ec: ExecutionContextExecutor = scala.concurrent.ExecutionContext.global
def cmLookup(mockData: MinutesContainer[CrunchMinute, TQM]): MinutesLookup[CrunchMinute, TQM] = {
val byDay = mockData.minutes.groupBy(m => SDate(m.minute).toUtcDate)
(terminalDate: (Terminal, UtcDate), _: Option[MillisSinceEpoch]) => {
val (_, date) = terminalDate
Future {
byDay.get(date).map(MinutesContainer[CrunchMinute, TQM])
}
}
}
def smLookup(mockData: MinutesContainer[StaffMinute, TM]): MinutesLookup[StaffMinute, TM] = {
val byDay = mockData.minutes.groupBy(m => SDate(m.minute).toUtcDate)
(terminalDate: (Terminal, UtcDate), _: Option[MillisSinceEpoch]) => {
val (_, date) = terminalDate
Future {
byDay.get(date).map(MinutesContainer[StaffMinute, TM])
}
}
}
}
| UKHomeOffice/drt-scalajs-spa-exploration | server/src/test/scala/actors/routing/minutes/MockMinutesLookup.scala | Scala | apache-2.0 | 1,231 |
/*
* Copyright 2009-2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.linkedin.norbert.cluster
sealed trait ClusterEvent
object ClusterEvents {
/**
* <code>ClusterEvent</code> which indicates that you are now connected to the cluster.
*
   * @param nodes the current set of <code>Node</code>s stored in the cluster metadata
*/
case class Connected(nodes: Set[Node]) extends ClusterEvent
/**
* <code>ClusterEvent</code> which indicates that the cluster topology has changed.
*
   * @param nodes the current set of <code>Node</code>s stored in the cluster metadata
*/
case class NodesChanged(nodes: Set[Node]) extends ClusterEvent
/**
* <code>ClusterEvent</code> which indicates that the cluster is now disconnected.
*/
case object Disconnected extends ClusterEvent
/**
* <code>ClusterEvent</code> which indicates that the cluster is now shutdown.
*/
case object Shutdown extends ClusterEvent
}
/**
* A trait to be implemented by classes which wish to receive cluster events. Register <code>ClusterListener</code>s
* with <code>ClusterClient#addListener(listener)</code>.
*/
trait ClusterListener {
/**
* Handle a cluster event.
*
* @param event the <code>ClusterEvent</code> to handle
*/
def handleClusterEvent(event: ClusterEvent): Unit
}
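/**
 * Hedged example (not part of the original file): a minimal listener that simply
 * logs each cluster event. Registration with a ClusterClient is assumed to work as
 * described in the ClusterListener scaladoc above.
 */
class LoggingClusterListener extends ClusterListener {
  def handleClusterEvent(event: ClusterEvent): Unit = event match {
    case ClusterEvents.Connected(nodes) => println("Connected to cluster with " + nodes.size + " nodes")
    case ClusterEvents.NodesChanged(nodes) => println("Cluster topology changed, now " + nodes.size + " nodes")
    case ClusterEvents.Disconnected => println("Disconnected from cluster")
    case ClusterEvents.Shutdown => println("Cluster shut down")
  }
}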
case class ClusterListenerKey(id: Long)
| rhavyn/norbert | cluster/src/main/scala/com/linkedin/norbert/cluster/ClusterEvent.scala | Scala | apache-2.0 | 2,080 |
package cluster
import java.io.FileWriter
import org.apache.spark.ml.clustering.LDA
import org.apache.spark.ml.feature._
import org.apache.spark.ml.linalg.Vector
import org.apache.spark.sql.functions._
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
object LDANewsClusteringCoreNLP {
val spark = SparkSession.builder.appName("Simple Application").config("spark.master", "local[*]").getOrCreate()
import spark.implicits._
def getDF() = {
val df = spark.read.json("data/cleaned.news.json.gz").select("canonical_link", "text").distinct().cache
print(s"total number of records: ${df.count()}")
df
}
def main(args: Array[String]): Unit = {
val df = getDF()
val (rescaledDF: DataFrame, vocabulary) = preprocess(df)
    val writer = new FileWriter("vocabulary.txt")
    vocabulary.foreach(vocab => writer.write(s"$vocab\n"))
    writer.close()
rescaledDF.cache()
val maxIter = 200
for (i <- 7 to 10) {
val (topics, lp) = train(rescaledDF, i, maxIter, vocabulary)
topics.repartition(1).write.json(s"topics$i.$lp.json")
showTopics(topics, vocabulary)
}
}
val nDropMostCommon = 1000
val maxTermsPerTopic = 100
val dropRightPercentage = 0.5
val udfSeqSize = udf[Int, Seq[String]](_.size)
def format5Digit(seq: Seq[Double]): Seq[String] = {
seq.map("%.5f".format(_))
}
val udfFormatVector5Digit = udf[Seq[String], Vector](vec => format5Digit(vec.toArray))
val udfFormatSeq5Digit = udf[Seq[String], Seq[Double]](format5Digit(_))
private def train(rescaledData: DataFrame, k: Int, iter: Int, vocabulary: Array[String]): (DataFrame, Double) = {
val start = System.currentTimeMillis()
val lda = new LDA().setK(k).setMaxIter(iter)
val model = lda.fit(rescaledData)
val ll = model.logLikelihood(rescaledData)
val lp = model.logPerplexity(rescaledData)
println(s"The lower bound on the log likelihood of the entire corpus: $ll")
println(s"The upper bound on perplexity: $lp")
// Describe topics.
val topics = model.describeTopics(maxTermsPerTopic)
println("The topics described by their top-weighted terms:")
val udfTermIndex2Term = udf[Seq[String], Seq[Int]](seq => seq.map(id => vocabulary(id)))
topics.select(
$"topic",
$"termIndices",
udfTermIndex2Term($"termIndices").as("term"),
udfFormatSeq5Digit($"termWeights").as("termWeights")
).show(false)
// Shows the result.
val transformed = model.transform(rescaledData)
transformed.printSchema()
transformed.select(
col("canonical_link"),
udfFormatVector5Digit(col("topicDistribution")).as("topicDistribution")
).show(false)
val elapsed = (System.currentTimeMillis() - start) / 1000.0
println(s"totally spend $elapsed second")
(topics, lp)
}
def showTopics(topics: DataFrame, vocabArray: Array[String]): Unit = {
topics.collect().foreach {
case Row(topic, termIndices: Seq[Int], termWeights: Seq[Double]) =>
println(s"TOPIC $topic ${termIndices.size} ${termWeights.size}")
termIndices.zipWithIndex.foreach { case (term, index) =>
val weight = "%.5f".format(termWeights(index))
val vocab = vocabArray(term)
println(s"$weight\\t$vocab\\t$term")
}
println()
}
}
private def preprocess(df: DataFrame) = {
//
// tokenizer
//
val coreNLP = new CoreNLP().setInputCol("text").setOutputCol("words")
val tokenized = coreNLP.transform(df).filter(size($"words") > 50)
//
// words removal
//
// remove english stop word and numbers
val numbers: Array[String] = (0 to 3000).map(_.toString).toArray
val a2z = ('a' to 'z').map(_.toString).toArray
val stopWords: Array[String] = StopWordsRemover.loadDefaultStopWords("english") ++ numbers ++ a2z
val remover = new StopWordsRemover().
setInputCol("words").
setOutputCol("removed").setStopWords(stopWords)
val removed = remover.transform(tokenized)
removed.show()
//
// vectorizer
//
// val minDF = 0.2 // collected term appear more than 20% of the documents of the corpus
// val minDF = 2 // collected term appear more than 2 documents of the corpus
// val minTF = 0.2 // term having more than 20% appearance in the documents
// val minTF = 2 // term having appear more than 2 in the documents
//
// val vocabSize = 2900000
val countVectorizer = new CountVectorizer().setInputCol("removed").
setOutputCol("features").setMinDF(3).setMinTF(2)
//
// fit to get the vocabulary
//
// the vocabulary are ordered by commonality
//
removed.cache()
val vectorModel = countVectorizer.fit(removed)
println("most common words: " + vectorModel.vocabulary.take(20).mkString(" "))
println(s"totally ${vectorModel.vocabulary.length} of words in the vocabulary")
//
// having a new vector model by dropping the some most common words
//
val dropRight = (vectorModel.vocabulary.size * dropRightPercentage).toInt
val vocabulary = vectorModel.vocabulary.drop(nDropMostCommon).dropRight(dropRight)
val newVectorModel = new CountVectorizerModel(vocabulary).
setInputCol("removed").setOutputCol("features")
//
// transform from words to vectors
//
val featurizedData = newVectorModel.transform(removed)
removed.unpersist()
val dfFeaturized: DataFrame = featurizedData.select("canonical_link", "words", "features")
(dfFeaturized, vocabulary)
}
}
| rockiey/explore-spark | src/main/scala/cluster/LDANewsClusteringCoreNLP.scala | Scala | mit | 5,537 |
package com.twitter.scalding.typed.functions
import com.twitter.algebird.{ Aggregator, Ring, Semigroup, Fold }
import java.util.Random
import java.io.Serializable
case class Constant[T](result: T) extends Function1[Any, T] {
def apply(a: Any) = result
}
case class ConstantKey[K, V](key: K) extends Function1[V, (K, V)] {
def apply(v: V) = (key, v)
}
case class DebugFn[A]() extends Function1[A, A] {
def apply(a: A) = {
println(a)
a
}
}
case class WithConstant[A, B](constant: B) extends Function1[A, (A, B)] {
def apply(a: A) = (a, constant)
}
case class MakeKey[K, V](fn: V => K) extends Function1[V, (K, V)] {
def apply(v: V) = (fn(v), v)
}
case class MapOptionToFlatMap[A, B](fn: A => Option[B]) extends Function1[A, List[B]] {
def apply(a: A) = fn(a) match {
case None => Nil
case Some(a) => a :: Nil
}
}
case class PartialFunctionToFilter[A, B](fn: PartialFunction[A, B]) extends Function1[A, Boolean] {
def apply(a: A) = fn.isDefinedAt(a)
}
case class MapValueStream[A, B](fn: Iterator[A] => Iterator[B]) extends Function2[Any, Iterator[A], Iterator[B]] {
def apply(k: Any, vs: Iterator[A]) = fn(vs)
}
case class Drop[A](count: Int) extends Function1[Iterator[A], Iterator[A]] {
def apply(as: Iterator[A]) = as.drop(count)
}
case class DropWhile[A](fn: A => Boolean) extends Function1[Iterator[A], Iterator[A]] {
def apply(as: Iterator[A]) = as.dropWhile(fn)
}
case class Take[A](count: Int) extends Function1[Iterator[A], Iterator[A]] {
def apply(as: Iterator[A]) = as.take(count)
}
case class TakeWhile[A](fn: A => Boolean) extends Function1[Iterator[A], Iterator[A]] {
def apply(as: Iterator[A]) = as.takeWhile(fn)
}
case class Identity[A, B](eqTypes: EqTypes[A, B]) extends Function1[A, B] {
def apply(a: A) = eqTypes(a)
}
object Identity extends Serializable {
def apply[A](): Identity[A, A] = Identity[A, A](EqTypes.reflexive[A])
}
case class Widen[A, B](subTypes: SubTypes[A, B]) extends Function1[A, B] {
def apply(a: A) = subTypes(a)
}
case class GetKey[K]() extends Function1[(K, Any), K] {
def apply(kv: (K, Any)) = kv._1
}
case class GetValue[V]() extends Function1[(Any, V), V] {
def apply(kv: (Any, V)) = kv._2
}
case class Swap[A, B]() extends Function1[(A, B), (B, A)] {
def apply(ab: (A, B)) = (ab._2, ab._1)
}
case class SumAll[T](sg: Semigroup[T]) extends Function1[TraversableOnce[T], Iterator[T]] {
def apply(ts: TraversableOnce[T]) = sg.sumOption(ts).iterator
}
case class Fill[A](size: Int) extends Function1[A, Iterator[A]] {
def apply(a: A) = Iterator.fill(size)(a)
}
case class AggPrepare[A, B, C](agg: Aggregator[A, B, C]) extends Function1[A, B] {
def apply(a: A) = agg.prepare(a)
}
case class AggPresent[A, B, C](agg: Aggregator[A, B, C]) extends Function1[B, C] {
def apply(a: B) = agg.present(a)
}
case class FoldLeftIterator[A, B](init: B, fold: (B, A) => B) extends Function1[Iterator[A], Iterator[B]] {
def apply(as: Iterator[A]) = Iterator.single(as.foldLeft(init)(fold))
}
case class ScanLeftIterator[A, B](init: B, fold: (B, A) => B) extends Function1[Iterator[A], Iterator[B]] {
def apply(as: Iterator[A]) = as.scanLeft(init)(fold)
}
case class FoldIterator[A, B](fold: Fold[A, B]) extends Function1[Iterator[A], Iterator[B]] {
def apply(as: Iterator[A]) = Iterator.single(fold.overTraversable(as))
}
case class FoldWithKeyIterator[K, A, B](foldfn: K => Fold[A, B]) extends Function2[K, Iterator[A], Iterator[B]] {
def apply(k: K, as: Iterator[A]) = Iterator.single(foldfn(k).overTraversable(as))
}
case class AsRight[A, B]() extends Function1[B, Either[A, B]] {
def apply(b: B) = Right(b)
}
case class AsLeft[A, B]() extends Function1[A, Either[A, B]] {
def apply(b: A) = Left(b)
}
case class TuplizeFunction[A, B, C](fn: (A, B) => C) extends Function1[(A, B), C] {
def apply(ab: (A, B)) = fn(ab._1, ab._2)
}
case class DropValue1[A, B, C]() extends Function1[(A, (B, C)), (A, C)] {
def apply(abc: (A, (B, C))) = (abc._1, abc._2._2)
}
case class RandomNextInt(seed: Long, modulus: Int) extends Function1[Any, Int] {
private[this] lazy val rng = new Random(seed)
def apply(a: Any) = {
val raw = rng.nextInt(modulus) + a.hashCode()
val mod = raw % modulus
if (mod >= 0) mod else mod + modulus
}
}
case class RandomFilter(seed: Long, fraction: Double) extends Function1[Any, Boolean] {
private[this] lazy val rng = new Random(seed)
def apply(a: Any) = rng.nextDouble < fraction
}
case class Count[T](fn: T => Boolean) extends Function1[T, Long] {
def apply(t: T) = if (fn(t)) 1L else 0L
}
case class SizeOfSet[T]() extends Function1[Set[T], Long] {
def apply(s: Set[T]) = s.size.toLong
}
case class HeadSemigroup[T]() extends Semigroup[T] {
def plus(a: T, b: T) = a
// Don't enumerate every item, just take the first
override def sumOption(to: TraversableOnce[T]): Option[T] =
if (to.isEmpty) None
else Some(to.toIterator.next)
}
case class SemigroupFromFn[T](fn: (T, T) => T) extends Semigroup[T] {
def plus(a: T, b: T) = fn(a, b)
}
case class SemigroupFromProduct[T](ring: Ring[T]) extends Semigroup[T] {
def plus(a: T, b: T) = ring.times(a, b)
}
/**
* This is a semigroup that throws IllegalArgumentException if
* there is more than one item. This is used to trigger optimizations
* where the user knows there is at most one value per key.
*/
case class RequireSingleSemigroup[T]() extends Semigroup[T] {
def plus(a: T, b: T) = throw new IllegalArgumentException(s"expected only one item, calling plus($a, $b)")
}
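// Hedged illustration (not from the original source): with this semigroup,
// RequireSingleSemigroup[Int]().plus(1, 2) throws IllegalArgumentException,
// surfacing keys that unexpectedly carry more than one value instead of
// silently combining them.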
case class ConsList[T]() extends Function1[(T, List[T]), List[T]] {
def apply(results: (T, List[T])) = results._1 :: results._2
}
case class ReverseList[T]() extends Function1[List[T], List[T]] {
def apply(results: List[T]) = results.reverse
}
case class ToList[A]() extends Function1[Iterator[A], Iterator[List[A]]] {
def apply(as: Iterator[A]) =
// This should never really happen, but we are being defensive
if (as.isEmpty) Iterator.empty
else Iterator.single(as.toList)
}
case class ToSet[A]() extends Function1[A, Set[A]] {
// this allows us to access Set1 without boxing into varargs
private[this] val empty = Set.empty[A]
def apply(a: A) = empty + a
}
case class MaxOrd[A, B >: A](ord: Ordering[B]) extends Function2[A, A, A] {
def apply(a1: A, a2: A) =
if (ord.lt(a1, a2)) a2 else a1
}
case class MaxOrdBy[A, B](fn: A => B, ord: Ordering[B]) extends Function2[A, A, A] {
def apply(a1: A, a2: A) =
if (ord.lt(fn(a1), fn(a2))) a2 else a1
}
case class MinOrd[A, B >: A](ord: Ordering[B]) extends Function2[A, A, A] {
def apply(a1: A, a2: A) =
if (ord.lt(a1, a2)) a1 else a2
}
case class MinOrdBy[A, B](fn: A => B, ord: Ordering[B]) extends Function2[A, A, A] {
def apply(a1: A, a2: A) =
if (ord.lt(fn(a1), fn(a2))) a1 else a2
}
case class FilterKeysToFilter[K](fn: K => Boolean) extends Function1[(K, Any), Boolean] {
def apply(kv: (K, Any)) = fn(kv._1)
}
case class FlatMapValuesToFlatMap[K, A, B](fn: A => TraversableOnce[B]) extends Function1[(K, A), TraversableOnce[(K, B)]] {
def apply(ka: (K, A)) = {
val k = ka._1
fn(ka._2).map((k, _))
}
}
case class MergeFlatMaps[A, B](fns: Iterable[A => TraversableOnce[B]]) extends Function1[A, TraversableOnce[B]] {
def apply(a: A) = fns.iterator.flatMap { fn => fn(a) }
}
case class MapValuesToMap[K, A, B](fn: A => B) extends Function1[(K, A), (K, B)] {
def apply(ka: (K, A)) = (ka._1, fn(ka._2))
}
case class EmptyGuard[K, A, B](fn: (K, Iterator[A]) => Iterator[B]) extends Function2[K, Iterator[A], Iterator[B]] {
def apply(k: K, as: Iterator[A]) =
if (as.nonEmpty) fn(k, as) else Iterator.empty
}
case class FilterGroup[A, B](fn: ((A, B)) => Boolean) extends Function2[A, Iterator[B], Iterator[B]] {
def apply(a: A, bs: Iterator[B]) = bs.filter(fn(a, _))
}
case class MapGroupMapValues[A, B, C](fn: B => C) extends Function2[A, Iterator[B], Iterator[C]] {
def apply(a: A, bs: Iterator[B]) = bs.map(fn)
}
case class MapGroupFlatMapValues[A, B, C](fn: B => TraversableOnce[C]) extends Function2[A, Iterator[B], Iterator[C]] {
def apply(a: A, bs: Iterator[B]) = bs.flatMap(fn)
}
object FlatMapFunctions extends Serializable {
case class FromIdentity[A]() extends Function1[A, Iterator[A]] {
def apply(a: A) = Iterator.single(a)
}
case class FromFilter[A](fn: A => Boolean) extends Function1[A, Iterator[A]] {
def apply(a: A) = if (fn(a)) Iterator.single(a) else Iterator.empty
}
case class FromMap[A, B](fn: A => B) extends Function1[A, Iterator[B]] {
def apply(a: A) = Iterator.single(fn(a))
}
case class FromFilterCompose[A, B](fn: A => Boolean, next: A => TraversableOnce[B]) extends Function1[A, TraversableOnce[B]] {
def apply(a: A) = if (fn(a)) next(a) else Iterator.empty
}
case class FromMapCompose[A, B, C](fn: A => B, next: B => TraversableOnce[C]) extends Function1[A, TraversableOnce[C]] {
def apply(a: A) = next(fn(a))
}
case class FromFlatMapCompose[A, B, C](fn: A => TraversableOnce[B], next: B => TraversableOnce[C]) extends Function1[A, TraversableOnce[C]] {
def apply(a: A) = fn(a).flatMap(next)
}
}
object ComposedFunctions extends Serializable {
case class ComposedMapFn[A, B, C](fn0: A => B, fn1: B => C) extends Function1[A, C] {
def apply(a: A) = fn1(fn0(a))
}
case class ComposedFilterFn[-A](fn0: A => Boolean, fn1: A => Boolean) extends Function1[A, Boolean] {
def apply(a: A) = fn0(a) && fn1(a)
}
/**
* This is only called at the end of a task, so might as well make it stack safe since a little
* extra runtime cost won't matter
*/
case class ComposedOnComplete(fn0: () => Unit, fn1: () => Unit) extends Function0[Unit] {
def apply(): Unit = {
@annotation.tailrec
def loop(fn: () => Unit, stack: List[() => Unit]): Unit =
fn match {
case ComposedOnComplete(left, right) => loop(left, right :: stack)
case notComposed =>
notComposed()
stack match {
case h :: tail => loop(h, tail)
case Nil => ()
}
}
loop(fn0, List(fn1))
}
}
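  // Hedged illustration (not from the original source): for plain callbacks a, b, c,
  // ComposedOnComplete(ComposedOnComplete(a, b), c)() invokes a, b and then c in order,
  // using the explicit `stack` list rather than the call stack, so deeply nested
  // compositions cannot overflow.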
case class ComposedMapGroup[A, B, C, D](
f: (A, Iterator[B]) => Iterator[C],
g: (A, Iterator[C]) => Iterator[D]) extends Function2[A, Iterator[B], Iterator[D]] {
def apply(a: A, bs: Iterator[B]) = {
val cs = f(a, bs)
if (cs.nonEmpty) g(a, cs)
else Iterator.empty
}
}
}
| jzmq/scalding | scalding-core/src/main/scala/com/twitter/scalding/typed/functions/Functions.scala | Scala | apache-2.0 | 10,444 |
/*
* the monty hall problem is a simple game show based probability puzzle with
* a puzzling outcome :)
*
* Suppose you are on a gameshow and you are given the choice of 3 doors. Behind
 * one of these doors is the prize and behind the others a goat. Only the
 * moderator knows behind which door the prize is and will open one door with a
 * goat after you have made your first choice. Next you can choose whether you want to
* switch doors or not.
*
* Question:
 * What is the best strategy? Stay or switch?
* What are the probabilities of winning for each of these strategies?
*
*/
object MontyHall {
import probability.probdsl._
// first we want to encode our state.
//
// these are the doors one can choose from:
sealed abstract class Door
object A extends Door { override def toString = "A" }
object B extends Door { override def toString = "B" }
object C extends Door { override def toString = "C" }
val doors = List(A,B,C)
// the state class used to track the experiments' state
  final case class State(prize:Door,   // door the prize is behind
chosen:Door, // door currently chosen by player
open:Door // door opened by host
)
// just for the encoding
sealed abstract class Winning
  object Loser extends Winning { override def toString = "Loser" }
  object Winner extends Winning { override def toString = "Winner" }
  // and a testing function on state to find out if we win or lose
  def testWinner(s:State) = if(s.prize == s.chosen) Winner else Loser
/*
* Let us encode the problem with random variables:
*
   * P = doors : the door the prize was put behind
   * C1 = doors : the door chosen in the first round by the player
* O = doors : the door opened by show's host
*/
/*
* P(P = A) = 1/3
* P(P = B) = 1/3
* P(P = C) = 1/3
*/
def hide = uniform(doors)
/*
* and then let the player choose one door:
* P(C1 = A) = 1/3
* P(C1 = B) = 1/3
   * P(C1 = C) = 1/3
*/
def choose = uniform(doors)
/*
* and compute probability distribution of host opening a specific door
* given the event P and C1:
* P(O|C1,P)
* with O != C1 and O != P
*/
def open(hidden:Door, chosen:Door) =
uniform( doors.filter { x => x != hidden && x != chosen } )
// play the first round (until game host will open a door)
def firstRound = {
val p = hide
val c = choose
State(p, c, open(p,c))
}
  // finally implement strategy 'stay'
def stay(s:State) = s
/*
* and strategy 'switch' where we choose a door C2 with
* C2 != O and C2 != C1.
* find P(C2|O, C1, P)
*/
def switchDoor(s:State) =
uniform( doors.filter{ x => x != s.open && x != s.chosen}.map { door =>
State(s.prize, door, s.open)
})
def main(args:Array[String]) = run
// print some results
def run = {
println("stay:\\n" + normalizedProb { testWinner(stay(firstRound)) } + "\\n")
println("switch:\\n" + normalizedProb { testWinner(switchDoor(firstRound)) })
}
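  // Hedged sanity check (reasoning only, not executed here): staying wins only when
  // the first pick already hides the prize, so P(win | stay) = 1/3; switching wins
  // exactly when the first pick was a goat, so P(win | switch) = 2/3. The printed
  // distributions should therefore put roughly 1/3 on Winner for 'stay' and 2/3 on
  // Winner for 'switch'.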
}
| urso/scala_prob | examples/MontyHall.scala | Scala | bsd-3-clause | 3,055 |
/*
* The MIT License (MIT)
*
* Copyright (c) 2015 Ryan C. Brozo
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.ryanbrozo.akka.http.hawk.common
object Imports extends Common with Credentials
| ryanbrozo/akka-http-hawk | lib/src/test/scala/com/ryanbrozo/akka/http/hawk/common/Imports.scala | Scala | mit | 1,239 |
package nabab
object Moralization {
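  // Moralization of a directed graph (e.g. a Bayesian network): for every node,
  // "marry" each pair of its parents by adding an edge between them, then drop all
  // edge directions. The resulting moral graph is what junction-tree style inference
  // algorithms operate on.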
def moralize(graph: Graph): UnorientedGraph = {
import graph.factory
val edges = for {
node <- graph.nodes.toIterator;
parents = graph.origins(node).toList;
parentCount = parents.size;
iParent1 <- 0 until parentCount - 1;
parent1 = parents(iParent1);
iParent2 <- iParent1 + 1 until parentCount;
parent2 = parents(iParent2)
} yield {
EdgeDefinition(parent1, parent2)
}
DefaultUnorientedGraph.disorient(graph.add(newEdges = edges.toSeq))
}
}
| ochafik/nabab | ScalaPort/src/main/scala/Moralization.scala | Scala | mit | 554 |
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.sparta.serving.core.policy.status
import java.util.concurrent.TimeUnit
import akka.actor.{Actor, _}
import akka.event.slf4j.SLF4JLogging
import akka.pattern.gracefulStop
import akka.util.Timeout
import com.stratio.sparta.serving.core.CuratorFactoryHolder
import com.stratio.sparta.serving.core.constants.AkkaConstant._
import com.stratio.sparta.serving.core.constants.{AkkaConstant, AppConstant}
import com.stratio.sparta.serving.core.exception.ServingCoreException
import com.stratio.sparta.serving.core.helpers.ResourceManagerLink
import com.stratio.sparta.serving.core.models._
import com.stratio.sparta.serving.core.policy.status.PolicyStatusActor._
import org.apache.curator.framework.CuratorFramework
import org.apache.curator.framework.recipes.cache.{NodeCache, NodeCacheListener}
import org.json4s.jackson.Serialization.{read, write}
import scala.collection.JavaConversions
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}
class PolicyStatusActor(curatorFramework: CuratorFramework)
extends Actor with SLF4JLogging with SpartaSerializer {
override def receive: Receive = {
case Create(policyStatus) => sender ! create(policyStatus)
case Update(policyStatus) => sender ! update(policyStatus)
case FindAll => findAll()
case DeleteAll => deleteAll()
case PolicyStatusActor.Kill(name) => sender ! kill(name)
case AddListener(name, callback) => addListener(name, callback)
case Delete(id) => sender ! delete(id)
}
def kill(policyName: String): Boolean = {
implicit val timeout: Timeout = Timeout(3L, TimeUnit.SECONDS)
val Stopped = true
val NotStopped = false
val pActor = context.actorSelection(cleanActorName(policyName)).resolveOne().value
pActor match {
case Some(Success(actor)) =>
val stopped = gracefulStop(actor, 2 seconds)
Await.result(stopped, 3 seconds) match {
case false =>
log.warn(s"Sending the Kill message to the actor with name: $policyName")
context.system.stop(actor)
case true =>
log.warn(s"Stopped correctly the actor with name: $policyName")
}
Stopped
case Some(Failure(e)) =>
log.warn(s"Failure getting policy actor with name: $policyName actor to kill." +
s" Exception: ${e.getLocalizedMessage}")
NotStopped
case None =>
log.warn(s"There is no policy actor with name: $policyName actor to kill")
NotStopped
}
}
def update(policyStatus: PolicyStatusModel): Option[PolicyStatusModel] = {
val statusPath = s"${AppConstant.ContextPath}/${policyStatus.id}"
//TODO check the correct statuses
if (Option(curatorFramework.checkExists.forPath(statusPath)).isDefined) {
val ips = read[PolicyStatusModel](new String(curatorFramework.getData.forPath(statusPath)))
log.info(s">> Updating context ${policyStatus.id} : <${ips.status}> to <${policyStatus.status}>")
curatorFramework.setData().forPath(statusPath, write(policyStatus).getBytes)
Some(policyStatus)
} else None
}
def create(policyStatus: PolicyStatusModel): Option[PolicyStatusModel] = {
val statusPath = s"${AppConstant.ContextPath}/${policyStatus.id}"
if (CuratorFactoryHolder.existsPath(statusPath)) {
val ips = read[PolicyStatusModel](new String(curatorFramework.getData.forPath(statusPath)))
log.info(s">> Updating context ${policyStatus.id} : <${ips.status}> to <${policyStatus.status}>")
curatorFramework.setData().forPath(statusPath, write(policyStatus).getBytes)
Some(policyStatus)
} else {
log.info(s">> Creating policy context |${policyStatus.id}| to <${policyStatus.status}>")
validate(None, policyStatus.status)
curatorFramework.create.creatingParentsIfNeeded.forPath(statusPath, write(policyStatus).getBytes)
Some(policyStatus)
}
}
def setNotStartedStatus(policyStatus: PolicyStatusModel): Option[PolicyStatusModel] = {
val statusPath = s"${AppConstant.ContextPath}/${policyStatus.id}"
log.info(s">> Creating policy context |${policyStatus.id}| to <${policyStatus.status}>")
validate(None, policyStatus.status)
curatorFramework.create.creatingParentsIfNeeded.forPath(statusPath, write(policyStatus).getBytes)
Some(policyStatus)
}
def findAll(): Unit = {
sender ! Response(
Try {
val contextPath = s"${AppConstant.ContextPath}"
if (CuratorFactoryHolder.existsPath(contextPath)) {
val children = curatorFramework.getChildren.forPath(contextPath)
val policiesStatus = JavaConversions.asScalaBuffer(children).toList.map(element =>
read[PolicyStatusModel](new String(
curatorFramework.getData.forPath(s"${AppConstant.ContextPath}/$element")
))
)
PoliciesStatusModel(policiesStatus, ResourceManagerLink.getLink)
} else PoliciesStatusModel(Seq(), ResourceManagerLink.getLink)
}
)
}
def deleteAll(): Unit = {
sender ! ResponseDelete(Try({
val contextPath = s"${AppConstant.ContextPath}"
if (CuratorFactoryHolder.existsPath(contextPath)) {
val children = curatorFramework.getChildren.forPath(contextPath)
val policiesStatus = JavaConversions.asScalaBuffer(children).toList.map(element =>
read[PolicyStatusModel](new String(curatorFramework.getData.forPath(s"${AppConstant.ContextPath}/$element")))
)
policiesStatus.foreach(policyStatus => {
val statusPath = s"${AppConstant.ContextPath}/${policyStatus.id}"
if (Option(curatorFramework.checkExists.forPath(statusPath)).isDefined) {
log.info(s">> Deleting context ${policyStatus.id} >")
curatorFramework.delete().forPath(statusPath)
} else throw new ServingCoreException(ErrorModel.toString(
new ErrorModel(ErrorModel.CodeNotExistsPolicyWithId, s"No policy context with id ${policyStatus.id}.")))
})
}
}))
}
def delete(id: String): ResponseDelete =
ResponseDelete(
Try {
val statusPath = s"${AppConstant.ContextPath}/$id"
if (Option(curatorFramework.checkExists.forPath(statusPath)).isDefined) {
log.info(s">> Deleting context $id >")
curatorFramework.delete().forPath(statusPath)
} else throw new ServingCoreException(ErrorModel.toString(
new ErrorModel(ErrorModel.CodeNotExistsPolicyWithId, s"No policy context with id $id.")))
}
)
/**
* Adds a listener to one policy and executes the callback when it changed.
*
* @param id of the policy.
* @param callback with a function that will be executed.
*/
def addListener(id: String, callback: (PolicyStatusModel, NodeCache) => Unit): Unit = {
val contextPath = s"${AppConstant.ContextPath}/$id"
val nodeCache: NodeCache = new NodeCache(curatorFramework, contextPath)
nodeCache.getListenable.addListener(new NodeCacheListener {
override def nodeChanged(): Unit = {
Try(new String(nodeCache.getCurrentData.getData)) match {
case Success(value) =>
callback(read[PolicyStatusModel](value), nodeCache)
case Failure(e) =>
log.error(s"NodeCache value: ${nodeCache.getCurrentData}", e)
}
}
})
nodeCache.start()
}
}
object PolicyStatusActor {
case class Kill(name: String)
case class Update(policyStatus: PolicyStatusModel)
case class Create(policyStatus: PolicyStatusModel)
case class AddListener(name: String, callback: (PolicyStatusModel, NodeCache) => Unit)
case class Delete(id: String)
case object DeleteAll
case object FindAll
case class Response(policyStatus: Try[PoliciesStatusModel])
case class ResponseDelete(value: Try[Unit])
/**
* This map represents the state machine of one context.
*/
val StateMachine = Map(
None -> Seq(PolicyStatusEnum.NotStarted),
Some(PolicyStatusEnum.NotStarted) -> Seq(PolicyStatusEnum.Launched, PolicyStatusEnum.Failed),
Some(PolicyStatusEnum.Launched) -> Seq(PolicyStatusEnum.Starting, PolicyStatusEnum.Failed),
Some(PolicyStatusEnum.Starting) -> Seq(PolicyStatusEnum.Started, PolicyStatusEnum.Failed),
Some(PolicyStatusEnum.Started) -> Seq(PolicyStatusEnum.Stopping, PolicyStatusEnum.Failed),
Some(PolicyStatusEnum.Stopping) -> Seq(PolicyStatusEnum.Stopped, PolicyStatusEnum.Failed),
Some(PolicyStatusEnum.Stopped) -> Seq(PolicyStatusEnum.Launched, PolicyStatusEnum.Failed),
Some(PolicyStatusEnum.Failed) -> Seq(PolicyStatusEnum.Launched)
)
/**
* Validates with the StateMachine if one status could be changed to another.
*
* @param initialStatus that contains the currently status.
* @param finalStatus to change. If not one exception will be thrown.
*/
def validate(initialStatus: Option[PolicyStatusEnum.Value], finalStatus: PolicyStatusEnum.Value): Unit = {
if (!StateMachine.exists(_._1 == initialStatus))
throw new IllegalStateException(s"The status ${initialStatus.get} is not in the StateMachine")
if (!StateMachine.get(initialStatus).get.contains(finalStatus))
throw new IllegalStateException(s"Imposible change status from $initialStatus to $finalStatus")
}
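  // Hedged example (not part of the original source): given the StateMachine above,
  // validate(Some(PolicyStatusEnum.Started), PolicyStatusEnum.Stopping) returns normally,
  // while validate(Some(PolicyStatusEnum.Stopped), PolicyStatusEnum.Started) throws
  // IllegalStateException because Stopped may only move to Launched or Failed.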
}
/**
* Possible states that a policy could be when it was run.
*
* Launched: Sparta performs a spark-submit to the cluster.
* Starting: SpartaJob tries to start the job.
* Started: if the job was successfully started and the receiver is running.
* Failed: if the lifecycle fails.
* Stopping: Sparta sends a stop signal to the job to stop it gracefully.
* Stopped: the job is stopped.
*/
object PolicyStatusEnum extends Enumeration {
type status = Value
val Launched = Value("Launched")
val Starting = Value("Starting")
val Started = Value("Started")
val Failed = Value("Failed")
val Stopping = Value("Stopping")
val Stopped = Value("Stopped")
val NotStarted = Value("NotStarted")
}
| danielcsant/sparta | serving-core/src/main/scala/com/stratio/sparta/serving/core/policy/status/PolicyStatusActor.scala | Scala | apache-2.0 | 10,633 |
/*
* Copyright 2015 Webtrends (http://www.webtrends.com)
*
* See the LICENCE.txt file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.webtrends.harness.component
import akka.actor.{Actor, ActorRef}
import akka.pattern.ask
import akka.util.Timeout
import com.webtrends.harness.HarnessConstants
import scala.concurrent.duration._
import scala.concurrent.{Future, Promise}
import scala.util.{Failure, Success}
/**
 * This is a helper trait that enables developers who mix it in to interact easily
 * with the ComponentManager on the harness
*/
trait ComponentHelper {
this: Actor =>
import context.dispatcher
var componentManagerInitialized = false
var componentManager:Option[ActorRef] = None
/**
   * Resolves and caches the component manager actor reference. The lookup is retried until it
   * succeeds or a five-second deadline passes; most methods in this trait depend on it.
*/
def initComponentHelper : Future[ActorRef] = {
val p = Promise[ActorRef]()
def awaitComponentManager(timeOut: Deadline) {
if (timeOut.isOverdue() && !componentManagerInitialized) {
componentManagerInitialized = true
p failure ComponentException("Component Manager", "Failed to get component manager")
} else if (context != null) {
context.actorSelection(HarnessConstants.ComponentFullName).resolveOne()(1 second) onComplete {
case Success(s) =>
componentManager = Some(s)
componentManagerInitialized = true
p success s
case Failure(_) => awaitComponentManager(timeOut)
}
} else p failure ComponentException("Component Manager", "Context set to null, must have shut down")
}
componentManager match {
case Some(cm) => p success cm
case None =>
if (!componentManagerInitialized) {
val deadline = 5 seconds fromNow
awaitComponentManager(deadline)
} else {
p failure ComponentException("Component Manager", "Component manager did not initialize")
}
}
p.future
}
/**
* Wrapper function around request that allows the developer to not have to deal with the
* ComponentResponse return object, and just deal with the message that they care about
*
* @param name name of the component
* @param msg msg to send to the component
* @return
*/
def unwrapRequest[T, U](name:String, msg:ComponentRequest[T]) : Future[U] = {
val p = Promise[U]()
componentRequest(name, msg).mapTo[ComponentResponse[U]] onComplete {
case Success(s) => p success s.resp
case Failure(f) => p failure f
}
p.future
}
def request[T](name:String, msg:Any, childName:Option[String]=None) : Future[ComponentResponse[T]] =
componentRequest(name, ComponentRequest(msg, childName))
/**
* Simplest way to make a request directly to a component, will return a Future holding whatever the component returns
* @param name Name of the component
* @param msg Message to send it
*/
def unwrapSelfRequest[T](name:String, msg:AnyRef) : Future[T] = {
unwrapRequest[msg.type, T](name, ComponentRequest[msg.type](msg, Some(ComponentManager.ComponentRef)))
}
/**
* Wrapper function that allows developer to make requests to components individually without having to know about the
* ComponentManager as the parent that routes the messages to the various components
*
* @param name name of the component
* @param msg message you want to send to the component
* @return
*/
def componentRequest[T, U](name:String, msg:ComponentRequest[T]) : Future[ComponentResponse[U]] = {
val p = Promise[ComponentResponse[U]]()
initComponentHelper onComplete {
case Success(cm) =>
(cm ? Request(name, msg))(msg.timeout).mapTo[ComponentResponse[U]] onComplete {
case Success(s) => p success s
case Failure(f) => p failure f
}
case Failure(f) => p failure f
}
p.future
}
def selfMessage(name:String, msg:Any) =
componentMessage(name, ComponentMessage(msg, Some(ComponentManager.ComponentRef)))
/**
* Wrapper function that will allow you to send any message in and it will
* wrap the msg within a ComponentMessage case class
*
* @param name name of component
* @param msg message to send
* @param childName name of component's child, or 'self' if one wants to hit the component itself
*/
def message(name:String, msg:Any, childName:Option[String]=None) =
componentMessage(name, ComponentMessage(msg, childName))
/**
* Wrapper function that allows the developer to message components individually without having to know about the
* ComponentManager as the parent that routes the messages to the various components
*
* @param name name of the component
* @param msg message you want to send to the component
*/
def componentMessage[T](name:String, msg:ComponentMessage[T]) = {
initComponentHelper onComplete {
case Success(cm) =>
cm ! Message(name, msg)
case Failure(f) => throw f
}
}
/**
* Wrapper function that allows developers to get the actor reference for a particular component
*
* @param name the name of the component
* @param timeout implicit timeout value
* @return
*/
def getComponent(name:String)(implicit timeout:Timeout) : Future[ActorRef] = {
val p = Promise[ActorRef]()
initComponentHelper onComplete {
case Success(cm) =>
(cm ? GetComponent(name))(timeout).mapTo[Option[ActorRef]] onComplete {
case Success(s) =>
s match {
case Some(ref) => p success ref
case None => p failure ComponentNotFoundException("Component Manager", s"component $name not found")
}
case Failure(f) => p failure f
}
case Failure(f) => p failure f
}
p.future
}
}
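/**
 * Hedged usage sketch (not part of the original trait): an actor that mixes in
 * ComponentHelper and forwards a request to a hypothetical "metrics" component.
 * The component name and the messages are illustrative assumptions only.
 */
class ExampleComponentClient extends Actor with ComponentHelper {
  import context.dispatcher

  def receive: Receive = {
    case "report" =>
      val replyTo = sender()
      unwrapSelfRequest[String]("metrics", "current-report") onComplete {
        case Success(report) => replyTo ! report
        case Failure(err) => replyTo ! akka.actor.Status.Failure(err)
      }
  }
}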
| Crashfreak/wookiee | wookiee-core/src/main/scala/com/webtrends/harness/component/ComponentHelper.scala | Scala | apache-2.0 | 6,483 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples.ml
// scalastyle:off println
// $example on$
import org.apache.spark.ml.clustering.GaussianMixture
// $example off$
import org.apache.spark.sql.SparkSession
/**
* An example demonstrating Gaussian Mixture Model (GMM).
* Run with
* {{{
* bin/run-example ml.GaussianMixtureExample
* }}}
*/
object GaussianMixtureExample {
def main(args: Array[String]): Unit = {
// Creates a SparkSession
val spark = SparkSession.builder.appName(s"${this.getClass.getSimpleName}").getOrCreate()
// $example on$
// Loads data
val dataset = spark.read.format("libsvm").load("data/mllib/sample_kmeans_data.txt")
// Trains Gaussian Mixture Model
val gmm = new GaussianMixture()
.setK(2)
val model = gmm.fit(dataset)
    // output parameters of the mixture model
for (i <- 0 until model.getK) {
println("weight=%f\\nmu=%s\\nsigma=\\n%s\\n" format
(model.weights(i), model.gaussians(i).mean, model.gaussians(i).cov))
}
// $example off$
spark.stop()
}
}
// scalastyle:on println
| mrchristine/spark-examples-dbc | src/main/scala/org/apache/spark/examples/ml/GaussianMixtureExample.scala | Scala | apache-2.0 | 1,872 |
package org.gc.scala.learningscala.db.postgres.slick
import slick.driver.PostgresDriver.api._
import scala.concurrent.{Future, Await}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.util.{Failure, Success}
object Sample {
class Users(tag: Tag) extends Table[(Int, String)](tag, "users") {
def id = column[Int]("id")
def username = column[String]("username")
def * = (id, username)
}
def main(args: Array[String]) {
println("Scala Slick with PostgreSQL")
val connectionUrl = "jdbc:postgresql://localhost/scala?user=postgres&password=gw"
val users = TableQuery[Users]
println(users.baseTableRow.username.toString())
// val db = Database.forConfig("postgresDB")
val db = Database.forURL(connectionUrl)
    // Block on the query so the program doesn't exit before the asynchronous
    // result is printed (Await and Duration are already imported above).
    val rows = Await.result(db.run(users.result), Duration.Inf)
    println(rows.toString)
    rows.foreach { case (id, username) => println(s"$id: $username") }
    db.close()
}
}
| ganeshchand/learning-scala | src/main/scala-2.11/org/gc/scala/learningscala/db/postgres/slick/Sample.scala | Scala | mit | 1,018 |
/*
* Copyright (C) 2014 Pedro Vicente Gómez Sánchez.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.pedrovgs.prime
import org.scalatest.FunSuite
/**
*
* @author Pedro Vicente Gómez Sánchez
*/
class PrimeUtilSuite extends FunSuite {
test("2 is prime") {
assert(PrimeUtil.isPrime(2))
}
test("4 is not prime") {
assert(!PrimeUtil.isPrime(4))
}
test("7 is prime") {
assert(PrimeUtil.isPrime(7))
}
test("10 is not prime") {
assert(!PrimeUtil.isPrime(10))
}
test("7919 is prime") {
assert(PrimeUtil.isPrime(7919))
}
test("10472 is not prime") {
assert(!PrimeUtil.isPrime(10472))
}
test("104729 is prime") {
assert(PrimeUtil.isPrime(104729))
}
}
| pedrovgs/ProjectEuler | src/test/scala/com/github/pedrovgs/prime/PrimeUtilSuite.scala | Scala | apache-2.0 | 1,244 |
package org.scalajs.openui5.sap.ui.core
import scala.scalajs.js
import scala.scalajs.js.annotation.JSName
@JSName("sap.ui.core.HorizontalAlign")
@js.native
object HorizontalAlign extends js.Object {
// Must use String for now due to
// https://issues.scala-lang.org/browse/SI-9668
val Begin: String /*HorizontalAlign*/ = js.native
val Center: String /*HorizontalAlign*/ = js.native
val End: String /*HorizontalAlign*/ = js.native
val Left: String /*HorizontalAlign*/ = js.native
val Right: String /*HorizontalAlign*/ = js.native
}
| lastsys/scalajs-openui5 | src/main/scala/org/scalajs/openui5/sap/ui/core/HorizontalAlign.scala | Scala | mit | 548 |
package net.tomasherman.specus.server.api.net
import net.tomasherman.specus.common.api.net.Packet
/**
* This file is part of Specus.
*
* Specus is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Specus is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* You should have received a copy of the GNU General Public License
* along with Specus. If not, see <http://www.gnu.org/licenses/>.
*
*/
/** Repository used to look up the codec for a packet. There is one codec per packet type,
  * identified by the packet id (the first byte of every packet) and by the packet class.
  * Codecs *must be stateless*. */
trait CodecRepository {
/** Tries to find a suitable codec for packetId.
    * @param packetId Byte for which a suitable codec should be found.
* @return Either None if no codec is found or Option wrapped codec. */
def lookupCodec(packetId: Byte): Option[Codec[_ <: Packet]]
/** Tries to find a suitable codec for packet class.
    * @param p Packet whose class is used to find a suitable codec.
* @return Either None if no codec is found or Option wrapped codec. */
def lookupCodec(p: Packet): Option[Codec[_ <: Packet]]
  /** Registers a codec for lookup. Registration fails if another codec is already registered
    * with the same packet class or packet id as the registering codec.
    * @param codecClass Class of the codec to be registered. A new instance is created automatically.
    * @return True if the codec was registered correctly, false if registration failed. */
def registerCodec(codecClass: Class[_ <: Codec[_ <: Packet]]): Boolean
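  // Hedged usage sketch (the codec class and packet id below are illustrative
  // assumptions, not part of this API):
  //   if (repository.registerCodec(classOf[KeepAliveCodec]))
  //     repository.lookupCodec(0x00.toByte).foreach(codec => decode(codec))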
} | tomasherman/specus | server_api/src/main/scala/net/CodecRepository.scala | Scala | gpl-3.0 | 1,988 |
package s99.p07
object P07 {
/*
* Type [T] also works instead of [Any]. However, it's not bounded so pretty much is Any anyway :).
*/
def flatten(input: List[Any]): List[Any] =
input match {
case List() => List()
case head :: tail =>
head match {
case list: List[_] => flatten(list) ::: flatten(tail)
case elem => elem :: flatten(tail)
}
}
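  // Example: flatten(List(List(1, 1), 2, List(3, List(5, 8)))) == List(1, 1, 2, 3, 5, 8)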
/* This does not work because of static type matching. Maybe there are some clever ways to make it work.
* It compiles, but always calls combine(Any, ...) because head appears as element of type Any.
def combine(input: Any, remainder: List[Any]): List[Any] = input :: remainder
def combine(input: List[Any], remainder: List[Any]): List[Any] = flattenPolymorphic(input) ::: remainder
def flattenPolymorphic(input: List[Any]): List[Any] =
input match {
case List() => List()
case head :: tail => combine(head, flatten(tail))
}
*/
} | izmailoff/scala-s-99 | src/main/scala/s99/p07/P07.scala | Scala | apache-2.0 | 974 |
// See comment in BCodeBodyBuilder
// -target:jvm-1.6 -Ybackend:GenBCode -Yopt:l:none
// target enables stack map frame generation
class C {
// can't just emit a call to ???, that returns value of type Nothing$ (not Int).
def f1: Int = ???
def f2: Int = throw new Error("")
def f3(x: Boolean) = {
var y = 0
// cannot assign an object of type Nothing$ to Int
if (x) y = ???
else y = 1
y
}
def f4(x: Boolean) = {
var y = 0
// tests that whatever is emitted after the throw is valid (what? depends on opts, presence of stack map frames)
if (x) y = throw new Error("")
else y = 1
y
}
def f5(x: Boolean) = {
// stack heights need to be the same. ??? looks to the jvm like returning a value of
// type Nothing$, need to drop or throw it.
println(
if (x) { ???; 10 }
else 20
)
}
def f6(x: Boolean) = {
println(
if (x) { throw new Error(""); 10 }
else 20
)
}
def f7(x: Boolean) = {
println(
if (x) throw new Error("")
else 20
)
}
def f8(x: Boolean) = {
println(
if (x) throw new Error("")
else 20
)
}
}
object Test extends App {
new C()
}
| lampepfl/dotty | tests/run/nothingTypeNoOpt.scala | Scala | apache-2.0 | 1,205 |