code (stringlengths 5-1M) | repo_name (stringlengths 5-109) | path (stringlengths 6-208) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 5-1M)
---|---|---|---|---|---|
/*
* Copyright 2015 Dennis Vriend
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.dnvriend
import org.apache.camel.CamelContext
import org.apache.camel.builder.RouteBuilder
import org.apache.camel.test.junit4.CamelTestSupport
import org.scalatest.BeforeAndAfterAll
import com.github.dnvriend.playround.followup.FollowUpRoutes.PaymentRejection
import com.github.dnvriend.playround.followup.FollowUpRoutes.PaymentRejectionWithHistory
class FollowUpTest extends CamelTestSupport with TestSpec with BeforeAndAfterAll {
override def createCamelContext(): CamelContext = camelContext
override protected def beforeAll(): Unit = setUp() // required as CamelTestSupport uses junit before for initialization
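// each follow-up action route is stubbed with a mock endpoint so expectations can be set per action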
override def createRouteBuilder(): RouteBuilder = new RouteBuilder {
def configure(): Unit = {
from("direct:retryCollection").to("mock:retryCollection")
from("direct:withdrawTicket").to("mock:withdrawTicket")
from("direct:notify").to("mock:notify")
from("direct:cancelSubscription").to("mock:cancelSubscription")
}
}
lazy val mockRetryCollection = getMockEndpoint("mock:retryCollection")
lazy val mockWithdrawTicket = getMockEndpoint("mock:withdrawTicket")
lazy val mockNotify = getMockEndpoint("mock:notify")
lazy val mockCancelSubscription = getMockEndpoint("mock:cancelSubscription")
"FollowUp" should "trigger actions in line with the classification of the payment rejection" in {
val deployExecute = deploy("followUpExecute.bpmn20.xml")
val deployTrigger = deploy("followUpTrigger.bpmn20.xml")
deployExecute should be a 'success
deployTrigger should be a 'success
val rejection = PaymentRejection(0, 0, "P201601", "ABC", "DEF")
val withHistory = PaymentRejectionWithHistory(rejection, List.empty)
val msgCount = 1000
mockRetryCollection.expectedMessageCount(0)
mockWithdrawTicket.expectedMessageCount(msgCount)
mockNotify.expectedMessageCount(0)
mockCancelSubscription.expectedMessageCount(msgCount)
1 to msgCount foreach { _ ⇒
producerTemplate.sendBody("direct:rejectPayment", withHistory)
}
assertMockEndpointsSatisfied()
}
}
| dnvriend/activiti-test | helloworld/src/test/scala/com/github/dnvriend/FollowUpTest.scala | Scala | apache-2.0 | 2,696 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.hmrcemailrenderer.templates.api
import junit.framework.TestCase
import org.scalatest.{Matchers, WordSpec}
import uk.gov.hmrc.hmrcemailrenderer.templates.api
class ApiDeveloperPasswordResetSpec extends WordSpec with Matchers {
val resetPasswordLink = "http://reset.password.com"
val developerHubTitle = "Developer Hub Title"
val templateParams = Map("resetPasswordLink" -> resetPasswordLink,
"staticAssetUrlPrefix" -> "http://uri", "staticAssetVersion" -> "v1", "borderColour" -> "#005EA5")
"htmlView" should {
"render as" in new TestCase {
val renderedHtml = api.html.passwordResetEmail.render(templateParams)
renderedHtml.contentType should include("text/html")
renderedHtml.body should include("<p style=\"margin: 0 0 30px; font-size: 19px;\">" +
"Click on the link below to reset your password for the HMRC Developer Hub.</p>")
renderedHtml.body should include("<p style=\"margin: 0 0 30px; font-size: 19px;\">" +
"<a href=\"" + resetPasswordLink + "\" style=\"color: #005EA5;\">" +
resetPasswordLink + "</a> </p>")
renderedHtml.body should include("<p style=\"margin: 0 0 30px; font-size: 19px;\">From HMRC Developer Hub</p>")
}
"render with developerHubTitle" in new TestCase {
val templateParamsPlus = templateParams + ("developerHubTitle" -> developerHubTitle)
val renderedHtml = api.html.passwordResetEmail.render(templateParamsPlus)
renderedHtml.body should include("<p style=\"margin: 0 0 30px; font-size: 19px;\">" +
"Click on the link below to reset your password for the HMRC " + developerHubTitle + ".</p>")
renderedHtml.body should include("<p style=\"margin: 0 0 30px; font-size: 19px;\">From HMRC " + developerHubTitle + "</p>")
}
}
"textView" should {
"render as" in new TestCase {
val renderedTxt = api.txt.passwordResetEmail.render(templateParams)
renderedTxt.contentType should include("text/plain")
renderedTxt.body should include("Click on the link below to reset your password for the HMRC Developer Hub.")
renderedTxt.body should include(resetPasswordLink)
renderedTxt.body should include("From HMRC Developer Hub")
}
"render with developerHubTitle" in new TestCase {
val templateParamsPlus = templateParams + ("developerHubTitle" -> developerHubTitle)
val renderedTxt = api.txt.passwordResetEmail.render(templateParamsPlus)
renderedTxt.body should include("Click on the link below to reset your password for the HMRC " + developerHubTitle + ".")
renderedTxt.body should include("From HMRC " + developerHubTitle)
}
}
}
| saurabharora80/hmrc-email-renderer | test/uk/gov/hmrc/hmrcemailrenderer/templates/api/ApiDeveloperPasswordResetSpec.scala | Scala | apache-2.0 | 3,265 |
package com.arcusys.learn.liferay.update.version300
import java.sql.Connection
import com.arcusys.learn.liferay.update.version300.lesson.{LessonGradeTableComponent, LessonTableComponent}
import com.arcusys.valamis.lesson.model.LessonType
import com.arcusys.valamis.persistence.common.{SlickDBInfo, SlickProfile}
import com.escalatesoft.subcut.inject.NewBindingModule
import org.joda.time.DateTime
import org.scalatest.{BeforeAndAfter, FunSuite}
import slick.driver.H2Driver
import slick.driver.JdbcDriver
import slick.driver.JdbcProfile
import slick.jdbc._
import scala.concurrent.Await
import scala.concurrent.duration.Duration
class UpdateLessonGradeTest extends FunSuite with BeforeAndAfter{
val driver = H2Driver
import driver.api._
val db = Database.forURL("jdbc:h2:mem:lessonGrades", driver = "org.h2.Driver")
var connection: Connection = _
val bindingModule = new NewBindingModule({ implicit module =>
module.bind[SlickDBInfo] toSingle new SlickDBInfo {
def databaseDef: JdbcBackend#DatabaseDef = db
def slickDriver: JdbcDriver = driver
def slickProfile: JdbcProfile = driver
}
})
before {
connection = db.source.createConnection()
Await.result(tables.createSchema, Duration.Inf)
}
after {
connection.close()
}
val tables = new LessonGradeTableComponent with SlickProfile with LessonTableComponent{
val driver: JdbcProfile = H2Driver
import driver.api._
def createSchema = db.run {
(lessons.schema ++ lessonGrades.schema).create
}
}
val updater = new DBUpdater3011(bindingModule)
test("update lesson grades") {
val lesson = tables.Lesson(1L,LessonType.Tincan,"title","description", None,123L,None, None, None, 2L, new DateTime, false, 0.7)
val lessonInsert = tables.lessons += lesson
val lessonGrades = tables.LessonGrade(1L,20197L,Some(40),new DateTime, None) ::
tables.LessonGrade(1L,20198L,None,new DateTime, None) ::
tables.LessonGrade(1L,20199L,Some(0),new DateTime, None) :: Nil
val lessonGradeInsert = tables.lessonGrades ++= lessonGrades
Await.result(db.run {lessonInsert >> lessonGradeInsert}, Duration.Inf)
updater.doUpgrade()
val data = Await.result(db.run {tables.lessonGrades.result}, Duration.Inf)
assert((data.filter(_.userId === 20197L).head.grade.get - 0.4).abs < 0.1)
assert(data.filter(_.userId == 20198L).head.grade.isEmpty)
assert((data.filter(_.userId == 20199L).head.grade.get - 0).abs < 0.1)
}
}
| igor-borisov/valamis | learn-portlet/src/test/scala/com/arcusys/learn/liferay/update/version300/UpdateLessonGradeTest.scala | Scala | gpl-3.0 | 2,490 |
import sbt._
import Keys._
import Build.data
object Marc4jBuild extends Build {
lazy val marc4jCore = Project(
id = "marc4j-core",
base = file("core"),
settings = commonSettings ++ Seq(
moduleName := "marc4j",
libraryDependencies <++= scalaVersion { sv => Seq(
"com.ibm.icu" % "icu4j" % "2.6.1",
"com.novocode" % "junit-interface" % "0.8" % "test"
)}
)
)
lazy val marc4jSamples = Project(
id = "marc4j-samples",
base = file("samples"),
dependencies = Seq(marc4jCore),
settings = commonSettings
)
def commonSettings = Defaults.defaultSettings ++ Seq(
organization := "org.marc4j",
version := "3.0.0-SNAPSHOT",
scalaVersion := "2.10.2",
scalaBinaryVersion := "2.10.2",
scalacOptions := Seq(
"-feature",
"-language:implicitConversions",
"-deprecation",
"-unchecked"
)
)
}
| travisbrown/marc4j-old | project/Build.scala | Scala | lgpl-2.1 | 899 |
/* Copyright 2009-2021 EPFL, Lausanne */
package stainless
package termination
import scala.collection.mutable.{Map => MutableMap, Set => MutableSet, ListBuffer}
import trees._
import inox._
/** A context-insensitive, field-sensitive control-flow analysis that computes
* the closures that are passed to call backs of given function.
*/
class CICFA(val program: Program { val trees: Trees }, val context: inox.Context) {
import context.{given, _}
import program._
import program.trees._
import program.symbols.{given, _}
import program.trees.exprOps._
sealed abstract class Function {
def body: Expr
override def equals(that: Any): Boolean = (this, that) match {
case (n1: NamedFunction, n2: NamedFunction) => n1.fd == n2.fd
case (l1: LambdaFunction, l2: LambdaFunction) => l1.lambda == l2.lambda
case _ => false
}
override def hashCode: Int = this match {
case n: NamedFunction => n.fd.hashCode
case l: LambdaFunction => l.lambda.hashCode
}
override def toString: String = this match {
case n: NamedFunction => n.fd.id.asString
case l: LambdaFunction => l.lambda.asString
}
}
implicit class NamedFunction(val fd: FunDef) extends Function {
def body: Expr = fd.fullBody
}
implicit class LambdaFunction(val lambda: Lambda) extends Function {
def body: Expr = lambda.body
}
// Abstract values and closures
sealed abstract class AbsValue
case class Closure(lam: Lambda) extends AbsValue
sealed abstract class AbsObj extends AbsValue {
val argvars: Seq[Variable]
}
// the argvars in the object are more like addresses and may not
// correspond to variables in the program (they are considered escaping)
case class ConsObject(adt: ADT, argvars: Seq[Variable]) extends AbsObj
case class TupleObject(tp: Tuple, argvars: Seq[Variable]) extends AbsObj
case object External extends AbsValue
// mapping from a set of live variables to their value
case class AbsEnv(store: Map[Variable, Set[AbsValue]]) {
// checks if this >= AbsElem
def greaterEquals(other: AbsEnv): Boolean = other.store.forall {
case (k, v) => store.contains(k) && other.store(k).subsetOf(store(k))
}
def join(other: AbsEnv): AbsEnv =
AbsEnv((store.keySet ++ other.store.keys).map { k =>
k -> (store.getOrElse(k, Set.empty) ++ other.store.getOrElse(k, Set.empty))
}.toMap)
// this is a disjoint union, where only the new keys that
// are found are added to the map (this likely to be efficient)
def ++(other: AbsEnv): AbsEnv = {
AbsEnv(store ++ (other.store.iterator.filter(p => !(store contains p._1))))
}
def +(entry: (Variable, Set[AbsValue])): AbsEnv = {
AbsEnv(store + entry)
}
override def toString = {
store.map { case (k, v) => s"$k-->$v" }.mkString("\n")
}
}
private val emptyEnv = AbsEnv(Map())
/** A helper function for combining multiple abstract values */
private def flatten(envs: Seq[AbsEnv]): AbsEnv = envs.foldLeft(emptyEnv)(_ join _)
case class Summary(in: AbsEnv, out: AbsEnv, ret: Set[AbsValue])
private val cache: MutableMap[Identifier, Analysis] = MutableMap.empty
def analyze(id: Identifier): Analysis =
cache.getOrElseUpdate(id, timers.termination.cfa.run {
new Analysis(id)
})
class Analysis(id: Identifier) {
val fd = getFunction(id)
private val seen: MutableSet[Function] = MutableSet.empty
// summary of each function
private val tabulation: MutableMap[Function, Summary] = MutableMap.empty
// initialize summaries to identity function from bot to empty
// for the current function, initialize it to External
private def getTabulation(fun: Function): Summary =
tabulation.getOrElseUpdate(
fun, {
Summary(
fun match {
case n: NamedFunction =>
if (id == n.fd.id)
AbsEnv(n.fd.fd.params.map(vd => vd.toVariable -> Set[AbsValue](External)).toMap)
else AbsEnv(n.fd.params.map(vd => vd.toVariable -> Set[AbsValue]()).toMap)
case l: LambdaFunction =>
AbsEnv(l.lambda.params.map(vd => vd.toVariable -> Set[AbsValue]()).toMap)
},
emptyEnv,
Set()
)
}
)
// a mapping from ADTs to argvars (used to represent arguments of each ADT creation by a fresh variable)
private val objectsMap: MutableMap[Expr, AbsObj] = MutableMap.empty
private def getOrCreateObject(objExpr: Expr): AbsObj =
objectsMap.getOrElseUpdate(objExpr, objExpr match {
case adt: ADT => ConsObject(adt, freshVars(adt.args.size))
case tp: Tuple => TupleObject(tp, freshVars(tp.exprs.size))
})
// set of lambdas that are applied
private val appliedLambdas: MutableSet[Lambda] = MutableSet.empty
// set of lambdas that are passed to a call back (so in principle like
// applied lambdas in the absence of information about the caller)
private val externallyEscapingLambdas: MutableSet[Lambda] = MutableSet.empty
private def recordPassedLambdas(args: Set[AbsValue], env: AbsEnv): Unit = {
externallyEscapingLambdas ++= passedLambdas(args, env)
}
private def passedLambdas(vals: Set[AbsValue], env: AbsEnv): Set[Lambda] = vals.flatMap {
case Closure(lam) =>
variablesOf(lam).flatMap { v =>
passedLambdas(env.store(v), env)
}.toSet + lam
case _ => Set[Lambda]()
}
private def freshVars(size: Int) = (1 to size).map(i => Variable.fresh("arg" + i, Untyped, true)).toSeq
// iteratively process functions from the worklist.
// (a) at every direct function call, join the arguments passed in with the `in` fact in the summary
// -- if the join results in a greater value, add the function back to the worklist
// (b) use the summary in the tabulation to complete the intra-procedural analysis
// (c) Update the caller information on seeing a function call.
// (d) if the return value of the function is found to be different from the return value in the tabulation
// -- update the entry in the tabulation to a the new value
// -- add all callers of the function to the worklist
// Repeat this until a fix point is reached
// initialize callers to empty sets
private val callers: MutableMap[Function, MutableSet[Function]] = MutableMap.empty
private def getCallers(fun: Function): MutableSet[Function] =
callers.getOrElseUpdate(fun, MutableSet.empty)
private val creator: MutableMap[Lambda, Function] = MutableMap.empty
private def createdBy(lambda: Function, fun: Function): Boolean = lambda match {
case f: LambdaFunction =>
creator.get(f.lambda) match {
case Some(`fun`) => true
case Some(f2: LambdaFunction) => createdBy(f2.lambda, fun)
case _ => false
}
case _ => false
}
private val escapingVars: MutableSet[Variable] = MutableSet.empty
val worklist = new ListBuffer[Function]()
worklist += fd
// the order of traversal is very important here, so using a custom traversal
private def rec(e: Expr, in: AbsEnv)(using current: Function): (Set[AbsValue], AbsEnv) = e match {
case Let(vd, v, body) =>
val (res, escenv) = rec(v, in)
val (bres, bescenv) = rec(body, AbsEnv(in.store + (vd.toVariable -> res)) join escenv)
(bres, escenv join bescenv)
case Application(callee, args) =>
val (targets, escenv) = rec(callee, in)
val absres = args.map(rec(_, in))
val absargs = absres.map(_._1)
val argescenv = flatten(absres.map(_._2))
val resabs = targets.map {
case Closure(lam) =>
getCallers(lam) += current
// record that the lambda is applied
appliedLambdas += lam
// create a new store with mapping for arguments and escaping variables
val argstore = in.store.view.filterKeys(escapingVars).toMap ++
(lam.params.map(_.toVariable) zip absargs) ++
escenv.store ++
argescenv.store
val argenv = AbsEnv(argstore)
val currSummary = getTabulation(lam)
if (!seen(lam) || !currSummary.in.greaterEquals(argenv)) {
val join = currSummary.in.join(argenv)
// here the input state has changed, so we need to reanalyze the callee
// (if it is not already scheduled to be analyzed)
if (!worklist.contains(lam))
worklist += lam
// update the in fact of the summary
tabulation.update(lam, Summary(join, currSummary.out, currSummary.ret))
}
// use the out fact as a temporary result
(currSummary.ret, currSummary.out)
case _ =>
// record all lambdas passed to external calls
recordPassedLambdas(absargs.flatten[AbsValue].toSet, in)
// invoking an external lambda will result in another external lambda
(Set(External), emptyEnv)
}
val resval = resabs.foldLeft(Set[AbsValue]()) { case (acc, (resvals, _)) => acc ++ resvals }
val resesc = argescenv join flatten(resabs.map(_._2).toSeq)
(resval, resesc)
case lam @ Lambda(args, body) =>
creator(lam) = current
val capvars = variablesOf(lam)
escapingVars ++= capvars // make all captured variables as escaping
val currSummary = getTabulation(lam)
val capenv = AbsEnv(in.store.view.filterKeys(capvars).toMap)
if (!currSummary.in.greaterEquals(capenv)) {
val join = currSummary.in.join(capenv)
tabulation.update(lam, Summary(join, currSummary.out, currSummary.ret))
}
(Set(Closure(lam)), AbsEnv(in.store.view.filterKeys(capvars).toMap))
case fi @ FunctionInvocation(_, _, args) =>
val fd = fi.tfd.fd
// update the callers info
getCallers(fd) += current
// (a) join the arguments passed in with the `in` fact in the summary.
// If the join results in a greater value, add the function back to the worklist.
val absres = args.map(rec(_, in))
val absargs = absres.map(_._1)
val argesc = flatten(absres.map(_._2))
val newenv = in ++ argesc
val argstore = newenv.store.view.filterKeys(escapingVars).toMap ++
(fd.params.map(_.toVariable) zip absargs)
val argenv = AbsEnv(argstore)
val currSummary = getTabulation(fd)
if (!seen(fd) || !currSummary.in.greaterEquals(argenv)) {
val join = currSummary.in.join(argenv)
// here the input state has changed, so we need to reanalyze the callee
// (if it is not already scheduled to be analyzed)
if (!worklist.contains(fd))
worklist += fd
// update the in fact of the summary
tabulation.update(fd, Summary(join, currSummary.out, currSummary.ret))
}
// use the out fact as a temporary result
(currSummary.ret, argesc join currSummary.out)
case adt @ ADT(id, tps, args) =>
val absres = args.map(rec(_, in))
val absargs = absres.map(_._1)
val argesc = flatten(absres.map(_._2))
// create a new object
val obj = getOrCreateObject(adt)
// make all argument variables escaping as they represent addresses that could be live across functions
escapingVars ++= obj.argvars
// construct an escaping environment
val esc = (obj.argvars zip absargs).toMap ++ argesc.store
(Set(obj), AbsEnv(esc))
case sel @ ADTSelector(adtExpr, selector) =>
val (absAdts, esc) = rec(adtExpr, in)
val store = in.store ++ esc.store
val resvals: Set[AbsValue] = absAdts.flatMap {
case ConsObject(cons, argvars) if cons.id == sel.constructor.id =>
val selarg = argvars(sel.selectorIndex)
store.getOrElse(selarg, Set())
// here, we are dereferencing an external ADT and hence should be external
case External => Set(External: AbsValue)
// these are type incompatible entries
case _ => Set[AbsValue]()
}
(resvals, esc)
case tp @ Tuple(args) =>
val absres = args.map(rec(_, in))
val absargs = absres.map(_._1)
val argesc = flatten(absres.map(_._2))
// create a new object
val obj = getOrCreateObject(tp)
// make all argument variables escaping as they represent addresses that could be live across functions
escapingVars ++= obj.argvars
// construct an escaping environment
val esc = (obj.argvars zip absargs).toMap ++ argesc.store
(Set(obj), AbsEnv(esc))
case TupleSelect(tp, index) =>
val (absTups, esc) = rec(tp, in)
val store = in.store ++ esc.store
val resvals: Set[AbsValue] = absTups.flatMap {
case TupleObject(_, argvars) =>
val selarg = argvars(index - 1)
store.getOrElse(selarg, Set())
// here, we are dereferencing an external Tuple and hence should be external
case External => Set(External: AbsValue)
// these are type incompatible entries
case _ => Set[AbsValue]()
}
(resvals, esc)
case IfExpr(cond, th, el) =>
val (_, condesc) = rec(cond, in)
val Seq((tval, tesc), (eval, eesc)) = Seq(th, el).map(ie => rec(ie, in))
(tval ++ eval, condesc join tesc join eesc)
case MatchExpr(scrut, cases) =>
import Path.{CloseBound, Condition}
var resenv: AbsEnv = emptyEnv
val absres = for (cse <- cases) yield {
val patCond = conditionForPattern[Path](scrut, cse.pattern, includeBinders = true)
val realCond = patCond withConds cse.optGuard.toSeq
val rhsIn = realCond.elements.foldLeft(in) {
case (in, CloseBound(vd, e)) =>
val (res, resc) = rec(e, in)
resenv = resenv join resc
AbsEnv(in.store + (vd.toVariable -> res))
case (in, Condition(cond)) =>
val (res, resc) = rec(cond, in)
resenv = resenv join resc
in
// Note that case pattern paths can't contain open bounds.
case _ => scala.sys.error("Should never happen")
}
rec(cse.rhs, rhsIn)
}
(absres.flatMap(_._1).toSet, resenv join flatten(absres.map(_._2)))
case v: Variable =>
(in.store.getOrElse(v, Set()), emptyEnv)
case Ensuring(body, Lambda(Seq(resvd), pred)) =>
val (resb, escb) = rec(body, in)
// this will record some calls made via contracts
// we can ignore its result value and escaping set as it cannot be used
rec(pred, in + (resvd.toVariable -> resb))
(resb, escb)
case Require(pred, body) =>
// pred cannot have an escaping set
rec(pred, in)
rec(body, in)
case Assert(pred, _, body) =>
// pred cannot have an escaping set
rec(pred, in)
rec(body, in)
case Annotated(e, _) => rec(e, in)
case NoTree(_) => (Set(), emptyEnv)
case Operator(args, op) =>
// every other operator will just add more esc sets and its return values cannot contain closures
val absres = args.map(rec(_, in))
(Set(), flatten(absres.map(_._2)))
// TODO: need to handle sets and maps
}
while (!worklist.isEmpty) {
val fun = worklist.remove(0)
seen += fun
val oldSummary = getTabulation(fun)
val (newret, newesc) = rec(fun.body, oldSummary.in)(using fun)
// if the return value of the function is found to be different from the return value in the tabulation:
// (a) update the entry in the tabulation to a the new value
// (b) add all callers of the function to the worklist
if (!newret.subsetOf(oldSummary.ret) || !oldSummary.out.greaterEquals(newesc)) {
// update summary
tabulation.update(fun, Summary(oldSummary.in, newesc, newret))
// reanalyze all clients with the new summary
val newcallers = getCallers(fun).filterNot(worklist.contains)
worklist ++= newcallers
val escaping: Set[Lambda] = {
val seen: MutableSet[AbsValue] = MutableSet.empty
val lambdas: MutableSet[Lambda] = MutableSet.empty
def rec(in: AbsValue): Unit = if (!seen(in)) {
seen += in
in match {
case Closure(lam) => lambdas += lam
case ConsObject(_, vars) =>
for (v <- vars; value <- newesc.store.getOrElse(v, Set.empty)) rec(value)
case TupleObject(_, vars) =>
for (v <- vars; value <- newesc.store.getOrElse(v, Set.empty)) rec(value)
case External =>
}
}
for (in <- newret) rec(in)
lambdas.toSet.filterNot(worklist contains _)
}
if (fun == (fd: Function) || createdBy(fun, fd)) {
// Register escaping lambda arguments as potentially containing external functions
for (lambda <- escaping) {
val currSummary = getTabulation(lambda)
val newEnv = AbsEnv(lambda.params.map(vd => vd.toVariable -> Set(External: AbsValue)).toMap)
tabulation.update(lambda, Summary(currSummary.in.join(newEnv), currSummary.out, currSummary.ret))
}
worklist ++= escaping.map(lam => lam: Function).filterNot(worklist.contains)
}
}
}
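// Close the set of externally escaping lambdas: any lambda nested inside an escaping lambda's body,
// or reachable through the functions it (transitively) calls, may also be applied by code outside
// the analysis, so it is treated as applied.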
private val allEscaping: Set[Lambda] = externallyEscapingLambdas.toSet.flatMap { (l: Lambda) =>
var llams = Set(l)
var callees = Set[Identifier]()
exprOps.postTraversal {
case nl: Lambda => llams += nl
case FunctionInvocation(id, _, _) => callees += id
case _ =>
}(l.body)
callees.foreach { cid =>
val fd = getFunction(cid)
(transitiveCallees(fd) + fd).foreach { tc =>
exprOps.postTraversal {
case nl: Lambda => llams += nl
case _ =>
}(tc.fullBody)
}
}
llams
}
def isApplied(l: Lambda): Boolean = appliedLambdas(l) || allEscaping(l)
}
}
| epfl-lara/stainless | core/src/main/scala/stainless/termination/ControlFlowAnalysis.scala | Scala | apache-2.0 | 18,517 |
/*
* Copyright 2001-2014 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic.anyvals
import org.scalatest._
import org.scalatest.prop.PropertyChecks
import org.scalactic.TypeCheckedTripleEquals
// SKIP-SCALATESTJS,NATIVE-START
import scala.collection.immutable.NumericRange
// SKIP-SCALATESTJS,NATIVE-END
import OptionValues._
import scala.collection.mutable.WrappedArray
//import org.scalactic.StrictCheckedEquality
import org.scalactic.Equality
import org.scalactic.{Pass, Fail}
import org.scalactic.{Good, Bad}
import scala.util.{Try, Success, Failure}
trait PosZFiniteFloatSpecSupport {
implicit val doubleEquality: Equality[Double] =
new Equality[Double] {
override def areEqual(a: Double, b: Any): Boolean =
(a, b) match {
case (a, bDouble: Double) if a.isNaN && bDouble.isNaN => true
case _ => a == b
}
}
implicit val floatEquality: Equality[Float] =
new Equality[Float] {
override def areEqual(a: Float, b: Any): Boolean =
(a, b) match {
case (a, bFloat: Float) if a.isNaN && bFloat.isNaN => true
case _ => a == b
}
}
implicit def tryEquality[T]: Equality[Try[T]] = new Equality[Try[T]] {
override def areEqual(a: Try[T], b: Any): Boolean = a match {
// I needed this because with GenDrivenPropertyChecks, got:
// [info] - should offer a '%' method that is consistent with Int *** FAILED ***
// [info] Success(NaN) did not equal Success(NaN) (PosIntExperiment.scala:498)
case Success(float: Float) if float.isNaN =>
b match {
case Success(bFloat: Float) if bFloat.isNaN => true
case _ => false
}
case Success(double: Double) if double.isNaN =>
b match {
case Success(bDouble: Double) if bDouble.isNaN => true
case _ => false
}
case _: Success[_] => a == b
case Failure(ex) => b match {
case _: Success[_] => false
case Failure(otherEx) => ex.getClass == otherEx.getClass && ex.getMessage == otherEx.getMessage
case _ => false
}
}
}
}
class PosZFiniteFloatSpec extends funspec.AnyFunSpec with matchers.should.Matchers with PropertyChecks with TypeCheckedTripleEquals with PosZFiniteFloatSpecSupport {
describe("A PosZFiniteFloat") {
describe("should offer a from factory method that") {
it("returns Some[PosZFiniteFloat] if the passed Float is greater than or equal to 0") {
PosZFiniteFloat.from(0.0f).value.value shouldBe 0.0f
PosZFiniteFloat.from(50.23f).value.value shouldBe 50.23f
PosZFiniteFloat.from(100.0f).value.value shouldBe 100.0f
}
it("returns None if the passed Float is NOT greater than or equal to 0") {
PosZFiniteFloat.from(-0.00001f) shouldBe None
PosZFiniteFloat.from(-99.9f) shouldBe None
}
}
describe("should offer an ensuringValid factory method that") {
it("returns PosZFiniteFloat if the passed Float is greater than or equal to 0") {
PosZFiniteFloat.ensuringValid(0.0f).value shouldBe 0.0f
PosZFiniteFloat.ensuringValid(50.23f).value shouldBe 50.23f
PosZFiniteFloat.ensuringValid(100.0f).value shouldBe 100.0f
}
it("throws AssertionError if the passed Float is NOT greater than or equal to 0") {
an [AssertionError] should be thrownBy PosZFiniteFloat.ensuringValid(-0.00001f)
an [AssertionError] should be thrownBy PosZFiniteFloat.ensuringValid(-99.9f)
an [AssertionError] should be thrownBy PosZFiniteFloat.ensuringValid(Float.PositiveInfinity)
an [AssertionError] should be thrownBy PosZFiniteFloat.ensuringValid(Float.NegativeInfinity)
// SKIP-DOTTY-START
// https://github.com/lampepfl/dotty/issues/6710
an [AssertionError] should be thrownBy PosZFiniteFloat.ensuringValid(Float.NaN)
// SKIP-DOTTY-END
}
}
describe("should offer a tryingValid factory method that") {
import TryValues._
it("returns a PosZFiniteFloat wrapped in a Success if the passed Float is greater than or equal 0") {
PosZFiniteFloat.tryingValid(0.0f).success.value.value shouldBe 0.0f
PosZFiniteFloat.tryingValid(50.0f).success.value.value shouldBe 50.0f
PosZFiniteFloat.tryingValid(100.0f).success.value.value shouldBe 100.0f
}
it("returns an AssertionError wrapped in a Failure if the passed Float is lesser than 0") {
PosZFiniteFloat.tryingValid(-1.0f).failure.exception shouldBe an [AssertionError]
PosZFiniteFloat.tryingValid(-99.0f).failure.exception shouldBe an [AssertionError]
}
}
describe("should offer a passOrElse factory method that") {
it("returns a Pass if the given Float is greater than or equal 0") {
PosZFiniteFloat.passOrElse(0.0f)(i => i) shouldBe Pass
PosZFiniteFloat.passOrElse(50.0f)(i => i) shouldBe Pass
PosZFiniteFloat.passOrElse(100.0f)(i => i) shouldBe Pass
}
it("returns an error value produced by passing the given Float to the given function if the passed Float is lesser than 0, wrapped in a Fail") {
PosZFiniteFloat.passOrElse(-1.0f)(i => i) shouldBe Fail(-1.0f)
PosZFiniteFloat.passOrElse(-99.0f)(i => i + 3.0f) shouldBe Fail(-96.0f)
}
}
describe("should offer a goodOrElse factory method that") {
it("returns a PosZFiniteFloat wrapped in a Good if the given Float is greater than or equal 0") {
PosZFiniteFloat.goodOrElse(0.0f)(i => i) shouldBe Good(PosZFiniteFloat(0.0f))
PosZFiniteFloat.goodOrElse(50.0f)(i => i) shouldBe Good(PosZFiniteFloat(50.0f))
PosZFiniteFloat.goodOrElse(100.0f)(i => i) shouldBe Good(PosZFiniteFloat(100.0f))
}
it("returns an error value produced by passing the given Float to the given function if the passed Float is lesser than 0, wrapped in a Bad") {
PosZFiniteFloat.goodOrElse(-1.0f)(i => i) shouldBe Bad(-1.0f)
PosZFiniteFloat.goodOrElse(-99.0f)(i => i + 3.0f) shouldBe Bad(-96.0f)
}
}
describe("should offer a rightOrElse factory method that") {
it("returns a PosZFiniteFloat wrapped in a Right if the given Float is greater than or equal 0") {
PosZFiniteFloat.rightOrElse(0.0f)(i => i) shouldBe Right(PosZFiniteFloat(0.0f))
PosZFiniteFloat.rightOrElse(50.0f)(i => i) shouldBe Right(PosZFiniteFloat(50.0f))
PosZFiniteFloat.rightOrElse(100.0f)(i => i) shouldBe Right(PosZFiniteFloat(100.0f))
}
it("returns an error value produced by passing the given Float to the given function if the passed Float is lesser than 0, wrapped in a Left") {
PosZFiniteFloat.rightOrElse(-1.0f)(i => i) shouldBe Left(-1.0f)
PosZFiniteFloat.rightOrElse(-99.0f)(i => i + 3.0f) shouldBe Left(-96.0f)
}
}
describe("should offer an isValid predicate method that") {
it("returns true if the passed Float is greater than or equal to 0") {
PosZFiniteFloat.isValid(50.23f) shouldBe true
PosZFiniteFloat.isValid(100.0f) shouldBe true
PosZFiniteFloat.isValid(0.0f) shouldBe true
PosZFiniteFloat.isValid(-0.0f) shouldBe true
PosZFiniteFloat.isValid(-0.00001f) shouldBe false
PosZFiniteFloat.isValid(-99.9f) shouldBe false
}
}
describe("should offer a fromOrElse factory method that") {
it("returns a PosZFiniteFloat if the passed Float is greater than or equal to 0") {
PosZFiniteFloat.fromOrElse(50.23f, PosZFiniteFloat(42.0f)).value shouldBe 50.23f
PosZFiniteFloat.fromOrElse(100.0f, PosZFiniteFloat(42.0f)).value shouldBe 100.0f
PosZFiniteFloat.fromOrElse(0.0f, PosZFiniteFloat(42.0f)).value shouldBe 0.0f
}
it("returns a given default if the passed Float is NOT greater than or equal to 0") {
PosZFiniteFloat.fromOrElse(-0.00001f, PosZFiniteFloat(42.0f)).value shouldBe 42.0f
PosZFiniteFloat.fromOrElse(-99.9f, PosZFiniteFloat(42.0f)).value shouldBe 42.0f
}
}
it("should offer MaxValue and MinValue factory methods") {
PosZFiniteFloat.MaxValue shouldEqual PosZFiniteFloat.from(Float.MaxValue).get
PosZFiniteFloat.MinValue shouldEqual PosZFiniteFloat(0.0f)
}
it("should not offer a PositiveInfinity factory method") {
"PosZFiniteFloat.PositiveInfinity" shouldNot compile
}
it("should not offer a NegativeInfinity factory method") {
"PosZFiniteFloat.NegativeInfinity" shouldNot compile
}
it("should not offer a isNegInfinity method") {
"PosZFiniteFloat(1.0f).isNegInfinity" shouldNot compile
}
it("should be sortable") {
val xs = List(PosZFiniteFloat(2.2F), PosZFiniteFloat(0.0F), PosZFiniteFloat(1.1F),
PosZFiniteFloat(3.3F))
xs.sorted shouldEqual List(PosZFiniteFloat(0.0F), PosZFiniteFloat(1.1F),
PosZFiniteFloat(2.2F), PosZFiniteFloat(3.3F))
}
describe("when created with apply method") {
it("should compile when 8 is passed in") {
"PosZFiniteFloat(8)" should compile
PosZFiniteFloat(8).value shouldEqual 8.0F
"PosZFiniteFloat(8L)" should compile
PosZFiniteFloat(8L).value shouldEqual 8.0F
"PosZFiniteFloat(8.0F)" should compile
PosZFiniteFloat(8.0F).value shouldEqual 8.0F
}
it("should compile when 0 is passed in") {
"PosZFiniteFloat(0)" should compile
PosZFiniteFloat(0).value shouldEqual 0.0F
"PosZFiniteFloat(0L)" should compile
PosZFiniteFloat(0L).value shouldEqual 0.0F
"PosZFiniteFloat(0.0F)" should compile
PosZFiniteFloat(0.0F).value shouldEqual 0.0F
}
it("should not compile when -8 is passed in") {
"PosZFiniteFloat(-8)" shouldNot compile
"PosZFiniteFloat(-8L)" shouldNot compile
"PosZFiniteFloat(-8.0F)" shouldNot compile
}
it("should not compile when x is passed in") {
val a: Int = -8
"PosZFiniteFloat(a)" shouldNot compile
val b: Long = -8L
"PosZFiniteFloat(b)" shouldNot compile
val c: Float = -8.0F
"PosZFiniteFloat(c)" shouldNot compile
}
}
describe("when specified as a plain-old Float") {
def takesPosZFiniteFloat(pos: PosZFiniteFloat): Float = pos.value
it("should compile when 8 is passed in") {
"takesPosZFiniteFloat(8)" should compile
takesPosZFiniteFloat(8) shouldEqual 8.0F
"takesPosZFiniteFloat(8L)" should compile
takesPosZFiniteFloat(8L) shouldEqual 8.0F
"takesPosZFiniteFloat(8.0F)" should compile
takesPosZFiniteFloat(8.0F) shouldEqual 8.0F
}
it("should compile when 0 is passed in") {
"takesPosZFiniteFloat(0)" should compile
takesPosZFiniteFloat(0) shouldEqual 0.0F
"takesPosZFiniteFloat(0L)" should compile
takesPosZFiniteFloat(0L) shouldEqual 0.0F
"takesPosZFiniteFloat(0.0F)" should compile
takesPosZFiniteFloat(0.0F) shouldEqual 0.0F
}
it("should not compile when -8 is passed in") {
"takesPosZFiniteFloat(-8)" shouldNot compile
"takesPosZFiniteFloat(-8L)" shouldNot compile
"takesPosZFiniteFloat(-8.0F)" shouldNot compile
}
it("should not compile when x is passed in") {
val x: Int = -8
"takesPosZFiniteFloat(x)" shouldNot compile
val b: Long = -8L
"takesPosZFiniteFloat(b)" shouldNot compile
val c: Float = -8.0F
"takesPosZFiniteFloat(c)" shouldNot compile
}
}
it("should offer a unary + method that is consistent with Float") {
forAll { (p: PosZFiniteFloat) =>
(+p).toFloat shouldEqual (+(p.toFloat))
}
}
it("should offer a unary - method that returns NegZFiniteFloat") {
forAll { (p: PosZFiniteFloat) =>
(-p) shouldEqual (NegZFiniteFloat.ensuringValid(-(p.toFloat)))
}
}
it("should offer 'min' and 'max' methods that are consistent with Float") {
forAll { (pfloat1: PosZFiniteFloat, pfloat2: PosZFiniteFloat) =>
pfloat1.max(pfloat2).toFloat shouldEqual pfloat1.toFloat.max(pfloat2.toFloat)
pfloat1.min(pfloat2).toFloat shouldEqual pfloat1.toFloat.min(pfloat2.toFloat)
}
}
it("should offer an 'isWhole' method that is consistent with Float") {
forAll { (pzfloat: PosZFiniteFloat) =>
pzfloat.isWhole shouldEqual pzfloat.toFloat.isWhole
}
}
it("should offer 'round', 'ceil', and 'floor' methods that are consistent with Float") {
forAll { (pzfloat: PosZFiniteFloat) =>
// SKIP-SCALATESTJS,NATIVE-START
pzfloat.round.toFloat shouldEqual pzfloat.toFloat.round
// SKIP-SCALATESTJS,NATIVE-END
pzfloat.ceil.toFloat shouldEqual pzfloat.toFloat.ceil
pzfloat.floor.toFloat shouldEqual pzfloat.toFloat.floor
}
}
it("should offer 'toRadians' and 'toDegrees' methods that are consistent with Float") {
forAll { (pzfloat: PosZFiniteFloat) =>
pzfloat.toRadians.toFloat shouldEqual pzfloat.toFloat.toRadians
pzfloat.toDegrees.toFloat shouldEqual pzfloat.toFloat.toDegrees
}
}
it("should offer widening methods for basic types that are consistent with Float") {
forAll { (pzfloat: PosZFiniteFloat) =>
def widen(value: Float): Float = value
widen(pzfloat) shouldEqual widen(pzfloat.toFloat)
}
forAll { (pzfloat: PosZFiniteFloat) =>
def widen(value: Double): Double = value
widen(pzfloat) shouldEqual widen(pzfloat.toFloat)
}
forAll { (pzfloat: PosZFiniteFloat) =>
def widen(value: PosZDouble): PosZDouble = value
widen(pzfloat) shouldEqual widen(PosZDouble.from(pzfloat.toFloat).get)
}
}
it("should offer an ensuringValid method that takes a Float => Float, throwing AssertionError if the result is invalid") {
PosZFiniteFloat(33.0f).ensuringValid(_ + 1.0f) shouldEqual PosZFiniteFloat(34.0f)
an [AssertionError] should be thrownBy { PosZFiniteFloat.MaxValue.ensuringValid(_ - PosZFiniteFloat.MaxValue - 1) }
an [AssertionError] should be thrownBy { PosFiniteFloat.MaxValue.ensuringValid(_ => Float.PositiveInfinity) }
an [AssertionError] should be thrownBy { PosFiniteFloat.MaxValue.ensuringValid(_ => Float.NegativeInfinity) }
// SKIP-DOTTY-START
// https://github.com/lampepfl/dotty/issues/6710
an [AssertionError] should be thrownBy { PosZFiniteFloat.MaxValue.ensuringValid(_ => Float.NaN) }
// SKIP-DOTTY-END
}
}
}
| scalatest/scalatest | jvm/scalactic-test/src/test/scala/org/scalactic/anyvals/PosZFiniteFloatSpec.scala | Scala | apache-2.0 | 15,072 |
package com.mesosphere.cosmos.model.thirdparty.marathon
case class MarathonAppContainer(`type`: String, docker: Option[MarathonAppContainerDocker])
| movicha/cosmos | cosmos-model/src/main/scala/com/mesosphere/cosmos/model/thirdparty/marathon/MarathonAppContainer.scala | Scala | apache-2.0 | 149 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs102.boxes
import uk.gov.hmrc.ct.accounts.frs102.retriever.Frs102AccountsBoxRetriever
import uk.gov.hmrc.ct.box._
case class AC213D(value: Option[Int]) extends CtBoxIdentifier(name = "Tangible assets - Office Equipment - cost - transfers")
with CtOptionalInteger
with Input
with ValidatableBox[Frs102AccountsBoxRetriever]
with Validators {
override def validate(boxRetriever: Frs102AccountsBoxRetriever): Set[CtValidation] = {
collectErrors(
validateMoney(value)
)
}
}
| liquidarmour/ct-calculations | src/main/scala/uk/gov/hmrc/ct/accounts/frs102/boxes/AC213D.scala | Scala | apache-2.0 | 1,138 |
// using com.lihaoyi::os-lib:0.7.8
object UpdateBrewFormula {
def gitUsername = "Github Actions"
def gitEmail = "[email protected]"
val workDir = os.pwd / "target"
val templateFile = os.pwd / ".github" / "scripts" / "coursier.rb.template"
def main(args: Array[String]): Unit = {
val dryRun = args match {
case Array() => false
case Array("-n" | "--dry-run") => true
case _ =>
System.err.println(s"Usage: UpdateBrewFormula (-n|--dry-run)?")
sys.exit(1)
}
val version =
Option(System.getenv("GITHUB_REF")) match {
case None => sys.error(s"GITHUB_REF not set, could not get current tag")
case Some(tag) if tag.startsWith("refs/tags/v") =>
tag.stripPrefix("refs/tags/v")
case Some(other) =>
sys.error(s"GITHUB_REF $other not starting with refs/tags/v")
}
val repoDir = workDir / "homebrew-formulas"
if (os.exists(repoDir)) {
System.err.println(s"Cleaning up former clone at $repoDir")
os.remove.all(repoDir)
}
os.makeDir.all(workDir)
lazy val ghToken = Option(System.getenv("GH_TOKEN")).getOrElse {
sys.error(s"GH_TOKEN not set")
}
val repo =
if (dryRun) "https://github.com/coursier/homebrew-formulas.git"
else s"https://[email protected]/coursier/homebrew-formulas.git"
os.proc("git", "clone", repo, "-q", "-b", "master", "homebrew-formulas")
.call(cwd = workDir, stdout = os.Inherit)
os.proc("git", "config", "user.name", gitUsername)
.call(cwd = repoDir, stdout = os.Inherit)
os.proc("git", "config", "user.email", gitEmail)
.call(cwd = repoDir, stdout = os.Inherit)
val jarUrl = s"https://github.com/coursier/coursier/releases/download/v$version/coursier"
val launcherUrl =
s"https://github.com/coursier/coursier/releases/download/v$version/cs-x86_64-apple-darwin.gz"
val jarPath = os.rel / "jar-launcher"
val launcherPath = os.rel / "launcher"
System.err.println(s"Getting $jarUrl")
os.proc("curl", "-fLo", jarPath, jarUrl)
.call(cwd = repoDir, stdout = os.Inherit)
System.err.println(s"Getting $launcherUrl")
os.proc("curl", "-fLo", "launcher", launcherUrl)
.call(cwd = repoDir, stdout = os.Inherit)
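// hex-encoded SHA-256 of a file in the cloned repo, computed by shelling out to openssl and xxd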
def sha256(path: os.RelPath): String =
os.proc("/bin/bash", "-c", s"""openssl dgst -sha256 -binary < "$path" | xxd -p -c 256""")
.call(cwd = repoDir)
.out.text()
.trim
val jarSha256 = sha256(jarPath)
val launcherSha256 = sha256(launcherPath)
os.remove(repoDir / jarPath)
os.remove(repoDir / launcherPath)
val template = os.read(templateFile)
val content = template
.replace("@LAUNCHER_VERSION@", version)
.replace("@LAUNCHER_URL@", launcherUrl)
.replace("@LAUNCHER_SHA256@", launcherSha256)
.replace("@JAR_LAUNCHER_URL@", jarUrl)
.replace("@JAR_LAUNCHER_SHA256@", jarSha256)
val dest = os.rel / "coursier.rb"
os.write.over(repoDir / dest, content)
os.proc("git", "add", "--", dest)
.call(cwd = repoDir, stdout = os.Inherit)
val gitStatusOutput = os.proc("git", "status")
.call(cwd = repoDir, stderr = os.Pipe, mergeErrIntoOut = true)
.out
.text()
if (gitStatusOutput.contains("nothing to commit"))
println("Nothing changed")
else {
os.proc("git", "commit", "-m", s"Updates for $version")
.call(cwd = repoDir, stdout = os.Inherit)
if (dryRun)
println("Dry run, not pushing changes")
else {
println("Pushing changes")
os.proc("git", "push", "origin", "master")
.call(cwd = repoDir, stdout = os.Inherit)
}
}
}
}
| coursier/coursier | .github/scripts/UpdateBrewFormula.scala | Scala | apache-2.0 | 3,734 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.input
import scala.collection.JavaConversions._
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapreduce.InputSplit
import org.apache.hadoop.mapreduce.JobContext
import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat
import org.apache.hadoop.mapreduce.RecordReader
import org.apache.hadoop.mapreduce.TaskAttemptContext
/**
* A [[org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat CombineFileInputFormat]] for
 * reading whole text files. Each file is read as a key-value pair, where the key is the file path
 * and the value is the entire content of the file.
*/
private[spark] class WholeTextFileInputFormat
extends CombineFileInputFormat[String, String] with Configurable {
override protected def isSplitable(context: JobContext, file: Path): Boolean = false
override def createRecordReader(
split: InputSplit,
context: TaskAttemptContext): RecordReader[String, String] = {
val reader =
new ConfigurableCombineFileRecordReader(split, context, classOf[WholeTextFileRecordReader])
reader.setConf(getConf)
reader
}
/**
 * Allow minPartitions to be set by the end user in order to keep compatibility with the old
 * Hadoop API; it is applied through setMaxSplitSize.
 */
def setMinPartitions(context: JobContext, minPartitions: Int) {
val files = listStatus(context)
val totalLen = files.map { file =>
if (file.isDir) 0L else file.getLen
}.sum
val maxSplitSize = Math.ceil(totalLen * 1.0 /
(if (minPartitions == 0) 1 else minPartitions)).toLong
super.setMaxSplitSize(maxSplitSize)
}
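  // Example of the sizing above (illustrative numbers): with totalLen = 100 MB of files and
  // minPartitions = 4, maxSplitSize = ceil(100 MB / 4) = 25 MB, so whole files are packed into
  // combined splits of at most roughly 25 MB each, yielding about the requested partition count.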
}
| tophua/spark1.52 | core/src/main/scala/org/apache/spark/input/WholeTextFileInputFormat.scala | Scala | apache-2.0 | 2,646 |
package api
import models.{AggregationRule, Article, Source, RoutingRule}
import anorm.SqlParser._
object DbApi {
import java.sql.Connection
import anorm._
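  // Each fetch* method builds an anorm row parser with the `~` combinator (one column extractor
  // per selected column) and maps the parsed row onto the matching case class from the models package.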
def fetchRoutingRule(implicit conn: Connection): List[RoutingRule] = {
SQL("Select * from RULE").as(int("ID") ~ str("HOST") ~ str("URI") ~ str("CONTENT") ~ int("PRIORITY") *) map {
case id ~ host ~ uri ~ content ~ priority => RoutingRule(id, host, uri, content, priority)
}
}
def fetchAggregationRule(implicit conn: Connection): List[AggregationRule] = {
SQL("Select * from AGGREGATIONRULE").as(int("ID") ~ str("CONTENT") ~ str("SOURCE") ~ str("TARGET") *) map {
case id ~ content ~ source ~ target => AggregationRule(content, source, target)
}
}
def fetchSource(implicit conn: Connection): List[Source] = {
SQL("Select * from SOURCE").as(str("NAME") ~ str("URL") *) map {
case (name: String) ~ (url: String) => Source(name, url)
}
}
def fetchArticle(implicit conn: java.sql.Connection): List[Article] = {
SQL("Select * from ARTICLE").as(str("AUTHOR") ~ str("CONTENT") *) map {
case (author: String) ~ (content: String) => Article(author, content)
}
}
}
| jlcanela/fastcms | app/api/DbApi.scala | Scala | apache-2.0 | 1,204 |
package common
import scalaz.{Validation, Failure, Success}
case class Error(msg: String, exception: String = "") {
override def toString = "%s - %s".format(msg, exception)
}
object Error {
// returns None if an exception is thrown, Some[A] otherwise
def unsafeOption[A](op: => A): Option[A] = try {
Some(op)
} catch {
case exception: Throwable => None
}
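  // returns Failure if an exception is thrown (after printing it), Success[A] otherwise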
  def unsafeValidation[A](op: => A): Validation[Exception, A] = try {
    Success(op)
  } catch {
    case e: Exception =>
      println(e)
      Failure(e)
  }
}
| jurajzachar/obyvacka | app/common/Error.scala | Scala | artistic-2.0 | 543 |
package mesosphere.marathon
package raml
import mesosphere.UnitTest
import mesosphere.marathon.test.SettableClock
import mesosphere.marathon.core.launcher.OfferMatchResult
import mesosphere.marathon.core.launchqueue.LaunchStats.QueuedInstanceInfoWithStatistics
import mesosphere.marathon.state.{AppDefinition, PathId, Timestamp}
import mesosphere.marathon.test.MarathonTestHelper
import mesosphere.mesos.NoOfferMatchReason
class QueueInfoConversionTest extends UnitTest {
"QueueInfoConversion" should {
"A reject reason is converted correctly" in {
Given("A reject reason")
val reason = NoOfferMatchReason.InsufficientCpus
When("The value is converted to raml")
val raml = reason.toRaml[String]
Then("The value is converted correctly")
raml should be (reason.toString)
}
"A NoMatch is converted correctly" in {
Given("A NoMatch")
val app = AppDefinition(PathId("/test"))
val offer = MarathonTestHelper.makeBasicOffer().build()
val noMatch = OfferMatchResult.NoMatch(app, offer, Seq(NoOfferMatchReason.InsufficientCpus), Timestamp.now())
When("The value is converted to raml")
val raml = noMatch.toRaml[UnusedOffer]
Then("The value is converted correctly")
raml.offer should be (offer.toRaml[Offer])
raml.reason should be(noMatch.reasons.toRaml[Seq[String]])
raml.timestamp should be (noMatch.timestamp.toOffsetDateTime)
}
"A QueueInfoWithStatistics is converted correctly" in {
Given("A QueueInfoWithStatistics")
val clock = new SettableClock()
val now = clock.now()
val app = AppDefinition(PathId("/test"))
val offer = MarathonTestHelper.makeBasicOffer().build()
val noMatch = Seq(
OfferMatchResult.NoMatch(app, offer, Seq(NoOfferMatchReason.InsufficientCpus), now),
OfferMatchResult.NoMatch(app, offer, Seq(NoOfferMatchReason.InsufficientCpus), now),
OfferMatchResult.NoMatch(app, offer, Seq(NoOfferMatchReason.InsufficientCpus), now),
OfferMatchResult.NoMatch(app, offer, Seq(NoOfferMatchReason.InsufficientMemory), now)
)
val summary: Map[NoOfferMatchReason, Int] = Map(
NoOfferMatchReason.InsufficientCpus -> 75,
NoOfferMatchReason.InsufficientMemory -> 15,
NoOfferMatchReason.InsufficientDisk -> 10
)
val lastSummary: Map[NoOfferMatchReason, Int] = Map(
NoOfferMatchReason.InsufficientCpus -> 3,
NoOfferMatchReason.InsufficientMemory -> 1
)
val offersSummary: Seq[DeclinedOfferStep] = List(
DeclinedOfferStep("UnfulfilledRole", 0, 123),
DeclinedOfferStep("UnfulfilledConstraint", 0, 123),
DeclinedOfferStep("NoCorrespondingReservationFound", 0, 123),
DeclinedOfferStep("AgentMaintenance", 0, 123),
DeclinedOfferStep("InsufficientCpus", 75, 123), // 123 - 75 = 48
DeclinedOfferStep("InsufficientMemory", 15, 48), // 48 - 15 = 33
DeclinedOfferStep("InsufficientDisk", 10, 33), // 33 - 10 = 23
DeclinedOfferStep("InsufficientGpus", 0, 23),
DeclinedOfferStep("InsufficientPorts", 0, 23),
DeclinedOfferStep("DeclinedScarceResources", 0, 23)
)
val lastOffersSummary: Seq[DeclinedOfferStep] = List(
DeclinedOfferStep("UnfulfilledRole", 0, 4),
DeclinedOfferStep("UnfulfilledConstraint", 0, 4),
DeclinedOfferStep("NoCorrespondingReservationFound", 0, 4),
DeclinedOfferStep("AgentMaintenance", 0, 4),
DeclinedOfferStep("InsufficientCpus", 3, 4), // 4 - 3 = 1
DeclinedOfferStep("InsufficientMemory", 1, 1), // 1 - 1 = 0
DeclinedOfferStep("InsufficientDisk", 0, 0),
DeclinedOfferStep("InsufficientGpus", 0, 0),
DeclinedOfferStep("InsufficientPorts", 0, 0),
DeclinedOfferStep("DeclinedScarceResources", 0, 0)
)
val info = QueuedInstanceInfoWithStatistics(app, inProgress = true,
instancesLeftToLaunch = 23,
finalInstanceCount = 23,
backOffUntil = None,
startedAt = now,
rejectSummaryLastOffers = lastSummary,
rejectSummaryLaunchAttempt = summary,
processedOffersCount = 123,
unusedOffersCount = 100,
lastMatch = None,
lastNoMatch = Some(noMatch.head),
lastNoMatches = noMatch)
When("The value is converted to raml")
val raml = (Seq(info), true, clock).toRaml[Queue]
Then("The value is converted correctly")
raml.queue should have size 1
raml.queue.head shouldBe a[QueueApp]
val item = raml.queue.head.asInstanceOf[QueueApp]
item.app.id should be (app.id.toString)
item.count should be(23)
item.processedOffersSummary.processedOffersCount should be(info.processedOffersCount)
item.processedOffersSummary.unusedOffersCount should be(info.unusedOffersCount)
item.processedOffersSummary.lastUnusedOfferAt should be(Some(now.toOffsetDateTime))
item.processedOffersSummary.lastUsedOfferAt should be(None)
item.processedOffersSummary.rejectSummaryLaunchAttempt should be(offersSummary)
item.processedOffersSummary.rejectSummaryLastOffers should be(lastOffersSummary)
item.lastUnusedOffers should be (defined)
item.since should be(now.toOffsetDateTime)
}
}
}
| gsantovena/marathon | src/test/scala/mesosphere/marathon/raml/QueueInfoConversionTest.scala | Scala | apache-2.0 | 5,292 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.computations.Validators.AllowancesQuestionsValidation
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
case class CPQ8(value: Option[Boolean]) extends CtBoxIdentifier(name = "Did the company cease trading?")
with CtOptionalBoolean
with Input
with ValidatableBox[ComputationsBoxRetriever]
with AllowancesQuestionsValidation {
override def validate(boxRetriever: ComputationsBoxRetriever): Set[CtValidation] = {
val boxId = "CPQ8"
val companyCars = boxRetriever.cpQ7()
val machineryOrPlant = boxRetriever.cpQ10()
val structuresAndBuildings = boxRetriever.cpQ11()
val validateMandatory = {
if (companyCars.isTrue || machineryOrPlant.isTrue || structuresAndBuildings.isTrue) {
validateBooleanAsMandatory(boxId, this)
} else validationSuccess
}
if (isSBALive(boxRetriever.cp2())) {
  validateMandatory
} else {
  // report CPQ7 cross-validation failures together with the mandatory check
  validateAgainstCPQ7(boxRetriever, boxId, value) ++ validateMandatory
}
}
}
| hmrc/ct-calculations | src/main/scala/uk/gov/hmrc/ct/computations/CPQ8.scala | Scala | apache-2.0 | 1,694 |
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package expr
import com.intellij.lang.ASTNode
import org.jetbrains.plugins.scala.ScalaBundle
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.types.result._
import org.jetbrains.plugins.scala.lang.resolve.ScalaResolveResult
/**
* @author Alexander Podkhalyuzin
* Date: 07.03.2008
* Time: 9:24:19
*/
class ScParenthesisedExprImpl(node: ASTNode) extends ScExpressionImplBase(node) with ScParenthesisedExpr {
protected override def innerType: TypeResult = {
innerElement match {
case Some(x: ScExpression) => x.getNonValueType()
case _ => Failure(ScalaBundle.message("no.expression.in.parentheseses"))
}
}
// implicit arguments are owned by inner element
override def findImplicitArguments: Option[Seq[ScalaResolveResult]] = None
override def toString: String = "ExpressionInParenthesis"
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/impl/expr/ScParenthesisedExprImpl.scala | Scala | apache-2.0 | 999 |
package algorithms.implementation
/**
* Created by yujieshui on 2017/5/23.
*/
object OrganizingContainersOfBalls {
type Contain = Vector[Long]
type Matrix = Seq[Contain]
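  // Swaps never change a container's total capacity, and the number of balls of each type is
  // fixed, so sorting each container into a single type is possible exactly when the multiset
  // of container sizes (row sums) equals the multiset of per-type counts (column sums).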
def solution(matrix: Matrix): Boolean = {
matrix.map(_.sum).sorted == matrix.transpose.map(_.sum).sorted
}
def readListInt() = io.StdIn.readLine().split(" ").toList.map(_.toInt)
def main(args: Array[String]): Unit = {
val q :: Nil = readListInt()
val data =
1 to q map { _ =>
val n :: Nil = readListInt()
1 to n map (_ => io.StdIn.readLine().split(" ").toVector.map(_.toLong))
}
val result = data.map(solution).map {
case true => "Possible"
case false => "Impossible"
}
println(result.mkString("\n"))
}
}
| 1178615156/hackerrank | src/main/scala/algorithms/implementation/OrganizingContainersOfBalls.scala | Scala | apache-2.0 | 763 |
package spire
package algebra
/** A left module is a generalization of a vector space over a field, where
* the scalars are the elements of a ring (not necessarily commutative).
*
* A left module has left multiplication by scalars. Let V be an abelian group
* (with additive notation) and R the scalar ring, we have the following laws
* for x, y in V and r, s in R:
*
* 1. r *: (x + y) = r *: x + r *: y
* 2. (r + s) *: x = r *: x + s *: x
* 3. (r * s) *: x = r *: (s *: x)
* 4. R.one * x = x
*
* (see https://en.wikipedia.org/wiki/Module_(mathematics) )
*
* @tparam V Abelian group type
* @tparam R Scalar type
*/
trait LeftModule[V, @sp(Int,Long,Float,Double) R] extends Any with AdditiveAbGroup[V] {
implicit def scalar: Ring[R] // note: we require a ring with identity (see https://arxiv.org/pdf/1404.0135.pdf)
def timesl(r: R, v: V): V
}
object LeftModule {
@inline final def apply[V, @sp(Int,Long,Float,Double) R](implicit V: LeftModule[V, R]): LeftModule[V, R] = V
}
/** A right module is a generalization of a vector space over a field, where
* the scalars are the elements of a ring (not necessarily commutative).
*
* A right module has right multiplication by scalars. Let V be an abelian group
* (with additive notation) and R the scalar ring, we have the following laws
* for x, y in V and r, s in R:
*
* 1. (x + y) :* r = x :* r + y :* r
* 2. x :* (r + s) = x :* r + x :* s
* 3. x :* (r * s) = (x :* r) :* s
* 4. x :* R.one = x
*
* @tparam V Abelian group type
* @tparam R Scalar type
*/
trait RightModule[V, @sp(Int,Long,Float,Double) R] extends Any with AdditiveAbGroup[V] {
implicit def scalar: Ring[R] // note: we require a ring with identity (see https://arxiv.org/pdf/1404.0135.pdf)
def timesr(v: V, r: R): V
}
object RightModule {
@inline final def apply[V, @sp(Int,Long,Float,Double) R](implicit V: RightModule[V, R]): RightModule[V, R] = V
}
// we skip the implementation of bimodules due to the explosion of specialized type parameters
/** A module over a commutative ring has by definition equivalent left and right modules.
*
* In addition to the laws above 1-5 left and 1-5 right, we have:
*
* 6. (r *: x) :* s = r *: (x :* s)
*
* @tparam V Abelian group type
* @tparam R Scalar type
*/
trait CModule[V, @sp(Int,Long,Float,Double) R] extends Any with LeftModule[V, R] with RightModule[V, R] {
implicit def scalar: CRing[R]
def timesr(v: V, r: R): V = timesl(r, v)
}
object CModule {
@inline final def apply[V, @sp(Int,Long,Float,Double) R](implicit V: CModule[V, R]): CModule[V, R] = V
}
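/* Illustrative sketch (hypothetical, reusing the Vec2 example above): because the
 * scalar ring of a CModule is commutative, only timesl has to be supplied; timesr is
 * derived as timesl(r, v), which is law 6 specialised to a commutative scalar ring.
 *
 *   implicit def vec2IntCModule(implicit R: CRing[Int]): CModule[Vec2, Int] =
 *     new CModule[Vec2, Int] {
 *       def scalar: CRing[Int] = R
 *       def zero: Vec2 = Vec2(0, 0)
 *       def negate(v: Vec2): Vec2 = Vec2(-v.x, -v.y)
 *       def plus(x: Vec2, y: Vec2): Vec2 = Vec2(x.x + y.x, x.y + y.y)
 *       def timesl(r: Int, v: Vec2): Vec2 = Vec2(r * v.x, r * v.y)
 *     }
 */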
| non/spire | core/src/main/scala/spire/algebra/Module.scala | Scala | mit | 2,618 |
package com.thangiee.lolhangouts.data.usecases
import com.thangiee.lolchat.LoLChat
import com.thangiee.lolchat.error.NoSession
import com.thangiee.lolhangouts.data.Cached
import com.thangiee.lolhangouts.data.datasources.entities.SummSearchHistEntity
import com.thangiee.lolhangouts.data.datasources.entities.mappers.SummSearchHistMapper
import com.thangiee.lolhangouts.data.datasources.sqlite.DB
import com.thangiee.lolhangouts.data.usecases.entities.SummSearchHist
import com.thangiee.lolhangouts.data._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
trait ManageSearchHistUseCase extends Interactor {
def loadSummSearchHist(): Future[List[SummSearchHist]]
def saveSummSearchHist(inGameName: String, regionId: String): Future[Unit]
}
case class ManageSearchHistUseCaseImpl() extends ManageSearchHistUseCase {
def loadSummSearchHist(): Future[List[SummSearchHist]] = Future {
val result = collection.mutable.ListBuffer[SummSearchHist]()
result ++= DB.getAllSummSearchHist.map(SummSearchHistMapper.transform)
LoLChat.findSession(Cached.loginUsername) match {
case Good(sess) =>
result ++= sess.friends.map(f => SummSearchHist(f.name, sess.region.id, isFriend = true))
case Bad(NoSession(msg)) => warn(s"[!] $msg")
}
result.toList
}
def saveSummSearchHist(inGameName: String, regionId: String): Future[Unit] = Future {
new SummSearchHistEntity(inGameName, regionId).save()
}
} | Thangiee/LoL-Hangouts | src/com/thangiee/lolhangouts/data/usecases/ManageSearchHistUseCase.scala | Scala | apache-2.0 | 1,480 |
/*
* Copyright 2016 rdbc contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.rdbc.sapi.exceptions
class NoKeysReturnedException(msg: String, maybeCause: Option[Throwable] = None)
extends RdbcException(msg, maybeCause)
| rdbc-io/rdbc | rdbc-api-scala/src/main/scala/io/rdbc/sapi/exceptions/NoKeysReturnedException.scala | Scala | apache-2.0 | 757 |
package org.ai4fm.proofprocess.project.core
import org.eclipse.emf.cdo.transaction.CDOTransaction
import org.ai4fm.proofprocess.project.core.PProcessDataManager.PProcessData
import scala.collection.mutable;
import org.eclipse.core.resources.IProject
/**
* Provides a mutable cache for transactions of PP data roots for each project.
*
* @author Andrius Velykis
*/
class PProcessDataStore {
private case class TrData(transaction: CDOTransaction, data: PProcessData)
private val transactions = mutable.Map[IProject, TrData]()
def apply(project: IProject): PProcessData = {
val trData = transactions.getOrElseUpdate(project, openTransaction(project))
trData.data
}
private def openTransaction(project: IProject): TrData = {
val session = PProcessDataManager.session(project)
val transaction = PProcessDataManager.openTransaction(session)
val data = PProcessDataManager.loadData(transaction)
TrData(transaction, data)
}
def dispose() {
    // close all open transactions
transactions.values map (_.transaction) foreach (_.close())
transactions.clear()
}
}
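/* Usage sketch (hypothetical caller; `project` is some IProject): the store opens one
 * CDO transaction per project on first access and reuses it afterwards; dispose()
 * must be called by the owner of the cache on shutdown.
 *
 *   val store = new PProcessDataStore
 *   val data  = store(project)  // first access: opens a transaction and loads the data root
 *   val same  = store(project)  // cached: same transaction, same data
 *   store.dispose()             // closes every open transaction
 */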
| andriusvelykis/proofprocess | org.ai4fm.proofprocess.project.core/src/org/ai4fm/proofprocess/project/core/PProcessDataStore.scala | Scala | epl-1.0 | 1,117 |
package com.wavesplatform.lang.v1.evaluator.ctx
import cats.Monad
import cats.syntax.applicative._
import cats.syntax.either._
import com.wavesplatform.lang.v1.compiler.Terms.CaseObj
import com.wavesplatform.lang.v1.compiler.Types.UNIT
package object impl {
def notImplemented[F[_] : Monad, R](funcName: String, args: List[Any]): F[Either[String, R]] =
s"Can't apply (${args.map(_.getClass.getSimpleName).mkString(", ")}) to '$funcName'"
.asLeft[R].pure[F]
lazy val unit: CaseObj = CaseObj(UNIT, Map.empty)
def callableResultError(expected: AnyRef, actual: AnyRef): String =
s"CallableFunction needs to return $expected, but got '$actual'"
}
| wavesplatform/Waves | lang/shared/src/main/scala/com/wavesplatform/lang/v1/evaluator/ctx/impl/package.scala | Scala | mit | 670 |
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU Affero GPL v3, the copyright holders add the following
* Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU Affero GPL v3
* licence, when you create a Related Module, this Related Module is
* not considered as a part of the work and may be distributed under the
* license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>.
*
*************************************************************************************
*/
package com.normation.rudder.services.queries
import com.normation.inventory.domain.NodeId
import com.normation.rudder.domain.nodes.{NodeGroup,NodeGroupId}
import com.unboundid.ldap.sdk.{DN,Filter}
import com.normation.ldap.sdk._
import BuildFilter._
import com.normation.rudder.domain.{RudderDit,RudderLDAPConstants}
import com.normation.inventory.ldap.core.LDAPConstants.{A_OC, A_NAME}
import RudderLDAPConstants._
import com.normation.utils.Control.sequence
import com.normation.inventory.ldap.core.LDAPConstants
import com.normation.rudder.repository.ldap.LDAPEntityMapper
import net.liftweb.common._
import com.normation.rudder.repository.WoNodeGroupRepository
import com.normation.rudder.repository.RoNodeGroupRepository
import com.normation.eventlog.EventActor
import com.normation.utils.HashcodeCaching
import com.normation.eventlog.ModificationId
/**
* A container for a dynamic group update.
* members are the list of members post-update,
* removed/added members are compared with the
* state pre-update.
*/
case class DynGroupDiff(
members:Seq[NodeId],
removed:Seq[NodeId],
added:Seq[NodeId]
) extends HashcodeCaching
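/* Illustrative example (hypothetical node ids): if a group contained {n1, n2} before
 * the update and {n2, n3} after it, the resulting diff is
 *
 *   DynGroupDiff(members = Seq(n2, n3), removed = Seq(n1), added = Seq(n3))
 */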
trait DynGroupUpdaterService {
/**
   * Update the given dynamic group, returning the diff
   * between its members before and after the update.
   *
   * IMPORTANT NOTE: system groups are not updated by
   * this service!
   *
   * @return the resulting DynGroupDiff on success
*/
def update(dynGroupId:NodeGroupId, modId: ModificationId, actor:EventActor, reason:Option[String]) : Box[DynGroupDiff]
}
class DynGroupUpdaterServiceImpl(
roNodeGroupRepository: RoNodeGroupRepository,
woNodeGroupRepository: WoNodeGroupRepository,
queryProcessor : QueryProcessor
) extends DynGroupUpdaterService with Loggable {
override def update(dynGroupId:NodeGroupId, modId: ModificationId, actor:EventActor, reason:Option[String]) : Box[DynGroupDiff] = {
for {
(group,_) <- roNodeGroupRepository.getNodeGroup(dynGroupId)
      isDynamic <- if(group.isDynamic) Full("OK") else Failure("Can not update a non-dynamic group")
      query <- Box(group.query) ?~! "Can not update a group if its query is not defined"
newMembers <- queryProcessor.process(query) ?~! "Error when processing request for updating dynamic group with id %s".format(dynGroupId)
//save
newMemberIdsSet = newMembers.map( _.id).toSet
savedGroup <- {
val newGroup = group.copy(serverList = newMemberIdsSet)
if(group.isSystem) {
woNodeGroupRepository.updateSystemGroup(newGroup, modId, actor, reason)
} else {
woNodeGroupRepository.update(newGroup, modId, actor, reason)
}
      } ?~! "Error when saving update for dynamic group '%s'".format(dynGroupId)
} yield {
val plus = newMemberIdsSet -- group.serverList
val minus = group.serverList -- newMemberIdsSet
DynGroupDiff(newMemberIdsSet.toSeq, minus.toSeq, plus.toSeq)
}
}
}
| jooooooon/rudder | rudder-core/src/main/scala/com/normation/rudder/services/queries/DynGroupUpdaterService.scala | Scala | agpl-3.0 | 4,668 |
package lib
import play.api.Logger
import play.api.data.validation.ValidationError
import play.api.http.HeaderNames
import play.api.libs.json._
import play.api.mvc.{Result, _}
import scala.language.postfixOps
/**
* HTTP responses wrapper. Mostly done for logging and nesting results in ResponseContainer
*/
object Responses {
/**
   * 200/304 response: returns 200 with the content if the client does not hold a fresh cached copy, otherwise just 304
   *
   * @param request the incoming request, used to read the If-None-Match header
   * @param entity the actual result of the request
* @param t implicit wiring of converter of resulting entity to JSON
* @tparam A request type, usually JsValue
* @tparam E resulting entity type
* @return a result OK or NotModified
*/
def cachedOk[A, E](request: EnrichedRequest[A], entity: E)(implicit t: Writes[E]): Result = {
val objectHashCode = entity.hashCode.toString
if (request.headers.get(HeaderNames.IF_NONE_MATCH).contains(objectHashCode)) Results.NotModified
else Results.Ok(Json.toJson(new ResponseContainer(entity))).withHeaders(HeaderNames.ETAG -> objectHashCode)
}
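  /* Usage sketch (hypothetical action builder, model and service): the first response
   * carries an ETAG header equal to the entity's hashCode; a follow-up request that
   * echoes it in If-None-Match gets an empty 304 instead of the JSON body.
   *
   *   def profile(id: Long) = enrichedAction { implicit request =>
   *     val user: User = userService.find(id)  // assumes an implicit Writes[User]
   *     Responses.cachedOk(request, user)
   *   }
   */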
/**
* 404 with a custom message
*
* @param message if the default one needs to be overridden
* @return a result Not Found
*/
def notFound(message: String = "404: Resource Not Found") = Results.NotFound(Json.toJson(new ResponseContainer(message)))
/**
   * 400 for validation errors. It converts Play validation errors to this framework's preferred error style
   *
   * @param message the error sequence returned by Play validation
   * @return 400 with field paths and their error descriptions
*/
def badRequestValidation(message: Seq[(JsPath, Seq[ValidationError])]) = {
val msg = message.map {
pathError: (JsPath, Seq[ValidationError]) =>
val fieldPath = pathError._1.path.map(pathNode => pathNode.toJsonString).mkString
val errors = pathError._2.map(error => error.message + error.args.mkString(".", "-", ""))
fieldPath -> errors
} toMap
Results.BadRequest(Json.toJson(new ResponseContainer(msg)))
}
/**
* 400 with a custom message
*
* @param message to return to user
* @return Bad request result
*/
def badRequestMessage(message: String) = Results.BadRequest(Json.toJson(new ResponseContainer(message)))
/**
   * 403 when authorization is required and fails.
   * It is also used when the password/user combination is not found during authentication.
*
* @param message message to override the default one
* @return 403 not authorized
*/
def forbidden(message: String = "Request is not authorized") = Results.Forbidden(Json.toJson(new ResponseContainer(message)))
/**
* 201 after creating resource during POST
*
* @param newURI of the new resource created
* @return 201 with new URI in the header
*/
def created(newURI: String) = Results.Created(Json.toJson(new ResponseContainer(newURI))).withHeaders((HeaderNames.LOCATION, newURI))
/**
* 204 No content. Returned after PUT and DELETE
*
* @return No Content
*/
def noContent = Results.NoContent
/**
   * 409 Conflict. Used on PUT to notify the user that the resource changed in the meantime and the update did not
   * go through, so it should be retried. Implements optimistic locking.
*
* @param message message to send to user in body
* @return a message in container
*/
def consistenceConflict(message: String) = Results.Conflict(Json.toJson(new ResponseContainer(message)))
/**
* 401. Resource requires authentication and no valid auth token is provided
*
* @param message overriding message that explains what is wrong with authentication proof (token) provided
* @return 401 no auth
*/
def missingAuthentication(message: String = "Resource requires authentication") = Results.Unauthorized(Json.toJson(new ResponseContainer(message)))
/**
   * 500 Internal server error. It also logs the problem. None of these messages is ever exposed to the user.
   *
   * @param internalMessage - internal message from the code raising the exception
   * @param exception the underlying exception, if one exists
* @return 500 result
*/
def internalServerError(internalMessage: String, exception: Option[Throwable]) = {
if (exception.isDefined) Logger(this.getClass).error(s"500: $internalMessage. Exception message: ${exception.get.getMessage}", exception.get)
else Logger(this.getClass).error(s"500: $internalMessage")
Results.InternalServerError(Json.toJson(new ResponseContainer("Internal Server Error")))
}
}
/**
 * Error helper that allows returning meaningful and consistent validation errors to the requesting user
*/
object Errors {
/**
* Generates a field validation error as it would be returned from Play validation engine.
*
* @param fieldPath sequence of strings to define a nested path of the field.
* @param message Message of the error
* @return error object
*/
def fieldError(fieldPath: Seq[String], message: String): Seq[(JsPath, Seq[ValidationError])] = {
val fieldPathList = fieldPath.map(KeyPathNode).toList
Seq(JsPath(fieldPathList) -> Seq(ValidationError(message)))
}
}
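/* Illustrative example (hypothetical field path): building a nested-path error with
 * Errors.fieldError and rendering it through Responses.badRequestValidation, which
 * keys the 400 body by the JSON path of each offending field.
 *
 *   val errs = Errors.fieldError(Seq("user", "email"), "error.email.invalid")
 *   Responses.badRequestValidation(errs)
 */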
/**
* A container that every content response is wrapped in
*
* @param result - the actual data object returned, or error message if response is not 200
 * @param timeGenerated - the time when the result was generated; on a 304 the client may want to know when it was last fetched
* @tparam T - Type of the actual object to be returned
*/
case class ResponseContainer[T](result: T, timeGenerated: Long = System.currentTimeMillis)
/**
* Object to define conversion to JSON
*/
object ResponseContainer {
import play.api.libs.functional.syntax._
implicit def responseContainerWrites[T](implicit fmt: Writes[T]): Writes[ResponseContainer[T]] = new Writes[ResponseContainer[T]] {
def writes(i: ResponseContainer[T]) =
Json.obj(
"result" -> Json.toJson[T](i.result),
"timeGenerated" -> JsNumber(i.timeGenerated)
)
}
implicit def responseContainerReads[T](implicit fmt: Reads[T]): Reads[ResponseContainer[T]] = {
( (__ \\ "result").read[T] ~ (__ \\ "timeGenerated").read[Long]) (ResponseContainer[T](_, _))
}
}
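/* Illustrative example of the wire format produced by responseContainerWrites
 * (the timestamp value is arbitrary):
 *
 *   Json.toJson(ResponseContainer("ok"))
 *   // {"result": "ok", "timeGenerated": 1433771200000}
 */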
| lrimkus/microservice-example | app/lib/ResponseHelper.scala | Scala | mit | 6,339 |
package com.github.vicpara.eda
import com.github.vicpara.eda.stats.{ SequenceStats, PercentileStats, PercentileStatsWithFilterLevel, PrettyPercentileStats }
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.specs2.matcher.ScalaCheckMatchers
import org.specs2.mutable.Specification
import scalaz.Scalaz._
case object StatsSpec extends Specification with ScalaCheckMatchers with TestUtils {
"ExploratoryDataAnalysis" should {
"correctly compute percentiles statistics on monotonic increasing 21 sample dataset" in {
val rawData =
(0l until 21).map((" ", _))
.map {
case (drillDownKey, dimKey) => (((drillDownKey, dimKey), dimKey), dimKey)
}
.toList
val dataRDD: RDD[(((String, Long), Long), Long)] = sc.parallelize(rawData)
val res: PercentileStats = SequenceStats.percentileStats[Long](numPercentiles = 21)(dataRDD).head._2
res must_=== PercentileStats(points = rawData.map(el => (el._2.toString, el._2)), numBuckets = 21l)
}
"correctly compute the right number of elements in the percentiles statistics on 101 constant samples dataset" in {
val key = " "
val rawData: List[(((String, Long), Long), Long)] = (0l until 101 by 1).toList.map(e => (((key, e), e), e))
val dataRDD: RDD[(((String, Long), Long), Long)] = sc.parallelize(rawData)
val intRes = SequenceStats.percentileStats[Long](numPercentiles = 11)(dataRDD)
val res =
List(1, 2, 3, 5, 6, 7, 9, 11, 20, 21, 99, 100)
.map(r => (r, SequenceStats.percentileStats[Long](numPercentiles = r)(dataRDD).get(key).get.points.size))
.filter(e => e._1 != e._2)
res.foreach(el => el._1 must_=== el._2)
res.size must_=== 0
}
"correctly compute number percentiles when Num Percentiles is larger than samples" in {
val numSamplesList = List(2, 4, 5, 10, 11, 13, 17, 21, 39, 55, 101)
val res = numSamplesList.flatMap(numSample => {
val ires =
List(102, 103, 104, 101, 130, 200, 300, 500, 1000)
.map(v => (numSample, SequenceStats.percentileIndices(numSample, v).size))
.filter(v => v._1 != v._2)
ires.forall(el => el._1 must_=== el._2)
ires.size must_=== 0
ires
})
if (res.nonEmpty)
      println("Number of percentile indices: " + res.map(el => s"Expected:${el._1} but got ${el._2} ").mkString("\\n"))
res.size must_=== 0
}
"correctly compute percentiles statistics on 101 constant samples dataset" in {
val rawData: List[(((String, Long), Long), Long)] = (0l until 101 by 1).toList.map(e => (((" ", e), 2l), e))
val dataRDD: RDD[(((String, Long), Long), Long)] = sc.parallelize(rawData)
val result = SequenceStats.percentileStats[Long](numPercentiles = 11)(dataRDD).head._2
val expected = PercentileStats(points = (0 until 101 by 10).toList.zipWithIndex.map(e => (e._1.toString, 2l)), numBuckets = 101)
result must_=== expected
}
"correctly compute percentiles statistics on smaller dataset than stats" in {
val rawData = (1l until 6 by 1).map(e => (((" ", e), e), e - 1))
val dataRDD: RDD[(((String, Long), Long), Long)] = sc.parallelize(rawData)
val result: PercentileStats = SequenceStats.percentileStats[Long](numPercentiles = 10)(dataRDD).head._2
val expected = PercentileStats(points = (1l until 6 by 1).map(e => (e.toString, e)).toList, numBuckets = 5l)
result must_=== expected
}
"correctly computes the number of Percentile Indices for increasing number of percentiles" in {
val res =
List(1, 2, 3, 5, 6, 7, 9, 11, 17, 20, 21, 40, 50, 51, 99, 100, 101)
.map(v => (v, SequenceStats.percentileIndices(101, v).size))
.filter(v => v._1 != v._2)
res.foreach(el => el._1 must_=== el._2)
res.size must_=== 0
}
"correctly generate 10 Percentile Indexes from 1 to 10" in {
val result: Set[Long] = SequenceStats.percentileIndices(10, 10)
val expected: Set[Long] = (0 until 10).toList.map(_.toLong).toSet
result must_=== expected
}
"correctly generate 10 Percentile Indexes from 1 to 10 when requested 20 for a smaller dataset" in {
val result: Set[Long] = SequenceStats.percentileIndices(10, 20)
val expected: Set[Long] = (0 until 10).toList.map(_.toLong).toSet
result must_=== expected
}
"correctly generate 10 Percentile Indexes from 0 to 1000 by 100 when requested 10 for a 1001 dataset" in {
val result: Set[Long] = SequenceStats.percentileIndices(1001, 11)
val expected: Set[Long] = (0 until 1001 by 100).toList.map(_.toLong).toSet
result must_=== expected
}
"correctly pretty prints humanReadable" in {
val setEmpty = PrettyPercentileStats(
name = "xxx",
levels = List(PercentileStatsWithFilterLevel(
"NONE",
stats = PercentileStats(points = Nil, numBuckets = 0)
))
)
val set1 = PrettyPercentileStats(
name = "xxx",
levels = List(PercentileStatsWithFilterLevel(
"NONE",
stats = PercentileStats(points = List(("Key1", 2l)), numBuckets = 1)
))
)
val set2 = PrettyPercentileStats(
name = "xxx",
levels = List(PercentileStatsWithFilterLevel(
"NONE",
stats = PercentileStats(points = List(("Key1", 1l), ("Key2", 2l)), numBuckets = 2)
))
)
val set3 = PrettyPercentileStats(
name = "xxx",
levels = List(PercentileStatsWithFilterLevel(
"NONE",
stats = PercentileStats(points = List(("Key1", 1l), ("Key2", 2l), ("Key3", 3l)), numBuckets = 3)
))
)
List(setEmpty, set1, set2, set3)
.map(e => e.levels.head.stats.points.size -> e.toHumanReadable)
.map(e => e._1 -> (e._1 == e._2.split("\\n").size + 2))
.count(_._2) must_=== 0
}
"correctly pretty prints for humans bad samples" in {
val data = List(
PrettyPercentileStats(
name = "BusinessId x Day - Count(Tx)",
levels = List(PercentileStatsWithFilterLevel(
drillDownFilterValue = "DrillDownKey.ALL",
stats = PercentileStats(points = List(("1", 1830683l)), numBuckets = 1l)
))
),
PrettyPercentileStats(
name = "Postcode x Day - Count(RichTx)",
levels = List(PercentileStatsWithFilterLevel(
drillDownFilterValue = "DrillDownKey.ALL",
PercentileStats(points = List(
(("YO126EE", "2014-12-02").toString(), 1l),
(("CH441BA", "2014-09-23").toString(), 1l), (("LS287BJ", "2014-10-24").toString(), 1l),
(("G156RX", "2014-01-08").toString(), 1l)
), numBuckets = 4)
))
)
)
val hr = data.map(_.toHumanReadable)
hr.foreach(println)
hr.nonEmpty must_=== true
}
"correctly runs end to end SequenceStats.percentile on constant dataset" in {
case class DataP(k: String, v: Int)
val dataRDD = sc.parallelize((0 until 101).map(el => DataP(k = el.toString, v = el)))
@transient implicit lazy val isc: SparkContext = sc
val res = SequenceStats.percentile[DataP, String, Long](
data = dataRDD,
toDrillDownKeyOption = None,
toDimKey = _.k,
toVal = _ => 1l,
toStats = identity,
reduceFunc = _ |+| _,
numPercentiles = 10
)
println(PrettyPercentileStats(name = "Constant Dataset", levels = res).toHumanReadable)
val expected = PercentileStats(points = (0 until 10).toList.map(e => (e.toString, 1l)), numBuckets = 101l)
res.head.stats.points.map(_._2) must_=== expected.points.map(_._2)
}
"correctly runs end to end SequenceStats.percentile on increasing 10 bucket dataset" in {
case class DataP(k: String, v: Int)
val dataRDD = sc.parallelize((1 until 11).flatMap(el => (0 until el).map(num => DataP(k = el.toString, v = 1))))
@transient implicit lazy val isc: SparkContext = sc
val res = SequenceStats.percentile[DataP, String, Long](
data = dataRDD,
toDrillDownKeyOption = None,
toDimKey = _.k,
toVal = _ => 1l,
toStats = identity,
reduceFunc = _ + _,
numPercentiles = 10
)
println(PrettyPercentileStats(name = "", levels = res).toHumanReadable)
val expected = PercentileStats(
points = (1 until 11).toList.map(e => (e.toString, e.toLong)),
numBuckets = 10l
)
res.head.stats must_=== expected
res.head.stats.points.map(_._2) must_=== expected.points.map(_._2)
}
}
}
| vicpara/exploratory-data-analysis | src/test/scala/com/github/vicpara/eda/StatsSpec.scala | Scala | mit | 8,721 |
package org.infinispan.spark.suites
import org.infinispan.client.hotrod.Search
import org.infinispan.spark.domain._
import org.infinispan.spark.test._
import org.scalatest.{DoNotDiscover, FunSuite, Matchers}
@DoNotDiscover
class FilterByQueryProtoAnnotationSuite extends FunSuite with RunnersCache with Spark with MultipleServers with Matchers {
override protected def getNumEntries: Int = 100
override def getCacheType: CacheType.Value = CacheType.REPLICATED
override def getConfiguration = {
val configuration = super.getConfiguration
configuration.addProtoAnnotatedClass(classOf[Runner])
configuration.setAutoRegisterProto()
configuration
}
test("Filter by Query") {
val query = Search.getQueryFactory(remoteCacheManager.getCache(getCacheName)).from(classOf[Runner])
.having("finishTimeSeconds").between(4000, 4500).build
val rdd = createInfinispanRDD[Int, Runner].filterByQuery[Runner](query)
rdd.count shouldBe query.getResultSize
rdd.first()._2.getFinishTimeSeconds should be(4000 +- 4500)
}
test("Filter by Query String") {
val ickleQuery = "FROM runner WHERE finishTimeSeconds BETWEEN 4000 AND 4500"
val rdd = createInfinispanRDD[Int, Runner].filterByQuery[Runner](ickleQuery)
rdd.count shouldBe Search.getQueryFactory(remoteCacheManager.getCache(getCacheName)).create(ickleQuery).list().size()
rdd.first()._2.getFinishTimeSeconds should be(4000 +- 4500)
}
test("Filter by Query with projections") {
val query = Search.getQueryFactory(remoteCacheManager.getCache(getCacheName)).from(classOf[Runner]).select("name", "age").having("finished").equal(true)
.build()
val rdd = createInfinispanRDD[Int, Runner].filterByQuery[Array[AnyRef]](query)
val first = rdd.values.collect().head
first(0).getClass shouldBe classOf[String]
first(1).getClass shouldBe classOf[Integer]
rdd.count shouldBe query.getResultSize
}
}
| galderz/infinispan-spark | src/test/scala/org/infinispan/spark/suites/FilterByQueryProtoAnnotationSuite.scala | Scala | apache-2.0 | 1,990 |
package uk.gov.dvla.vehicles.presentation.common.pages
import org.openqa.selenium.{WebDriver}
import org.openqa.selenium.WebDriver
import org.scalatest.selenium.WebBrowser.find
import org.scalatest.selenium.WebBrowser.id
import uk.gov.dvla.vehicles.presentation.common.helpers.webbrowser.{Page, WebDriverFactory}
import uk.gov.dvla.vehicles.presentation.common.models.AddressPickerModel
import uk.gov.dvla.vehicles.presentation.common.views.widgetdriver.AddressPickerDriver
object AddressPickerPage extends Page {
final val address = "/address-picker"
override val url: String = WebDriverFactory.testUrl + address.substring(1)
final override val title: String = "Address Picker"
val jsTestUrl = WebDriverFactory.testUrl + "jstest" + address
val addressPickerDriver = new AddressPickerDriver(AddressPickerModel.Form.datePicker1Id)
def submit(implicit driver: WebDriver): Element = find(id("submit")).get
}
| dvla/vehicles-presentation-common | common-test/test/uk/gov/dvla/vehicles/presentation/common/pages/AddressPickerPage.scala | Scala | mit | 935 |
package com.twitter.finagle.stats
/**
* Interface used via the LoadService mechanism to obtain a stats
* registry used in MetricsRegistry. This avoids MetricsHandler,
* (which uses a stats registry), in twitter-server, having a
* dependency on finagle-stats.
*/
private[twitter] trait StatsRegistry {
def getStats(): Map[String, StatEntry]
}
/**
* Interface to allow MetricsHandler (in twitter-server) to
* use metrics from MetricsRegistry (in finagle-stats)
* without twitter-server having a dependency on finagle-stats.
*/
private[twitter] trait StatEntry {
val value: Double
val totalValue: Double
}
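/* Illustrative sketch (hypothetical implementation, not part of finagle or
 * twitter-server): what a LoadService-discoverable registry could look like; the
 * values below are hard-coded purely for illustration.
 *
 *   private[twitter] class InMemoryStatsRegistry extends StatsRegistry {
 *     private case class Entry(value: Double, totalValue: Double) extends StatEntry
 *     def getStats(): Map[String, StatEntry] =
 *       Map("requests" -> Entry(value = 5.0, totalValue = 120.0))
 *   }
 */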
| yancl/finagle-6.22.0 | finagle-core/src/main/scala/com/twitter/finagle/stats/StatsRegistry.scala | Scala | apache-2.0 | 620 |
package is.hail.compatibility
import is.hail.HailContext
import is.hail.expr.JSONAnnotationImpex
import is.hail.expr.ir.ExecuteContext
import is.hail.types.encoded._
import is.hail.types.virtual._
import is.hail.io._
import is.hail.io.fs.FS
import is.hail.rvd.{AbstractRVDSpec, IndexSpec2, IndexedRVDSpec2, RVD, RVDPartitioner}
import is.hail.utils.{FastIndexedSeq, Interval}
import org.json4s.JValue
case class IndexSpec private(
relPath: String,
keyType: String,
annotationType: String,
offsetField: Option[String]
) {
val baseSpec = LEB128BufferSpec(
BlockingBufferSpec(32 * 1024,
LZ4BlockBufferSpec(32 * 1024,
new StreamBlockBufferSpec)))
val (keyVType, keyEType) = LegacyEncodedTypeParser.parseTypeAndEType(keyType)
val (annotationVType, annotationEType) = LegacyEncodedTypeParser.parseTypeAndEType(annotationType)
val leafEType = EBaseStruct(FastIndexedSeq(
EField("first_idx", EInt64Required, 0),
EField("keys", EArray(EBaseStruct(FastIndexedSeq(
EField("key", keyEType, 0),
EField("offset", EInt64Required, 1),
EField("annotation", annotationEType, 2)
), required = true), required = true), 1)
))
val leafVType = TStruct(FastIndexedSeq(
Field("first_idx", TInt64, 0),
Field("keys", TArray(TStruct(FastIndexedSeq(
Field("key", keyVType, 0),
Field("offset", TInt64, 1),
Field("annotation", annotationVType, 2)
))), 1)))
val internalNodeEType = EBaseStruct(FastIndexedSeq(
EField("children", EArray(EBaseStruct(FastIndexedSeq(
EField("index_file_offset", EInt64Required, 0),
EField("first_idx", EInt64Required, 1),
EField("first_key", keyEType, 2),
EField("first_record_offset", EInt64Required, 3),
EField("first_annotation", annotationEType, 4)
), required = true), required = true), 0)
))
val internalNodeVType = TStruct(FastIndexedSeq(
Field("children", TArray(TStruct(FastIndexedSeq(
Field("index_file_offset", TInt64, 0),
Field("first_idx", TInt64, 1),
Field("first_key", keyVType, 2),
Field("first_record_offset", TInt64, 3),
Field("first_annotation", annotationVType, 4)
))), 0)
))
val leafCodec: AbstractTypedCodecSpec = TypedCodecSpec(leafEType, leafVType, baseSpec)
val internalNodeCodec: AbstractTypedCodecSpec = TypedCodecSpec(internalNodeEType, internalNodeVType, baseSpec)
def toIndexSpec2: IndexSpec2 = IndexSpec2(
relPath, leafCodec, internalNodeCodec, keyVType, annotationVType, offsetField
)
}
case class PackCodecSpec private(child: BufferSpec)
case class LegacyRVDType(rowType: TStruct, rowEType: EType, key: IndexedSeq[String]) {
def keyType: TStruct = rowType.select(key)._1
}
trait ShimRVDSpec extends AbstractRVDSpec {
val shim: AbstractRVDSpec
final def key: IndexedSeq[String] = shim.key
override def partitioner: RVDPartitioner = shim.partitioner
override def read(
ctx: ExecuteContext,
path: String,
requestedType: TStruct,
newPartitioner: Option[RVDPartitioner],
filterIntervals: Boolean
): RVD = shim.read(ctx, path, requestedType, newPartitioner, filterIntervals)
override def typedCodecSpec: AbstractTypedCodecSpec = shim.typedCodecSpec
override def partFiles: Array[String] = shim.partFiles
override lazy val indexed: Boolean = shim.indexed
lazy val attrs: Map[String, String] = shim.attrs
}
case class IndexedRVDSpec private(
rvdType: String,
codecSpec: PackCodecSpec,
indexSpec: IndexSpec,
override val partFiles: Array[String],
jRangeBounds: JValue
) extends ShimRVDSpec {
private val lRvdType = LegacyEncodedTypeParser.parseLegacyRVDType(rvdType)
lazy val shim = IndexedRVDSpec2(lRvdType.key,
TypedCodecSpec(lRvdType.rowEType.setRequired(true), lRvdType.rowType, codecSpec.child),
indexSpec.toIndexSpec2, partFiles, jRangeBounds, Map.empty[String, String])
}
case class UnpartitionedRVDSpec private(
rowType: String,
codecSpec: PackCodecSpec,
partFiles: Array[String]
) extends AbstractRVDSpec {
private val (rowVType: TStruct, rowEType) = LegacyEncodedTypeParser.parseTypeAndEType(rowType)
def partitioner: RVDPartitioner = RVDPartitioner.unkeyed(partFiles.length)
def key: IndexedSeq[String] = FastIndexedSeq()
def typedCodecSpec: AbstractTypedCodecSpec = TypedCodecSpec(rowEType.setRequired(true), rowVType, codecSpec.child)
val attrs: Map[String, String] = Map.empty
}
case class OrderedRVDSpec private(
rvdType: String,
codecSpec: PackCodecSpec,
partFiles: Array[String],
jRangeBounds: JValue
) extends AbstractRVDSpec {
private val lRvdType = LegacyEncodedTypeParser.parseLegacyRVDType(rvdType)
def key: IndexedSeq[String] = lRvdType.key
def partitioner: RVDPartitioner = {
val rangeBoundsType = TArray(TInterval(lRvdType.keyType))
new RVDPartitioner(lRvdType.keyType,
JSONAnnotationImpex.importAnnotation(jRangeBounds, rangeBoundsType, padNulls = false).asInstanceOf[IndexedSeq[Interval]])
}
override def typedCodecSpec: AbstractTypedCodecSpec = TypedCodecSpec(lRvdType.rowEType.setRequired(true), lRvdType.rowType, codecSpec.child)
val attrs: Map[String, String] = Map.empty
}
| danking/hail | hail/src/main/scala/is/hail/compatibility/LegacyRVDSpecs.scala | Scala | mit | 5,166 |
import scala.tools.partest.ReplTest
import scala.tools.util.Javap
object Test extends ReplTest {
// ugh, windows
def expectedOutput =
"""Type in expressions to have them evaluated.
Type :help for more information.
scala>
scala> object Bippy { class Dingus ; object Bop }
defined module Bippy
scala> :javap Bippy.Dingus
Compiled from "<console>"public class Bippy$Dingus extends java.lang.Object implements scala.ScalaObject{ public Bippy$Dingus();}
scala> :javap Bippy.Bop
Compiled from "<console>"public final class Bippy$Bop$ extends java.lang.Object implements scala.ScalaObject{ public static final Bippy$Bop$ MODULE$; public static {}; public Bippy$Bop$();}
scala>
scala>
"""
override def eval() =
if (Javap.isAvailable()) super.eval()
else expectedOutput.lines
def code = """
|object Bippy { class Dingus ; object Bop }
|:javap Bippy.Dingus
|:javap Bippy.Bop
""".stripMargin
}
| felixmulder/scala | test/disabled/run/t4532.scala | Scala | bsd-3-clause | 939 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import java.util.concurrent.{CountDownLatch, TimeoutException, TimeUnit}
import java.util.concurrent.atomic.AtomicInteger
import scala.concurrent.duration._
import org.scalatest.concurrent.{ThreadSignaler, TimeLimits}
import org.apache.spark.SparkFunSuite
class KeyLockSuite extends SparkFunSuite with TimeLimits {
// Necessary to make ScalaTest 3.x interrupt a thread on the JVM like ScalaTest 2.2.x
private implicit val defaultSignaler = ThreadSignaler
private val foreverMs = 60 * 1000L
test("The same key should wait when its lock is held") {
val keyLock = new KeyLock[Object]
val numThreads = 10
// Create different objects that are equal
val keys = List.fill(numThreads)(List(1))
require(keys.tail.forall(_ ne keys.head) && keys.tail.forall(_ == keys.head))
// A latch to make `withLock` be called almost at the same time
val latch = new CountDownLatch(1)
// Track how many threads get the lock at the same time
val numThreadsHoldingLock = new AtomicInteger(0)
// Track how many functions get called
val numFuncCalled = new AtomicInteger(0)
@volatile var e: Throwable = null
val threads = (0 until numThreads).map { i =>
new Thread() {
override def run(): Unit = {
latch.await(foreverMs, TimeUnit.MILLISECONDS)
keyLock.withLock(keys(i)) {
var cur = numThreadsHoldingLock.get()
if (cur != 0) {
e = new AssertionError(s"numThreadsHoldingLock is not 0: $cur")
}
cur = numThreadsHoldingLock.incrementAndGet()
if (cur != 1) {
e = new AssertionError(s"numThreadsHoldingLock is not 1: $cur")
}
cur = numThreadsHoldingLock.decrementAndGet()
if (cur != 0) {
e = new AssertionError(s"numThreadsHoldingLock is not 0: $cur")
}
numFuncCalled.incrementAndGet()
}
}
}
}
threads.foreach(_.start())
latch.countDown()
threads.foreach(_.join())
if (e != null) {
throw e
}
assert(numFuncCalled.get === numThreads)
}
test("A different key should not be locked") {
val keyLock = new KeyLock[Object]
val k1 = new Object
val k2 = new Object
// Start a thread to hold the lock for `k1` forever
val latch = new CountDownLatch(1)
val t = new Thread() {
override def run(): Unit = try {
keyLock.withLock(k1) {
latch.countDown()
Thread.sleep(foreverMs)
}
} catch {
case _: InterruptedException => // Ignore it as it's the exit signal
}
}
t.start()
try {
// Wait until the thread gets the lock for `k1`
if (!latch.await(foreverMs, TimeUnit.MILLISECONDS)) {
throw new TimeoutException("thread didn't get the lock")
}
var funcCalled = false
// Verify we can acquire the lock for `k2` and call `func`
failAfter(foreverMs.millis) {
keyLock.withLock(k2) {
funcCalled = true
}
}
assert(funcCalled, "func is not called")
} finally {
t.interrupt()
t.join()
}
}
}
| hvanhovell/spark | core/src/test/scala/org/apache/spark/util/KeyLockSuite.scala | Scala | apache-2.0 | 3,995 |
package com.ecfront.fs.operation
import com.jcraft.jsch.JSch
import org.apache.commons.net.ftp.FTPReply
import scala.collection.mutable.ArrayBuffer
class SFTPOperation(host: String, port: Int, userName: String, password: String) extends FSOperation {
private val jsch = new JSch()
private val session = jsch.getSession(userName, host, port)
if (password != null) {
session.setPassword(password)
}
session.connect()
private val channel = session.openChannel("sftp")
channel.connect()
def getSFTPClient = {
channel
}
/**
   * Closes the SFTP channel and the underlying SSH session.
*/
def close() {
if (channel != null) {
channel.disconnect()
}
if (session != null) {
session.disconnect()
}
}
private def formatPath(path: String): String = {
if (path.startsWith("/")) path.substring(1) else path
}
override protected def _isFile(path: String): Boolean = ???
override protected def separator: String = "/"
override protected def _createDir(path: String): Boolean = ???
override protected def _existDir(path: String): Boolean = ???
override protected def _deleteDir(path: String): Boolean = ???
override protected def _seekDir(path: String): Array[FileInfo] = ???
override protected def _existFile(path: String): Boolean = ???
override protected def _moveDir(sourcePath: String, targetPath: String): Boolean = ???
override protected def _seekFile(path: String): FileInfo = ???
override protected def _moveFile(sourcePath: String, targetPath: String): Boolean = ???
override protected def _copyFile(sourcePath: String, targetPath: String): Boolean = ???
override protected def _deleteFile(path: String): Boolean = ???
}
object SFTPOperation {
def apply(host: String, port: Int, userName: String, password: String) = new SFTPOperation(host, port, userName, password)
}
| gudaoxuri/ez-fs | src/main/scala/com/ecfront/fs/operation/SFTPOperation.scala | Scala | apache-2.0 | 1,851 |
package at.logic.gapt.proofs.reduction
import at.logic.gapt.expr._
import at.logic.gapt.formats.babel.BabelSignature
import at.logic.gapt.proofs._
import at.logic.gapt.proofs.expansion.{ ETAtom, ETWeakQuantifier, ExpansionProof }
import at.logic.gapt.proofs.resolution.{ Input, MguResolution, eliminateSplitting }
import at.logic.gapt.provers.escargot.Escargot
import at.logic.gapt.provers.smtlib.Z3
import at.logic.gapt.utils.SatMatchers
import org.specs2.mutable._
class ErasureReductionTest extends Specification with SatMatchers {
"two-sorted" in {
implicit var ctx = Context()
ctx += Context.InductiveType( "nat", hoc"0: nat", hoc"s: nat>nat" )
ctx += Context.Sort( "witness" )
ctx += hoc"f: witness > witness"
ctx += hoc"P: nat > witness > o"
ctx += hoc"Q: nat > o"
val red = new ErasureReductionHelper( ctx.constants.toSet )
val c1 = Clause() :+ hoa"P 0 y"
val c2 = hoa"P x (f y)" +: Clause() :+ hoa"P (s x) y"
val c3 = hoa"P x y" +: Clause() :+ hoa"Q x"
val c4 = hoa"Q (s (s (s (s 0))))" +: Clause()
val Seq( ec1, ec2, ec3, ec4 ) = Seq( c1, c2, c3, c4 ) map { red.forward }
val p1 = Input( ec2 )
val p2 = MguResolution( p1, Suc( 0 ), p1, Ant( 0 ) )
val p3 = MguResolution( p2, Suc( 0 ), p2, Ant( 0 ) )
val p4 = MguResolution( Input( ec1 ), Suc( 0 ), p3, Ant( 0 ) )
val p5 = MguResolution( Input( ec3 ), Suc( 0 ), Input( ec4 ), Ant( 0 ) )
val p6 = MguResolution( p4, Suc( 0 ), p5, Ant( 0 ) )
p6.conclusion must_== Clause()
val reifiedProof = red.back( p6, Set( c1, c2, c3, c4 ) )
reifiedProof.conclusion must_== Clause()
}
"variables as weak quantifier instances" in {
implicit var ctx = Context()
ctx += Context.Sort( "foo" )
ctx += hoc"P: foo>o"
val sequent = hof"∀x P x" +: Sequent() :+ hof"∃x P x"
val red = new ErasureReductionHelper( ctx.constants.toSet )
val deepAtom = red.forward( hof"P z", Map( hov"z: foo" -> FOLVar( "z" ) ) ).asInstanceOf[FOLAtom]
val firstOrderEP =
ExpansionProof(
ETWeakQuantifier(
red.forward( hof"∀x P x", Map() ),
Map( FOLVar( "z" ) -> ETAtom( deepAtom, Polarity.InAntecedent ) )
) +:
Sequent()
:+ ETWeakQuantifier(
red.forward( hof"∃x P x", Map() ),
Map( FOLVar( "z" ) -> ETAtom( deepAtom, Polarity.InSuccedent ) )
)
)
red.back( firstOrderEP, sequent ).deep must beValidSequent
}
}
class ReductionTest extends Specification {
"many-sorted lambda" in {
val sequent = hos"∀f P(f) = f(c: nat) :- P(λx h(h(x))) = h(h(c))"
"resolution" in {
val reduction =
LambdaEliminationReductionRes() |>
HOFunctionReductionRes() |>
CNFReductionResRes |>
// PredicateReductionCNF |>
ErasureReductionCNF
val ( folCNF, back ) = reduction.forward( sequent )
val Some( folProof ) = Escargot.getResolutionProof( folCNF )
val proof = back( eliminateSplitting( folProof ) )
proof.subProofs foreach {
case Input( Sequent( Seq( conj ), Seq() ) ) => conj must_== sequent.succedent.head
case Input( Sequent( Seq(), Seq( axiom ) ) ) => axiom must_== sequent.antecedent.head
case Input( _ ) => ko
case _ => ok
}
ok
}
"expansion" in {
val reduction =
LambdaEliminationReductionET() |>
HOFunctionReductionET() |>
// PredicateReductionET |>
ErasureReductionET
val ( folSequent, back ) = reduction.forward( sequent )
val Some( folProof ) = Escargot.getExpansionProof( folSequent )
val proof = back( folProof )
proof.shallow must_== sequent
val reductionForChecking =
LambdaEliminationReduction() |>
HOFunctionReduction()
val ( tffDeep, _ ) = reductionForChecking.forward( proof.deep )
Escargot isValid tffDeep must_== true
val z3WithQuantifiers = new Z3( "UF" )
if ( !z3WithQuantifiers.isInstalled ) skipped
z3WithQuantifiers.isValid( tffDeep ) must_== true
}
}
}
| gebner/gapt | tests/src/test/scala/at/logic/gapt/proofs/reduction/ErasureReductionTest.scala | Scala | gpl-3.0 | 4,170 |
/*
* Copyright 2015 Webtrends (http://www.webtrends.com)
*
* See the LICENCE.txt file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.webtrends.harness.component.socko.client
import java.io.{BufferedReader, InputStreamReader, DataOutputStream}
import java.net.{URL, HttpURLConnection}
import akka.pattern.ask
import akka.pattern.pipe
import akka.actor.Props
import akka.util.Timeout
import com.webtrends.harness.app.HActor
import scala.concurrent.{Promise, Future}
import scala.concurrent.duration.Duration
import scala.util.{Success, Failure}
import scala.collection.JavaConverters._
/**
* @author Michael Cuthbert on 1/30/15.
*/
object CoreSockoClient {
def props: Props = Props[CoreSockoClient]
}
class CoreSockoClient extends HActor {
import context.dispatcher
/**
   * Routes incoming messages to the handler for the matching HTTP verb; most of them delegate to the service method.
*/
override def receive = super.receive orElse {
case HttpGet(config, path, headers) => service(config, HttpConstants.GET, path, None, headers)
case HttpPost(config, path, body, headers) => service(config, HttpConstants.POST, path, Some(body), headers)
case HttpPut(config, path, body, headers) => service(config, HttpConstants.PUT, path, Some(body), headers)
case HttpDelete(config, path, headers) => service(config, HttpConstants.DELETE, path, None, headers)
case HttpOptions(config, path, body, headers) => service(config, HttpConstants.OPTIONS, path, Some(body), headers)
case HttpPatch(config, path, body, headers) => service(config, HttpConstants.PATCH, path, Some(body), headers)
case HttpPing(config, timeout, path) => pipe(ping(config, timeout, path)) to sender
}
private def getHeaderFields(conn:HttpURLConnection) : Map[String, String] = {
conn.getHeaderFields.asScala map {
case (x, y) => x -> y.asScala.mkString(",")
} toMap
}
def service[T:Manifest](config:HttpConfig, method:String, path:String, body:Option[T], headers:Map[String, String]) = {
val urlConnection = new URL(config.fullPath(path)).openConnection().asInstanceOf[HttpURLConnection]
urlConnection.setRequestMethod(method)
headers foreach {
case (name, value) => urlConnection.setRequestProperty(name, value)
}
method match {
case HttpConstants.POST | HttpConstants.PUT =>
body match {
case Some(v) =>
urlConnection.setDoOutput(true)
val wr = new DataOutputStream(urlConnection.getOutputStream)
// TODO marshall this
wr.writeBytes(v.toString)
wr.flush()
wr.close()
case None => // ignore
}
case _ => // don't have to do anything special with the other ones
}
try {
urlConnection.connect()
val responseCode = urlConnection.getResponseCode
      val response = try {
        val in = new BufferedReader(new InputStreamReader(urlConnection.getInputStream))
        // Note: a Scala assignment evaluates to Unit, so the Java-style idiom
        // `while ((line = in.readLine()) != null)` never terminates; read explicitly instead.
        val response = new StringBuffer()
        var inputLine: String = in.readLine()
        while (inputLine != null) {
          response.append(inputLine)
          inputLine = in.readLine()
        }
        in.close()
        response.toString.getBytes
} catch {
case t: Throwable =>
log.debug(t, t.getMessage)
new Array[Byte](0)
}
sender ! HttpResp(response, getHeaderFields(urlConnection), responseCode)
} finally {
urlConnection.disconnect()
}
}
def ping(config:HttpConfig, timeoutValue:Int=1, pingPath:String="ping") : Future[ConnectionStatus] = {
implicit val timeout = Timeout(Duration(timeoutValue, "seconds"))
val p = Promise[ConnectionStatus]()
try {
val f = (self ? HttpGet(config, pingPath))(timeout)
f onComplete {
case Failure(f) =>
          p success ConnectionStatus(false, "Could not connect to " + config.fullPath(pingPath) + ", responded with exception " + f.getMessage)
case Success(s) =>
s.asInstanceOf[HttpResp].statusCode match {
case 200 => p success ConnectionStatus(true, "Connected successfully to " + config.fullPath(pingPath))
case _ => p success ConnectionStatus(false, "Could not connect to " + config.fullPath(pingPath))
}
}
} catch {
case e:Throwable =>
log.debug("Could not connect to " + config.fullPath(pingPath) + " responded with error " + e.getMessage)
p success ConnectionStatus(false, "Could not connect to " + config.fullPath(pingPath) + " responded with error " + e.getMessage)
}
p.future
}
}
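/* Usage sketch (hypothetical wiring; the HttpGet/HttpResp shapes follow the patterns
 * matched above): the client is driven purely by messages.
 *
 *   val client = system.actorOf(CoreSockoClient.props)
 *   client ! HttpGet(config, "status", Map("Accept" -> "application/json"))
 *   // the sender eventually receives an HttpResp(body, headers, statusCode)
 */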
| Webtrends/wookiee-socko | src/main/scala/com/webtrends/harness/component/socko/client/CoreSockoClient.scala | Scala | apache-2.0 | 5,133 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.internal.config
import java.util.Locale
import java.util.concurrent.TimeUnit
import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.network.util.ByteUnit
import org.apache.spark.util.SparkConfWithEnv
class ConfigEntrySuite extends SparkFunSuite {
private val PREFIX = "spark.ConfigEntrySuite"
private def testKey(name: String): String = s"$PREFIX.$name"
test("conf entry: int") {
val conf = new SparkConf()
val iConf = ConfigBuilder(testKey("int")).intConf.createWithDefault(1)
assert(conf.get(iConf) === 1)
conf.set(iConf, 2)
assert(conf.get(iConf) === 2)
}
test("conf entry: long") {
val conf = new SparkConf()
val lConf = ConfigBuilder(testKey("long")).longConf.createWithDefault(0L)
conf.set(lConf, 1234L)
assert(conf.get(lConf) === 1234L)
}
test("conf entry: double") {
val conf = new SparkConf()
val dConf = ConfigBuilder(testKey("double")).doubleConf.createWithDefault(0.0)
conf.set(dConf, 20.0)
assert(conf.get(dConf) === 20.0)
}
test("conf entry: boolean") {
val conf = new SparkConf()
val bConf = ConfigBuilder(testKey("boolean")).booleanConf.createWithDefault(false)
assert(!conf.get(bConf))
conf.set(bConf, true)
assert(conf.get(bConf))
}
test("conf entry: optional") {
val conf = new SparkConf()
val optionalConf = ConfigBuilder(testKey("optional")).intConf.createOptional
assert(conf.get(optionalConf) === None)
conf.set(optionalConf, 1)
assert(conf.get(optionalConf) === Some(1))
}
test("conf entry: fallback") {
val conf = new SparkConf()
val parentConf = ConfigBuilder(testKey("parent1")).intConf.createWithDefault(1)
val confWithFallback = ConfigBuilder(testKey("fallback1")).fallbackConf(parentConf)
assert(conf.get(confWithFallback) === 1)
conf.set(confWithFallback, 2)
assert(conf.get(parentConf) === 1)
assert(conf.get(confWithFallback) === 2)
}
test("conf entry: time") {
val conf = new SparkConf()
val time = ConfigBuilder(testKey("time")).timeConf(TimeUnit.SECONDS)
.createWithDefaultString("1h")
assert(conf.get(time) === 3600L)
conf.set(time.key, "1m")
assert(conf.get(time) === 60L)
}
test("conf entry: bytes") {
val conf = new SparkConf()
val bytes = ConfigBuilder(testKey("bytes")).bytesConf(ByteUnit.KiB)
.createWithDefaultString("1m")
assert(conf.get(bytes) === 1024L)
conf.set(bytes.key, "1k")
assert(conf.get(bytes) === 1L)
}
test("conf entry: regex") {
val conf = new SparkConf()
val rConf = ConfigBuilder(testKey("regex")).regexConf.createWithDefault(".*".r)
conf.set(rConf, "[0-9a-f]{8}".r)
assert(conf.get(rConf).toString === "[0-9a-f]{8}")
conf.set(rConf.key, "[0-9a-f]{4}")
assert(conf.get(rConf).toString === "[0-9a-f]{4}")
conf.set(rConf.key, "[.")
val e = intercept[IllegalArgumentException](conf.get(rConf))
assert(e.getMessage.contains("regex should be a regex, but was"))
}
test("conf entry: string seq") {
val conf = new SparkConf()
val seq = ConfigBuilder(testKey("seq")).stringConf.toSequence.createWithDefault(Seq())
conf.set(seq.key, "1,,2, 3 , , 4")
assert(conf.get(seq) === Seq("1", "2", "3", "4"))
conf.set(seq, Seq("1", "2"))
assert(conf.get(seq) === Seq("1", "2"))
}
test("conf entry: int seq") {
val conf = new SparkConf()
val seq = ConfigBuilder(testKey("intSeq")).intConf.toSequence.createWithDefault(Seq())
conf.set(seq.key, "1,,2, 3 , , 4")
assert(conf.get(seq) === Seq(1, 2, 3, 4))
conf.set(seq, Seq(1, 2))
assert(conf.get(seq) === Seq(1, 2))
}
test("conf entry: transformation") {
val conf = new SparkConf()
val transformationConf = ConfigBuilder(testKey("transformation"))
.stringConf
.transform(_.toLowerCase(Locale.ROOT))
.createWithDefault("FOO")
assert(conf.get(transformationConf) === "foo")
conf.set(transformationConf, "BAR")
assert(conf.get(transformationConf) === "bar")
}
test("conf entry: checkValue()") {
def createEntry(default: Int): ConfigEntry[Int] =
ConfigBuilder(testKey("checkValue"))
.intConf
.checkValue(value => value >= 0, "value must be non-negative")
.createWithDefault(default)
val conf = new SparkConf()
val entry = createEntry(10)
conf.set(entry, -1)
val e1 = intercept[IllegalArgumentException] {
conf.get(entry)
}
assert(e1.getMessage == "value must be non-negative")
val e2 = intercept[IllegalArgumentException] {
createEntry(-1)
}
assert(e2.getMessage == "value must be non-negative")
}
test("conf entry: valid values check") {
val conf = new SparkConf()
val enum = ConfigBuilder(testKey("enum"))
.stringConf
.checkValues(Set("a", "b", "c"))
.createWithDefault("a")
assert(conf.get(enum) === "a")
conf.set(enum, "b")
assert(conf.get(enum) === "b")
conf.set(enum, "d")
val enumError = intercept[IllegalArgumentException] {
conf.get(enum)
}
assert(enumError.getMessage === s"The value of ${enum.key} should be one of a, b, c, but was d")
}
test("conf entry: conversion error") {
val conf = new SparkConf()
val conversionTest = ConfigBuilder(testKey("conversionTest")).doubleConf.createOptional
conf.set(conversionTest.key, "abc")
val conversionError = intercept[IllegalArgumentException] {
conf.get(conversionTest)
}
assert(conversionError.getMessage === s"${conversionTest.key} should be double, but was abc")
}
test("default value handling is null-safe") {
val conf = new SparkConf()
val stringConf = ConfigBuilder(testKey("string")).stringConf.createWithDefault(null)
assert(conf.get(stringConf) === null)
}
test("variable expansion of spark config entries") {
val env = Map("ENV1" -> "env1")
val conf = new SparkConfWithEnv(env)
val stringConf = ConfigBuilder(testKey("stringForExpansion"))
.stringConf
.createWithDefault("string1")
val optionalConf = ConfigBuilder(testKey("optionForExpansion"))
.stringConf
.createOptional
val intConf = ConfigBuilder(testKey("intForExpansion"))
.intConf
.createWithDefault(42)
val fallbackConf = ConfigBuilder(testKey("fallbackForExpansion"))
.fallbackConf(intConf)
val refConf = ConfigBuilder(testKey("configReferenceTest"))
.stringConf
.createWithDefault(null)
def ref(entry: ConfigEntry[_]): String = "${" + entry.key + "}"
def testEntryRef(entry: ConfigEntry[_], expected: String): Unit = {
conf.set(refConf, ref(entry))
assert(conf.get(refConf) === expected)
}
testEntryRef(stringConf, "string1")
testEntryRef(intConf, "42")
testEntryRef(fallbackConf, "42")
testEntryRef(optionalConf, ref(optionalConf))
conf.set(optionalConf, ref(stringConf))
testEntryRef(optionalConf, "string1")
conf.set(optionalConf, ref(fallbackConf))
testEntryRef(optionalConf, "42")
// Default string values with variable references.
val parameterizedStringConf = ConfigBuilder(testKey("stringWithParams"))
.stringConf
.createWithDefault(ref(stringConf))
assert(conf.get(parameterizedStringConf) === conf.get(stringConf))
// Make sure SparkConf's env override works.
conf.set(refConf, "${env:ENV1}")
assert(conf.get(refConf) === env("ENV1"))
// Conf with null default value is not expanded.
val nullConf = ConfigBuilder(testKey("nullString"))
.stringConf
.createWithDefault(null)
testEntryRef(nullConf, ref(nullConf))
}
test("conf entry : default function") {
var data = 0
val conf = new SparkConf()
val iConf = ConfigBuilder(testKey("intval")).intConf.createWithDefaultFunction(() => data)
assert(conf.get(iConf) === 0)
data = 2
assert(conf.get(iConf) === 2)
}
test("conf entry: alternative keys") {
val conf = new SparkConf()
val iConf = ConfigBuilder(testKey("a"))
.withAlternative(testKey("b"))
.withAlternative(testKey("c"))
.intConf.createWithDefault(0)
// no key is set, return default value.
assert(conf.get(iConf) === 0)
// the primary key is set, the alternative keys are not set, return the value of primary key.
conf.set(testKey("a"), "1")
assert(conf.get(iConf) === 1)
// the primary key and alternative keys are all set, return the value of primary key.
conf.set(testKey("b"), "2")
conf.set(testKey("c"), "3")
assert(conf.get(iConf) === 1)
// the primary key is not set, (some of) the alternative keys are set, return the value of the
// first alternative key that is set.
conf.remove(testKey("a"))
assert(conf.get(iConf) === 2)
conf.remove(testKey("b"))
assert(conf.get(iConf) === 3)
}
test("conf entry: prepend with default separator") {
val conf = new SparkConf()
val prependedKey = testKey("prepended1")
val prependedConf = ConfigBuilder(prependedKey).stringConf.createOptional
val derivedConf = ConfigBuilder(testKey("prepend1"))
.withPrepended(prependedKey)
.stringConf
.createOptional
conf.set(derivedConf, "1")
assert(conf.get(derivedConf) === Some("1"))
conf.set(prependedConf, "2")
assert(conf.get(derivedConf) === Some("2 1"))
}
test("conf entry: prepend with custom separator") {
val conf = new SparkConf()
val prependedKey = testKey("prepended2")
val prependedConf = ConfigBuilder(prependedKey).stringConf.createOptional
val derivedConf = ConfigBuilder(testKey("prepend2"))
.withPrepended(prependedKey, ",")
.stringConf
.createOptional
conf.set(derivedConf, "1")
assert(conf.get(derivedConf) === Some("1"))
conf.set(prependedConf, "2")
assert(conf.get(derivedConf) === Some("2,1"))
}
test("conf entry: prepend with fallback") {
val conf = new SparkConf()
val prependedKey = testKey("prepended3")
val prependedConf = ConfigBuilder(prependedKey).stringConf.createOptional
val derivedConf = ConfigBuilder(testKey("prepend3"))
.withPrepended(prependedKey)
.stringConf
.createOptional
val confWithFallback = ConfigBuilder(testKey("fallback2")).fallbackConf(derivedConf)
assert(conf.get(confWithFallback) === None)
conf.set(derivedConf, "1")
assert(conf.get(confWithFallback) === Some("1"))
conf.set(prependedConf, "2")
assert(conf.get(confWithFallback) === Some("2 1"))
conf.set(confWithFallback, Some("3"))
assert(conf.get(confWithFallback) === Some("3"))
}
test("conf entry: prepend should work only with string type") {
var i = 0
def testPrependFail(createConf: (String, String) => Unit): Unit = {
      assert(intercept[IllegalArgumentException] {
        createConf(testKey(s"prependedFail$i"), testKey(s"prependFail$i"))
      }.getMessage.contains("type must be string if prepend used"))
i += 1
}
testPrependFail( (prependedKey, prependKey) =>
ConfigBuilder(testKey(prependKey)).withPrepended(prependedKey).intConf
)
testPrependFail( (prependedKey, prependKey) =>
ConfigBuilder(testKey(prependKey)).withPrepended(prependedKey).longConf
)
testPrependFail( (prependedKey, prependKey) =>
ConfigBuilder(testKey(prependKey)).withPrepended(prependedKey).doubleConf
)
testPrependFail( (prependedKey, prependKey) =>
ConfigBuilder(testKey(prependKey)).withPrepended(prependedKey).booleanConf
)
testPrependFail( (prependedKey, prependKey) =>
ConfigBuilder(testKey(prependKey)).withPrepended(prependedKey).timeConf(TimeUnit.MILLISECONDS)
)
testPrependFail( (prependedKey, prependKey) =>
ConfigBuilder(testKey(prependKey)).withPrepended(prependedKey).bytesConf(ByteUnit.BYTE)
)
testPrependFail( (prependedKey, prependKey) =>
ConfigBuilder(testKey(prependKey)).withPrepended(prependedKey).regexConf
)
}
test("onCreate") {
var onCreateCalled = false
ConfigBuilder(testKey("oc1")).onCreate(_ => onCreateCalled = true).intConf.createWithDefault(1)
assert(onCreateCalled)
onCreateCalled = false
ConfigBuilder(testKey("oc2")).onCreate(_ => onCreateCalled = true).intConf.createOptional
assert(onCreateCalled)
onCreateCalled = false
ConfigBuilder(testKey("oc3")).onCreate(_ => onCreateCalled = true).intConf
.createWithDefaultString("1.0")
assert(onCreateCalled)
val fallback = ConfigBuilder(testKey("oc4")).intConf.createWithDefault(1)
onCreateCalled = false
ConfigBuilder(testKey("oc5")).onCreate(_ => onCreateCalled = true).fallbackConf(fallback)
assert(onCreateCalled)
}
}
| pgandhi999/spark | core/src/test/scala/org/apache/spark/internal/config/ConfigEntrySuite.scala | Scala | apache-2.0 | 13,530 |
package dtos.report
import models.qlkh.Task
import play.api.libs.json.Json
/**
* The Class TaskDto.
*
* @author Nguyen Duc Dung
* @since 4/15/14 2:44 PM
*
*/
case class TaskDto(
id: Long,
name: String,
code: String,
donVi: String,
soLan: Option[Double] = None,
dinhMuc: Double,
hidden: Boolean,
children: List[TaskDto] = Nil
)
object TaskDto {
implicit val jsonFormat = Json.format[TaskDto]
} | SunriseSoftVN/sunerp | app/dtos/report/TaskDto.scala | Scala | apache-2.0 | 585 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.maven
import java.net.URLClassLoader
import javax.inject.{ Inject, Singleton }
import org.eclipse.aether.artifact.Artifact
/**
* Implements sharing of Scala classloaders, to save on memory
*/
@Singleton
class ScalaClassLoaderManager @Inject() (logger: MavenLoggerProxy) {
/**
* The list of Scala libraries. None of these libraries may have a dependency outside of this list, otherwise there
* will be classloading issues.
*
* Note that while adding more libraries to this list will allow more to be shared, it may also mean that classloaders
   * can be shared in fewer cases, since it becomes less likely that there will be an exact match between two projects
* in what can be shared.
*/
private val ScalaLibs = Set(
"org.scala-lang" -> "scala-library",
"org.scala-lang" -> "scala-reflect",
"org.scala-lang.modules" -> "scala-xml",
"org.scala-lang.modules" -> "scala-parser-combinators",
"org.scala-lang.modules" -> "scala-java8-compat"
)
  private val ScalaVersionPattern = "_\\d+\\.\\d+.*$".r
private def stripScalaVersion(artifactId: String) = ScalaVersionPattern.replaceFirstIn(artifactId, "")
private def createCacheKey(artifacts: Seq[Artifact]): String = {
artifacts.map { artifact =>
import artifact._
s"$getGroupId:$getArtifactId:$getVersion"
}.sorted.mkString(",")
}
private var cache = Map.empty[String, ClassLoader]
/**
* Extract a Scala ClassLoader from the given classpath.
*/
def extractScalaClassLoader(artifacts: Seq[Artifact]): ClassLoader = synchronized {
val scalaArtifacts = artifacts.filter { artifact =>
ScalaLibs.contains(artifact.getGroupId -> stripScalaVersion(artifact.getArtifactId))
}
val cacheKey = createCacheKey(scalaArtifacts)
cache.get(cacheKey) match {
case Some(classLoader) =>
logger.debug(s"ScalaClassLoader cache hit - $cacheKey")
classLoader
case None =>
logger.debug(s"ScalaClassLoader cache miss - $cacheKey")
val classLoader = new URLClassLoader(scalaArtifacts.map(_.getFile.toURI.toURL).toArray, null)
cache += (cacheKey -> classLoader)
classLoader
}
}
}
| rstento/lagom | dev/maven-plugin/src/main/scala/com/lightbend/lagom/maven/ScalaClassLoaderManager.scala | Scala | apache-2.0 | 2,286 |
package object functions {
def map[A, B](list: List[A])(fn: A => B): List[B] = list match {
case head :: tail => fn(head) :: map(tail)(fn)
case _ => Nil
}
def filter[A](list: List[A])(fn: A => Boolean): List[A] = list match {
case head :: tail =>
val rest = filter(tail)(fn)
if (fn(head))
head :: rest
else
rest
case _ => Nil
}
def foldLeft[A, B](list: List[A], acc: B)(fn: (B, A) => B): B = list match {
case head :: tail => foldLeft(tail, fn(acc, head))(fn)
case _ => acc
}
def partition[A](list: List[A])(fn: A => Boolean): (List[A], List[A]) =
(filter(list)(fn), filter(list)(!fn(_)))
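  // Editor's sketch (not in the original source): a tiny smoke test for the helpers
  // above; expected results are shown in the trailing comments.
  def demo(): Unit = {
    println(map(List(1, 2, 3))(_ * 2))            // List(2, 4, 6)
    println(filter(List(1, 2, 3, 4))(_ % 2 == 0)) // List(2, 4)
    println(foldLeft(List(1, 2, 3), 0)(_ + _))    // 6
    println(partition(List(1, 2, 3, 4))(_ < 3))   // (List(1, 2),List(3, 4))
  }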
} | helton-hcs/scala-sandbox | Sandbox/src/main/scala/functions/package.scala | Scala | mit | 685 |
/*
* Copyright 2014–2017 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.physical.mongodb.expression
import slamdata.Predef._
import quasar.physical.mongodb.Bson
import matryoshka._
import matryoshka.data.Fix
import scalaz._
trait ExprOpOps[IN[_]] {
/** Type to be emitted from algebras. */
// TODO: break out the members that use this parameter in a separate typeclass?
type OUT[_]
def simplify: AlgebraM[Option, IN, Fix[OUT]]
def bson: Algebra[IN, Bson]
def rebase[T](base: T)(implicit T: Recursive.Aux[T, OUT])
: TransformM[Option, T, IN, OUT]
def rewriteRefs0(applyVar: PartialFunction[DocVar, DocVar]): AlgebraM[Option, IN, Fix[OUT]]
final def rewriteRefs(applyVar: PartialFunction[DocVar, DocVar])(implicit I: IN :<: OUT): Algebra[IN, Fix[OUT]] = {
val r0 = rewriteRefs0(applyVar)
x => r0(x).getOrElse(Fix(I.inj(x)))
}
}
object ExprOpOps {
/** Useful in implementations, when you need to require an instance with a
* certain "output" type. */
type Aux[IN[_], F[_]] = ExprOpOps[IN] { type OUT[A] = F[A] }
/** For the typical use case where you want the in/out parameters to be the same. */
type Uni[F[_]] = Aux[F, F]
implicit def apply[F[_]](implicit ops: ExprOpOps.Aux[F, F]): ExprOpOps.Uni[F] = ops
implicit def coproduct[F[_], G[_], H[_]](implicit
F: ExprOpOps.Aux[F, H],
G: ExprOpOps.Aux[G, H])
: ExprOpOps.Aux[Coproduct[F, G, ?], H] =
new ExprOpOps[Coproduct[F, G, ?]] {
type OUT[A] = H[A]
override def simplify =
_.run.fold(F.simplify, G.simplify)
val bson: Algebra[Coproduct[F, G, ?], Bson] =
_.run.fold(F.bson(_), G.bson(_))
def rebase[T](base: T)(implicit T: Recursive.Aux[T, H]) =
_.run.fold(F.rebase(base), G.rebase(base))
override def rewriteRefs0(applyVar: PartialFunction[DocVar, DocVar]) = {
val rf = F.rewriteRefs0(applyVar)
val rg = G.rewriteRefs0(applyVar)
_.run.fold(rf, rg)
}
}
}
| drostron/quasar | mongodb/src/main/scala/quasar/physical/mongodb/expression/ExprOpOps.scala | Scala | apache-2.0 | 2,525 |
package io.citrine.lolo.trees.impurity
/**
* Calculate the weighted Gini Impurity: weight * (1 - \\sum_j f_j^2), where f_j is the frequency of the jth label
*
* @param totalCategoryWeights the total weight of each label
* @param totalSquareSum the sum of the squares of the weights
* @param totalWeight the total weight over all the labels
*/
class GiniCalculator(
totalCategoryWeights: Array[Double],
totalSquareSum: Double,
totalWeight: Double
) extends ImpurityCalculator[Char] {
def add(value: Char, weight: Double): Double = {
if (value > 0) {
val wl = leftCategoryWeights(value)
leftCategoryWeights(value) = wl + weight
leftSquareSum += weight * (weight + 2 * wl)
val wr = totalCategoryWeights(value) - wl
rightSquareSum += weight * (weight - 2 * wr)
leftWeight += weight
}
getImpurity
}
def remove(value: Char, weight: Double): Double = {
if (value > 0) {
val wl = leftCategoryWeights(value)
leftCategoryWeights(value) = wl - weight
leftSquareSum += weight * (weight - 2 * wl)
val wr = totalCategoryWeights(value) - wl
rightSquareSum += weight * (weight + 2 * wr)
leftWeight -= weight
}
getImpurity
}
def reset(): Unit = {
leftCategoryWeights.indices.foreach { i => leftCategoryWeights(i) = 0.0 }
leftWeight = 0.0
leftSquareSum = 0.0
rightSquareSum = totalSquareSum
}
def copy(): GiniCalculator = {
new GiniCalculator(totalCategoryWeights, totalSquareSum, totalWeight)
}
def getImpurity: Double = {
if (totalWeight == 0) {
0.0
} else if (leftSquareSum == 0 || rightSquareSum == 0) {
totalWeight - totalSquareSum / totalWeight
} else {
totalWeight - leftSquareSum / leftWeight - rightSquareSum / (totalWeight - leftWeight)
}
}
private val leftCategoryWeights = new Array[Double](totalCategoryWeights.size)
private var leftWeight: Double = 0.0
private var leftSquareSum: Double = 0.0
private var rightSquareSum: Double = totalSquareSum
}
/**
* Companion object
*/
object GiniCalculator {
/**
* Build a GiniCalculator from weighted data
*
* @param data to build the calculator for
* @return a GiniCalculator
*/
def build(data: Seq[(Char, Double)]): GiniCalculator = {
// Be sure to filter out missing labels, which are marked as 0.toChar
val totalCategoryWeights = data.filter(_._1 > 0).groupBy(_._1).mapValues(_.map(_._2).sum)
if (totalCategoryWeights.isEmpty) {
return new GiniCalculator(Array.empty[Double], 0.0, 0.0)
}
val weightsArray = new Array[Double](totalCategoryWeights.keySet.max + 1)
val totalSquareSum = totalCategoryWeights.map {
case (k, v) =>
weightsArray(k) = v
Math.pow(v, 2)
}.sum
val totalWeight = totalCategoryWeights.values.sum
new GiniCalculator(weightsArray, totalSquareSum, totalWeight)
}
}
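/**
  * Editor's sketch (not part of the original source): a minimal usage example of the
  * calculator above, with two categories encoded as 1.toChar and 2.toChar and unit weights.
  */
object GiniCalculatorExample extends App {
  // Three points of category 1 and one point of category 2.
  val labelled: Seq[(Char, Double)] =
    Seq((1.toChar, 1.0), (1.toChar, 1.0), (1.toChar, 1.0), (2.toChar, 1.0))
  val calc = GiniCalculator.build(labelled)
  // Before any split: N * (1 - sum_j f_j^2) = 4 * (1 - (0.75^2 + 0.25^2)) = 1.5
  println(calc.getImpurity)
  // Move one category-1 point to the left partition; the weighted impurity drops to ~1.333.
  println(calc.add(1.toChar, 1.0))
}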
| CitrineInformatics/lolo | src/main/scala/io/citrine/lolo/trees/impurity/GiniCalculator.scala | Scala | apache-2.0 | 2,945 |
package scrupal.html
import scalatags.generic.Util
/** Polymer Paper Elements
* Paper elements are a set of visual elements that implement Google's Material Design.
*/
trait PolymerPaperTags[Builder, Output <: FragT, FragT] extends Util[Builder, Output, FragT] {
/** Material design status message for elements */
lazy val `badge` = "paper-badge".tag
/** Common behaviors across the paper elements */
lazy val `behaviors` = "paper-behaviors".tag
/** Material design button */
lazy val `button` = "paper-button".tag
/** Material design piece of paper with unique related data */
lazy val `card` = "paper-card".tag
/** A material design checkbox */
lazy val `checkbox` = "paper-checkbox".tag
/** A Material Design dialog */
lazy val `dialog` = "paper-dialog".tag
/** Implements a behavior used for material design dialogs */
lazy val `dialog-behavior` = "paper-dialog-behavior".tag
/** A scrollable area used inside the material design dialog */
lazy val `dialog-scrollable` = "paper-dialog-scrollable".tag
/** A responsive drawer panel */
lazy val `drawer-panel` = "paper-drawer-panel".tag
/** An element that works similarly to a native browser select */
lazy val `dropdown-menu` = "paper-dropdown-menu".tag
/** A material design floating action button */
lazy val `fab` = "paper-fab".tag
/** A header and content wrapper for layout with headers */
lazy val `header-panel` = "paper-header-panel".tag
/** A material design icon button */
lazy val `icon-button` = "paper-icon-button".tag
/** Material design text fields */
lazy val `input` = "paper-input".tag
/** A material-design styled list item */
lazy val `item` = "paper-item".tag
/** Implements an accessible material design listbox */
lazy val `listbox` = "paper-listbox".tag
/** A material design container that looks like a lifted sheet of paper */
lazy val `material` = "paper-material".tag
/** Implements an accessible material design menu */
lazy val `menu` = "paper-menu".tag
/** A material design element that composes a trigger and a dropdown menu */
lazy val `menu-button` = "paper-menu-button".tag
/** A material design progress bar */
lazy val `progress` = "paper-progress".tag
/** A material design radio button */
lazy val `radio-button` = "paper-radio-button".tag
/** A group of material design radio buttons */
lazy val `radio-group` = "paper-radio-group".tag
/** Adds a material design ripple to any container */
lazy val `ripple` = "paper-ripple".tag
/** A header bar with scrolling behavior */
lazy val `scroll-header-panel` = "paper-scroll-header-panel".tag
/** A material design-style slider */
lazy val `slider` = "paper-slider".tag
/** A material design spinner */
lazy val `spinner` = "paper-spinner".tag
/** Common (global) styles for Material Design elements. */
lazy val `styles` = "paper-styles".tag
/** Material design tabs */
lazy val `tabs` = "paper-tabs".tag
/** A material design notification toast */
lazy val `toast` = "paper-toast".tag
/** A material design toggle button control */
lazy val `toggle-button` = "paper-toggle-button".tag
/** A material design toolbar that is easily customizable */
lazy val `toolbar` = "paper-toolbar".tag
/** Material design tooltip popup for content */
lazy val `tooltip` = "paper-tooltip".tag
}
| scrupal/scrupal-core | scrupal-shared/src/main/scala/scrupal/html/PolymerPaperTags.scala | Scala | apache-2.0 | 3,355 |
package com.esri
case class OnlineMu(var n: Int = 0, var mx: Double = 0.0, var my: Double = 0.0) {
def add(x: Double, y: Double) = {
n += 1
val dx = x - mx
mx += dx / n
val dy = y - my
my += dy / n
this
}
def deviations(xy: Iterable[(Double, Double)]) = {
xy.foldLeft((Array.empty[(Double, Double)], 0.0, 0.0, 0.0)) {
case ((arr, x2, y2, xy), (x, y)) => {
val dx = x - mx
val dy = y - my
(arr :+(dx, dy), x2 + dx * dx, y2 + dy * dy, xy + dx * dy)
}
}
}
}
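// Editor's sketch (not part of the original source): accumulating a few points and
// reading back the running mean and the deviation sums.
object OnlineMuExample extends App {
  val xy = Seq((0.0, 0.0), (2.0, 0.0), (2.0, 2.0), (0.0, 2.0))
  val mu = OnlineMu()
  xy.foreach { case (x, y) => mu.add(x, y) }
  // The running mean converges to the centroid, here approximately (1.0, 1.0).
  println((mu.mx, mu.my))
  // Deviation sums around that mean: sum of dx^2 ~ 4.0, sum of dy^2 ~ 4.0, sum of dx*dy ~ 0.0.
  val (_, sx2, sy2, sxy) = mu.deviations(xy)
  println((sx2, sy2, sxy))
}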
| mraad/spark-std-dist | src/main/scala/com/esri/OnlineMu.scala | Scala | apache-2.0 | 537 |
// Copyright 2017 EPFL DATA Lab (data.epfl.ch)
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package squid
package utils
import scala.language.experimental.macros
/** Takes anything that looks like a tuple and makes it a List of the appropriate type.
  * `asList` also acts on "pseudo-tuples" of 0 or 1 fields: () and (x) */
object Tuple2List {
implicit def productAsList(tup: Product): List[Any] = macro asListImpl
implicit def asList(tup: Any): List[Any] = macro asListImpl
import scala.reflect.macros.whitebox
def asListImpl(c: whitebox.Context)(tup: c.Tree): c.Tree = {
import c.universe._
// Used to be:
/*
tup match {
case q"(..$defs)" => q"_root_.scala.List(..$defs)"
case _ => null
}
*/
// Converts, eg, `a -> b -> c` to `a :: b :: c :: Nil`
def rec(x: Tree): Tree = x match {
case q"scala.this.Predef.ArrowAssoc[$_]($a).->[$_]($b)" =>
val r = rec(b)
if (r == null) q"$a :: $b :: Nil" else q"$a :: $r"
case q"(..$defs)" => q"_root_.scala.List(..$defs)"
case _ => null
}
//c.warning(c.enclosingPosition, showCode(tup))
rec(tup) //and (x=>c.warning(c.enclosingPosition, showCode(x)))
}
}
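// Editor's note (illustrative only, not in the original source): because `asList` is a
// whitebox macro, it has to be compiled in an upstream module before call sites can
// expand it. A downstream call site would then look roughly like:
//   import squid.utils.Tuple2List.asList
//   val xs: List[Any] = asList((1, "two", 3.0)) // List(1, "two", 3.0)
//   val none: List[Any] = asList(())            // List() -- the 0-field pseudo-tuple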
| epfldata/squid | core/src/main/scala/squid/utils/Tuple2List.scala | Scala | apache-2.0 | 1,734 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.orc
import java.math.MathContext
import java.nio.charset.StandardCharsets
import java.sql.{Date, Timestamp}
import scala.collection.JavaConverters._
import org.apache.orc.storage.ql.io.sarg.{PredicateLeaf, SearchArgument}
import org.apache.spark.sql.{AnalysisException, Column, DataFrame}
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.planning.PhysicalOperation
import org.apache.spark.sql.execution.datasources.{DataSourceStrategy, HadoopFsRelation, LogicalRelation}
import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation
import org.apache.spark.sql.execution.datasources.v2.orc.OrcTable
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types._
/**
* A test suite that tests Apache ORC filter API based filter pushdown optimization.
 * OrcFilterSuite and HiveOrcFilterSuite are logically duplicated to provide the same test coverage.
 * The difference is the packages containing the 'Predicate' and 'SearchArgument' classes.
* - OrcFilterSuite uses 'org.apache.orc.storage.ql.io.sarg' package.
* - HiveOrcFilterSuite uses 'org.apache.hadoop.hive.ql.io.sarg' package.
*/
class OrcFilterSuite extends OrcTest with SharedSQLContext {
protected def checkFilterPredicate(
df: DataFrame,
predicate: Predicate,
checker: (SearchArgument) => Unit): Unit = {
val output = predicate.collect { case a: Attribute => a }.distinct
val query = df
.select(output.map(e => Column(e)): _*)
.where(Column(predicate))
query.queryExecution.optimizedPlan match {
case PhysicalOperation(_, filters,
DataSourceV2Relation(orcTable: OrcTable, _, options)) =>
assert(filters.nonEmpty, "No filter is analyzed from the given query")
val scanBuilder = orcTable.newScanBuilder(options)
scanBuilder.pushFilters(filters.flatMap(DataSourceStrategy.translateFilter).toArray)
val pushedFilters = scanBuilder.pushedFilters()
assert(pushedFilters.nonEmpty, "No filter is pushed down")
val maybeFilter = OrcFilters.createFilter(query.schema, pushedFilters)
assert(maybeFilter.isDefined, s"Couldn't generate filter predicate for $pushedFilters")
checker(maybeFilter.get)
case _ =>
throw new AnalysisException("Can not match OrcTable in the query.")
}
}
protected def checkFilterPredicate
(predicate: Predicate, filterOperator: PredicateLeaf.Operator)
(implicit df: DataFrame): Unit = {
def checkComparisonOperator(filter: SearchArgument) = {
val operator = filter.getLeaves.asScala
assert(operator.map(_.getOperator).contains(filterOperator))
}
checkFilterPredicate(df, predicate, checkComparisonOperator)
}
protected def checkFilterPredicate
(predicate: Predicate, stringExpr: String)
(implicit df: DataFrame): Unit = {
def checkLogicalOperator(filter: SearchArgument) = {
assert(filter.toString == stringExpr)
}
checkFilterPredicate(df, predicate, checkLogicalOperator)
}
test("filter pushdown - integer") {
withOrcDataFrame((1 to 4).map(i => Tuple1(Option(i)))) { implicit df =>
checkFilterPredicate('_1.isNull, PredicateLeaf.Operator.IS_NULL)
checkFilterPredicate('_1 === 1, PredicateLeaf.Operator.EQUALS)
checkFilterPredicate('_1 <=> 1, PredicateLeaf.Operator.NULL_SAFE_EQUALS)
checkFilterPredicate('_1 < 2, PredicateLeaf.Operator.LESS_THAN)
checkFilterPredicate('_1 > 3, PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate('_1 <= 1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate('_1 >= 4, PredicateLeaf.Operator.LESS_THAN)
checkFilterPredicate(Literal(1) === '_1, PredicateLeaf.Operator.EQUALS)
checkFilterPredicate(Literal(1) <=> '_1, PredicateLeaf.Operator.NULL_SAFE_EQUALS)
checkFilterPredicate(Literal(2) > '_1, PredicateLeaf.Operator.LESS_THAN)
checkFilterPredicate(Literal(3) < '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate(Literal(1) >= '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate(Literal(4) <= '_1, PredicateLeaf.Operator.LESS_THAN)
}
}
test("filter pushdown - long") {
withOrcDataFrame((1 to 4).map(i => Tuple1(Option(i.toLong)))) { implicit df =>
checkFilterPredicate('_1.isNull, PredicateLeaf.Operator.IS_NULL)
checkFilterPredicate('_1 === 1, PredicateLeaf.Operator.EQUALS)
checkFilterPredicate('_1 <=> 1, PredicateLeaf.Operator.NULL_SAFE_EQUALS)
checkFilterPredicate('_1 < 2, PredicateLeaf.Operator.LESS_THAN)
checkFilterPredicate('_1 > 3, PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate('_1 <= 1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate('_1 >= 4, PredicateLeaf.Operator.LESS_THAN)
checkFilterPredicate(Literal(1) === '_1, PredicateLeaf.Operator.EQUALS)
checkFilterPredicate(Literal(1) <=> '_1, PredicateLeaf.Operator.NULL_SAFE_EQUALS)
checkFilterPredicate(Literal(2) > '_1, PredicateLeaf.Operator.LESS_THAN)
checkFilterPredicate(Literal(3) < '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate(Literal(1) >= '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate(Literal(4) <= '_1, PredicateLeaf.Operator.LESS_THAN)
}
}
test("filter pushdown - float") {
withOrcDataFrame((1 to 4).map(i => Tuple1(Option(i.toFloat)))) { implicit df =>
checkFilterPredicate('_1.isNull, PredicateLeaf.Operator.IS_NULL)
checkFilterPredicate('_1 === 1, PredicateLeaf.Operator.EQUALS)
checkFilterPredicate('_1 <=> 1, PredicateLeaf.Operator.NULL_SAFE_EQUALS)
checkFilterPredicate('_1 < 2, PredicateLeaf.Operator.LESS_THAN)
checkFilterPredicate('_1 > 3, PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate('_1 <= 1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate('_1 >= 4, PredicateLeaf.Operator.LESS_THAN)
checkFilterPredicate(Literal(1) === '_1, PredicateLeaf.Operator.EQUALS)
checkFilterPredicate(Literal(1) <=> '_1, PredicateLeaf.Operator.NULL_SAFE_EQUALS)
checkFilterPredicate(Literal(2) > '_1, PredicateLeaf.Operator.LESS_THAN)
checkFilterPredicate(Literal(3) < '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate(Literal(1) >= '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate(Literal(4) <= '_1, PredicateLeaf.Operator.LESS_THAN)
}
}
test("filter pushdown - double") {
withOrcDataFrame((1 to 4).map(i => Tuple1(Option(i.toDouble)))) { implicit df =>
checkFilterPredicate('_1.isNull, PredicateLeaf.Operator.IS_NULL)
checkFilterPredicate('_1 === 1, PredicateLeaf.Operator.EQUALS)
checkFilterPredicate('_1 <=> 1, PredicateLeaf.Operator.NULL_SAFE_EQUALS)
checkFilterPredicate('_1 < 2, PredicateLeaf.Operator.LESS_THAN)
checkFilterPredicate('_1 > 3, PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate('_1 <= 1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate('_1 >= 4, PredicateLeaf.Operator.LESS_THAN)
checkFilterPredicate(Literal(1) === '_1, PredicateLeaf.Operator.EQUALS)
checkFilterPredicate(Literal(1) <=> '_1, PredicateLeaf.Operator.NULL_SAFE_EQUALS)
checkFilterPredicate(Literal(2) > '_1, PredicateLeaf.Operator.LESS_THAN)
checkFilterPredicate(Literal(3) < '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate(Literal(1) >= '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate(Literal(4) <= '_1, PredicateLeaf.Operator.LESS_THAN)
}
}
test("filter pushdown - string") {
withOrcDataFrame((1 to 4).map(i => Tuple1(i.toString))) { implicit df =>
checkFilterPredicate('_1.isNull, PredicateLeaf.Operator.IS_NULL)
checkFilterPredicate('_1 === "1", PredicateLeaf.Operator.EQUALS)
checkFilterPredicate('_1 <=> "1", PredicateLeaf.Operator.NULL_SAFE_EQUALS)
checkFilterPredicate('_1 < "2", PredicateLeaf.Operator.LESS_THAN)
checkFilterPredicate('_1 > "3", PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate('_1 <= "1", PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate('_1 >= "4", PredicateLeaf.Operator.LESS_THAN)
checkFilterPredicate(Literal("1") === '_1, PredicateLeaf.Operator.EQUALS)
checkFilterPredicate(Literal("1") <=> '_1, PredicateLeaf.Operator.NULL_SAFE_EQUALS)
checkFilterPredicate(Literal("2") > '_1, PredicateLeaf.Operator.LESS_THAN)
checkFilterPredicate(Literal("3") < '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate(Literal("1") >= '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate(Literal("4") <= '_1, PredicateLeaf.Operator.LESS_THAN)
}
}
test("filter pushdown - boolean") {
withOrcDataFrame((true :: false :: Nil).map(b => Tuple1.apply(Option(b)))) { implicit df =>
checkFilterPredicate('_1.isNull, PredicateLeaf.Operator.IS_NULL)
checkFilterPredicate('_1 === true, PredicateLeaf.Operator.EQUALS)
checkFilterPredicate('_1 <=> true, PredicateLeaf.Operator.NULL_SAFE_EQUALS)
checkFilterPredicate('_1 < true, PredicateLeaf.Operator.LESS_THAN)
checkFilterPredicate('_1 > false, PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate('_1 <= false, PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate('_1 >= false, PredicateLeaf.Operator.LESS_THAN)
checkFilterPredicate(Literal(false) === '_1, PredicateLeaf.Operator.EQUALS)
checkFilterPredicate(Literal(false) <=> '_1, PredicateLeaf.Operator.NULL_SAFE_EQUALS)
checkFilterPredicate(Literal(false) > '_1, PredicateLeaf.Operator.LESS_THAN)
checkFilterPredicate(Literal(true) < '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate(Literal(true) >= '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate(Literal(true) <= '_1, PredicateLeaf.Operator.LESS_THAN)
}
}
test("filter pushdown - decimal") {
withOrcDataFrame((1 to 4).map(i => Tuple1.apply(BigDecimal.valueOf(i)))) { implicit df =>
checkFilterPredicate('_1.isNull, PredicateLeaf.Operator.IS_NULL)
checkFilterPredicate('_1 === BigDecimal.valueOf(1), PredicateLeaf.Operator.EQUALS)
checkFilterPredicate('_1 <=> BigDecimal.valueOf(1), PredicateLeaf.Operator.NULL_SAFE_EQUALS)
checkFilterPredicate('_1 < BigDecimal.valueOf(2), PredicateLeaf.Operator.LESS_THAN)
checkFilterPredicate('_1 > BigDecimal.valueOf(3), PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate('_1 <= BigDecimal.valueOf(1), PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate('_1 >= BigDecimal.valueOf(4), PredicateLeaf.Operator.LESS_THAN)
checkFilterPredicate(
Literal(BigDecimal.valueOf(1)) === '_1, PredicateLeaf.Operator.EQUALS)
checkFilterPredicate(
Literal(BigDecimal.valueOf(1)) <=> '_1, PredicateLeaf.Operator.NULL_SAFE_EQUALS)
checkFilterPredicate(
Literal(BigDecimal.valueOf(2)) > '_1, PredicateLeaf.Operator.LESS_THAN)
checkFilterPredicate(
Literal(BigDecimal.valueOf(3)) < '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate(
Literal(BigDecimal.valueOf(1)) >= '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate(
Literal(BigDecimal.valueOf(4)) <= '_1, PredicateLeaf.Operator.LESS_THAN)
}
}
test("filter pushdown - timestamp") {
val timeString = "2015-08-20 14:57:00"
val timestamps = (1 to 4).map { i =>
val milliseconds = Timestamp.valueOf(timeString).getTime + i * 3600
new Timestamp(milliseconds)
}
withOrcDataFrame(timestamps.map(Tuple1(_))) { implicit df =>
checkFilterPredicate('_1.isNull, PredicateLeaf.Operator.IS_NULL)
checkFilterPredicate('_1 === timestamps(0), PredicateLeaf.Operator.EQUALS)
checkFilterPredicate('_1 <=> timestamps(0), PredicateLeaf.Operator.NULL_SAFE_EQUALS)
checkFilterPredicate('_1 < timestamps(1), PredicateLeaf.Operator.LESS_THAN)
checkFilterPredicate('_1 > timestamps(2), PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate('_1 <= timestamps(0), PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate('_1 >= timestamps(3), PredicateLeaf.Operator.LESS_THAN)
checkFilterPredicate(Literal(timestamps(0)) === '_1, PredicateLeaf.Operator.EQUALS)
checkFilterPredicate(Literal(timestamps(0)) <=> '_1, PredicateLeaf.Operator.NULL_SAFE_EQUALS)
checkFilterPredicate(Literal(timestamps(1)) > '_1, PredicateLeaf.Operator.LESS_THAN)
checkFilterPredicate(Literal(timestamps(2)) < '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate(Literal(timestamps(0)) >= '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate(Literal(timestamps(3)) <= '_1, PredicateLeaf.Operator.LESS_THAN)
}
}
test("filter pushdown - combinations with logical operators") {
withOrcDataFrame((1 to 4).map(i => Tuple1(Option(i)))) { implicit df =>
checkFilterPredicate(
'_1.isNotNull,
"leaf-0 = (IS_NULL _1), expr = (not leaf-0)"
)
checkFilterPredicate(
'_1 =!= 1,
"leaf-0 = (IS_NULL _1), leaf-1 = (EQUALS _1 1), expr = (and (not leaf-0) (not leaf-1))"
)
checkFilterPredicate(
!('_1 < 4),
"leaf-0 = (IS_NULL _1), leaf-1 = (LESS_THAN _1 4), expr = (and (not leaf-0) (not leaf-1))"
)
checkFilterPredicate(
'_1 < 2 || '_1 > 3,
"leaf-0 = (LESS_THAN _1 2), leaf-1 = (LESS_THAN_EQUALS _1 3), " +
"expr = (or leaf-0 (not leaf-1))"
)
checkFilterPredicate(
'_1 < 2 && '_1 > 3,
"leaf-0 = (IS_NULL _1), leaf-1 = (LESS_THAN _1 2), leaf-2 = (LESS_THAN_EQUALS _1 3), " +
"expr = (and (not leaf-0) leaf-1 (not leaf-2))"
)
}
}
test("filter pushdown - date") {
val dates = Seq("2017-08-18", "2017-08-19", "2017-08-20", "2017-08-21").map { day =>
Date.valueOf(day)
}
withOrcDataFrame(dates.map(Tuple1(_))) { implicit df =>
checkFilterPredicate('_1.isNull, PredicateLeaf.Operator.IS_NULL)
checkFilterPredicate('_1 === dates(0), PredicateLeaf.Operator.EQUALS)
checkFilterPredicate('_1 <=> dates(0), PredicateLeaf.Operator.NULL_SAFE_EQUALS)
checkFilterPredicate('_1 < dates(1), PredicateLeaf.Operator.LESS_THAN)
checkFilterPredicate('_1 > dates(2), PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate('_1 <= dates(0), PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate('_1 >= dates(3), PredicateLeaf.Operator.LESS_THAN)
checkFilterPredicate(Literal(dates(0)) === '_1, PredicateLeaf.Operator.EQUALS)
checkFilterPredicate(Literal(dates(0)) <=> '_1, PredicateLeaf.Operator.NULL_SAFE_EQUALS)
checkFilterPredicate(Literal(dates(1)) > '_1, PredicateLeaf.Operator.LESS_THAN)
checkFilterPredicate(Literal(dates(2)) < '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate(Literal(dates(0)) >= '_1, PredicateLeaf.Operator.LESS_THAN_EQUALS)
checkFilterPredicate(Literal(dates(3)) <= '_1, PredicateLeaf.Operator.LESS_THAN)
}
}
test("no filter pushdown - non-supported types") {
implicit class IntToBinary(int: Int) {
def b: Array[Byte] = int.toString.getBytes(StandardCharsets.UTF_8)
}
// ArrayType
withOrcDataFrame((1 to 4).map(i => Tuple1(Array(i)))) { implicit df =>
checkNoFilterPredicate('_1.isNull, noneSupported = true)
}
// BinaryType
withOrcDataFrame((1 to 4).map(i => Tuple1(i.b))) { implicit df =>
checkNoFilterPredicate('_1 <=> 1.b, noneSupported = true)
}
// MapType
withOrcDataFrame((1 to 4).map(i => Tuple1(Map(i -> i)))) { implicit df =>
checkNoFilterPredicate('_1.isNotNull, noneSupported = true)
}
}
test("SPARK-12218 and SPARK-25699 Converting conjunctions into ORC SearchArguments") {
import org.apache.spark.sql.sources._
// The `LessThan` should be converted while the `StringContains` shouldn't
val schema = new StructType(
Array(
StructField("a", IntegerType, nullable = true),
StructField("b", StringType, nullable = true)))
assertResult("leaf-0 = (LESS_THAN a 10), expr = leaf-0") {
OrcFilters.createFilter(schema, Array(
LessThan("a", 10),
StringContains("b", "prefix")
)).get.toString
}
// The `LessThan` should be converted while the whole inner `And` shouldn't
assertResult("leaf-0 = (LESS_THAN a 10), expr = leaf-0") {
OrcFilters.createFilter(schema, Array(
LessThan("a", 10),
Not(And(
GreaterThan("a", 1),
StringContains("b", "prefix")
))
)).get.toString
}
// Safely remove unsupported `StringContains` predicate and push down `LessThan`
assertResult("leaf-0 = (LESS_THAN a 10), expr = leaf-0") {
OrcFilters.createFilter(schema, Array(
And(
LessThan("a", 10),
StringContains("b", "prefix")
)
)).get.toString
}
// Safely remove unsupported `StringContains` predicate, push down `LessThan` and `GreaterThan`.
assertResult("leaf-0 = (LESS_THAN a 10), leaf-1 = (LESS_THAN_EQUALS a 1)," +
" expr = (and leaf-0 (not leaf-1))") {
OrcFilters.createFilter(schema, Array(
And(
And(
LessThan("a", 10),
StringContains("b", "prefix")
),
GreaterThan("a", 1)
)
)).get.toString
}
}
test("SPARK-27699 Converting disjunctions into ORC SearchArguments") {
import org.apache.spark.sql.sources._
// The `LessThan` should be converted while the `StringContains` shouldn't
val schema = new StructType(
Array(
StructField("a", IntegerType, nullable = true),
StructField("b", StringType, nullable = true)))
    // The `StringContains` predicate is not able to be pushed down.
assertResult("leaf-0 = (LESS_THAN_EQUALS a 10), leaf-1 = (LESS_THAN a 1)," +
" expr = (or (not leaf-0) leaf-1)") {
OrcFilters.createFilter(schema, Array(
Or(
GreaterThan("a", 10),
And(
StringContains("b", "prefix"),
LessThan("a", 1)
)
)
)).get.toString
}
assertResult("leaf-0 = (LESS_THAN_EQUALS a 10), leaf-1 = (LESS_THAN a 1)," +
" expr = (or (not leaf-0) leaf-1)") {
OrcFilters.createFilter(schema, Array(
Or(
And(
GreaterThan("a", 10),
StringContains("b", "foobar")
),
And(
StringContains("b", "prefix"),
LessThan("a", 1)
)
)
)).get.toString
}
assert(OrcFilters.createFilter(schema, Array(
Or(
StringContains("b", "foobar"),
And(
StringContains("b", "prefix"),
LessThan("a", 1)
)
)
)).isEmpty)
}
test("SPARK-27160: Fix casting of the DecimalType literal") {
import org.apache.spark.sql.sources._
val schema = StructType(Array(StructField("a", DecimalType(3, 2))))
assertResult("leaf-0 = (LESS_THAN a 3.14), expr = leaf-0") {
OrcFilters.createFilter(schema, Array(
LessThan(
"a",
new java.math.BigDecimal(3.14, MathContext.DECIMAL64).setScale(2)))
).get.toString
}
}
}
| aosagie/spark | sql/core/v1.2.1/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcFilterSuite.scala | Scala | apache-2.0 | 20,343 |
/***
* Copyright 2014 Rackspace US, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.rackspace.com.papi.components.checker.util
import java.io.File
import java.net.URI
//
// Transform requires an absolute URL as input; the URL resolver tries
// to construct a full URI from a system id.
//
object URLResolver {
def toAbsoluteSystemId(systemId : String) : String = {
toAbsoluteSystemId(systemId, (new File(System.getProperty("user.dir")).toURI().toString))
}
def toAbsoluteSystemId(systemId : String, base : String) : String = {
val inURI = new URI(systemId)
if (!inURI.isAbsolute()) {
(new URI(base)).resolve(systemId).toString
} else {
systemId
}
}
}
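// Editor's sketch (not part of the original source): the directory and file names below
// are made up, purely to illustrate the resolution behaviour.
object URLResolverExample extends App {
  // A relative system id is resolved against the supplied base...
  println(URLResolver.toAbsoluteSystemId("wadl/checker.xsl", "file:/opt/checker/"))
  // -> file:/opt/checker/wadl/checker.xsl
  // ...while an already-absolute system id is returned unchanged.
  println(URLResolver.toAbsoluteSystemId("http://example.com/checker.wadl", "file:/opt/checker/"))
  // -> http://example.com/checker.wadl
}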
| wdschei/api-checker | util/src/main/scala/com/rackspace/com/papi/compenents/checker/util/URLResolver.scala | Scala | apache-2.0 | 1,246 |
package org.scalaide.core
package sbtbuilder
import org.junit.Test
import org.eclipse.core.runtime.NullProgressMonitor
import org.eclipse.core.resources.IncrementalProjectBuilder
import org.eclipse.jdt.core.IJavaModelMarker
import org.eclipse.jdt.core.JavaCore
import org.eclipse.jdt.core.IProblemRequestor
import org.eclipse.jdt.core.WorkingCopyOwner
import org.eclipse.core.runtime.Path
import org.eclipse.core.resources.IResource
import org.junit.Assert
import org.eclipse.core.resources.IMarker
import testsetup.SDTTestUtils
import org.eclipse.core.resources.IFile
import org.junit.Ignore
import org.junit.Before
import org.junit.After
object ScalaCompilerClasspathTest extends testsetup.TestProjectSetup("builder-compiler-classpath") {
val baseRawClasspath = project.javaProject.getRawClasspath()
}
class ScalaCompilerClasspathTest {
import ScalaCompilerClasspathTest._
@Before
def setupWorkspace(): Unit = {
SDTTestUtils.enableAutoBuild(false)
}
@Test def testWithoutCompilerOnClasspath(): Unit = {
println("building " + project)
project.javaProject.setRawClasspath(baseRawClasspath, new NullProgressMonitor)
project.clean(new NullProgressMonitor())
project.underlying.build(IncrementalProjectBuilder.CLEAN_BUILD, new NullProgressMonitor)
project.underlying.build(IncrementalProjectBuilder.FULL_BUILD, new NullProgressMonitor)
val unit = compilationUnit("test/CompilerDep.scala")
val errors = unit.getUnderlyingResource().findMarkers(IJavaModelMarker.JAVA_MODEL_PROBLEM_MARKER, true, IResource.DEPTH_INFINITE)
println("problem: %s: %s".format(unit.getResource(), errors.toList))
Assert.assertTrue("Single compiler error expected", errors.length == 1)
}
@Test def testWithCompilerOnClasspath(): Unit = {
println("building " + project)
project.clean(new NullProgressMonitor())
val p = new Path(project.underlying.getLocation().toOSString()).append("/lib/2.10.x/scala-compiler.jar")
Assert.assertTrue("scala compiler exists in the test framework", p.toFile().exists())
val newRawClasspath = baseRawClasspath :+ JavaCore.newLibraryEntry(p, null, null)
project.javaProject.setRawClasspath(newRawClasspath, new NullProgressMonitor)
project.underlying.build(IncrementalProjectBuilder.FULL_BUILD, new NullProgressMonitor)
val unit = compilationUnit("test/CompilerDep.scala")
// val errors = unit.getUnderlyingResource().findMarkers(IJavaModelMarker.JAVA_MODEL_PROBLEM_MARKER, true, IResource.DEPTH_INFINITE)
val errors = SDTTestUtils.getErrorMessages(unit)
println("problem: %s: %s".format(unit, errors))
Assert.assertTrue("Build errors found", errors.isEmpty)
}
}
| Kwestor/scala-ide | org.scala-ide.sdt.core.tests/src/org/scalaide/core/sbtbuilder/ScalaCompilerClasspathTest.scala | Scala | bsd-3-clause | 2,680 |
package com.github.uchibori3.mfcloud.invoice.service
import akka.{ Done, NotUsed }
import akka.actor.ActorSystem
import akka.http.javadsl.model.headers.RawHeader
import akka.http.scaladsl.model.ContentTypes.`application/json`
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.headers.OAuth2BearerToken
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Source
import com.github.uchibori3.mfcloud.invoice.HttpClient
import com.github.uchibori3.mfcloud.invoice.request.CreateBillRequest
import com.github.uchibori3.mfcloud.invoice.response.BillResponse
import de.heikoseeberger.akkahttpcirce.FailFastCirceSupport
import io.circe.generic.AutoDerivation
import io.circe.syntax._
import com.typesafe.scalalogging.LazyLogging
import scala.concurrent.ExecutionContextExecutor
trait Bills {
def post(createBillRequest: CreateBillRequest): Source[Either[Throwable, BillResponse], NotUsed]
def getPdf(id: String): Source[Either[Throwable, HttpResponse], NotUsed]
def delete(id: String): Source[Either[Throwable, Done], NotUsed]
}
class BillsImpl(
host: String,
client: HttpClient,
credential: OAuth2BearerToken,
maxConnections: Int
)(implicit system: ActorSystem)
extends Bills
with Service
with FailFastCirceSupport
with AutoDerivation
with LazyLogging {
implicit val executor: ExecutionContextExecutor = system.dispatcher
implicit val materializer: ActorMaterializer = ActorMaterializer()
override def post(createBillRequest: CreateBillRequest): Source[Either[Throwable, BillResponse], NotUsed] = {
val entity = HttpEntity(`application/json`, createBillRequest.asJson.noSpaces)
val request = HttpRequest(HttpMethods.POST, "/api/v1/billings", entity = entity)
.addCredentials(credential)
.addHeader(RawHeader.create("Accept", "*/*"))
Source
.single(request)
.via(client.connectionHttps(host))
.map { res =>
logger.debug(s"Http request: $request")
logger.debug(s"Http response: $res")
res
}
.mapAsync(maxConnections)(handleError)
.mapAsync(maxConnections)(Unmarshal(_).to[BillResponse])
.map(Right.apply)
.recover {
case ex =>
logger.error("Failed", ex)
Left(ex)
}
}
override def getPdf(id: String): Source[Either[Throwable, HttpResponse], NotUsed] = {
val request = HttpRequest(HttpMethods.GET, s"/api/v1/billings/$id.pdf")
.addCredentials(credential)
.addHeader(RawHeader.create("Accept", "*/*"))
Source
.single(request)
.via(client.connectionHttps(host))
.map { res =>
logger.debug(s"Http request: $request")
logger.debug(s"Http response: $res")
res
}
.mapAsync(maxConnections)(handleError)
.map(Right.apply)
.recover {
case ex =>
logger.error("Failed", ex)
Left(ex)
}
}
override def delete(id: String): Source[Either[Throwable, Done], NotUsed] = {
val request = HttpRequest(HttpMethods.DELETE, s"/api/v1/billings/$id")
.addCredentials(credential)
.addHeader(RawHeader.create("Accept", "*/*"))
Source
.single(request)
.via(client.connectionHttps(host))
.map { res =>
logger.debug(s"Http request: $request")
logger.debug(s"Http response: $res")
res
}
.mapAsync(maxConnections)(handleError)
.map(_ => Right(Done))
.recover {
case ex =>
logger.error("Failed", ex)
Left(ex)
}
}
}
| Uchibori3/mfcloud-invoice-scala | src/main/scala/com/github/uchibori3/mfcloud/invoice/service/Bills.scala | Scala | apache-2.0 | 3,590 |
/*
* MUSIT is a museum database to archive natural and cultural history data.
* Copyright (C) 2016 MUSIT Norway, part of www.uio.no (University of Oslo)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License,
* or any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
package services
import models.Person
import no.uio.musit.security.AuthenticatedUser
import scala.concurrent.Future
class UserService {
/**
* Gets an actor representing the current user. This is now a silly service!
*
* @param user the current AuthenticatedUser.
* @return a Future Person representation of the current user.
*/
def currentUserAsActor(user: AuthenticatedUser): Future[Person] = {
Future.successful(Person.fromAuthUser(user))
}
}
| kpmeen/musit | service_actor/app/services/UserService.scala | Scala | gpl-2.0 | 1,338 |
package viewModel
import com.thetestpeople.trt.model._
import com.github.nscala_time.time.Imports._
import org.joda.time.LocalDate
import com.thetestpeople.trt.utils.DateUtils
case class WeatherInfo(weather: Double) {
def iconPath: String = WeatherIcons.weatherIcon(weather)
def passRate: String = "Pass rate: " + (weather * 100).toInt + "%"
}
case class TestView(
enrichedTest: EnrichedTest,
categories: Seq[String] = Seq(),
isIgnoredInConfiguration: Boolean = false) extends HasTestName {
private val test = enrichedTest.test
def testName = test.qualifiedName
def id = test.id
def deleted = enrichedTest.test.deleted
def ballIconOpt: Option[String] =
if (isIgnoredInConfiguration)
Some(BallIcons.GreyBall)
else
enrichedTest.statusOpt.map(BallIcons.icon)
def statusOpt = enrichedTest.statusOpt
def lastSummaryOpt: Option[AbbreviableText] = enrichedTest.analysisOpt.flatMap(_.lastSummaryOpt).map(AbbreviableText)
def weatherInfoOpt: Option[WeatherInfo] = enrichedTest.analysisOpt.map(_.weather).map(WeatherInfo)
def consecutiveFailuresOpt: Option[Int] =
for {
analysis ← enrichedTest.analysisOpt
consecutiveFailures = analysis.consecutiveFailures
if consecutiveFailures > 0
} yield consecutiveFailures
def lastExecutionOpt: Option[(Id[Execution], TimeDescription)] =
for (id ← lastExecutionIdOpt; time ← lastExecutedTimeOpt) yield (id, time)
def lastExecutionIdOpt: Option[Id[Execution]] =
enrichedTest.analysisOpt.map(_.lastExecutionId)
def lastExecutedTimeOpt: Option[TimeDescription] =
enrichedTest.analysisOpt.map(_.lastExecutionTime).map(TimeDescription)
def lastPassedExecutionOpt: Option[(Id[Execution], TimeDescription)] =
for (id ← lastPassedExecutionIdOpt; time ← lastPassedTimeOpt) yield (id, time)
def lastPassedExecutionIdOpt: Option[Id[Execution]] =
enrichedTest.analysisOpt.flatMap(_.lastPassedExecutionIdOpt)
def lastPassedTimeOpt: Option[TimeDescription] =
enrichedTest.analysisOpt.flatMap(_.lastPassedTimeOpt).map(TimeDescription)
def lastFailedExecutionOpt: Option[(Id[Execution], TimeDescription)] =
for (id ← lastFailedExecutionIdOpt; time ← lastFailedTimeOpt) yield (id, time)
def lastFailedExecutionIdOpt: Option[Id[Execution]] =
enrichedTest.analysisOpt.flatMap(_.lastFailedExecutionIdOpt)
def lastFailedTimeOpt: Option[TimeDescription] =
enrichedTest.analysisOpt.flatMap(_.lastFailedTimeOpt).map(TimeDescription)
def failingSinceOpt: Option[TimeDescription] =
for {
analysis ← enrichedTest.analysisOpt
failingSince ← analysis.failingSinceOpt
} yield TimeDescription(failingSince)
def commentOpt: Option[String] = enrichedTest.commentOpt
def medianDurationOpt: Option[String] =
enrichedTest.analysisOpt.flatMap(_.medianDurationOpt).map(DateUtils.describeDuration)
} | thetestpeople/trt | app/viewModel/TestView.scala | Scala | mit | 2,900 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming
import java.{util => ju}
import java.text.SimpleDateFormat
import java.util.Date
import org.scalatest.BeforeAndAfter
import org.apache.spark.internal.Logging
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.plans.logical.EventTimeWatermark
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.functions.{count, window}
import org.apache.spark.sql.streaming.OutputMode._
class EventTimeWatermarkSuite extends StreamTest with BeforeAndAfter with Logging {
import testImplicits._
after {
sqlContext.streams.active.foreach(_.stop())
}
test("error on bad column") {
val inputData = MemoryStream[Int].toDF()
val e = intercept[AnalysisException] {
inputData.withWatermark("badColumn", "1 minute")
}
assert(e.getMessage contains "badColumn")
}
test("error on wrong type") {
val inputData = MemoryStream[Int].toDF()
val e = intercept[AnalysisException] {
inputData.withWatermark("value", "1 minute")
}
assert(e.getMessage contains "value")
assert(e.getMessage contains "int")
}
test("event time and watermark metrics") {
// No event time metrics when there is no watermarking
val inputData1 = MemoryStream[Int]
val aggWithoutWatermark = inputData1.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.groupBy(window($"eventTime", "5 seconds") as 'window)
.agg(count("*") as 'count)
.select($"window".getField("start").cast("long").as[Long], $"count".as[Long])
testStream(aggWithoutWatermark, outputMode = Complete)(
AddData(inputData1, 15),
CheckAnswer((15, 1)),
assertEventStats { e => assert(e.isEmpty) },
AddData(inputData1, 10, 12, 14),
CheckAnswer((10, 3), (15, 1)),
assertEventStats { e => assert(e.isEmpty) }
)
// All event time metrics where watermarking is set
val inputData2 = MemoryStream[Int]
val aggWithWatermark = inputData2.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.withWatermark("eventTime", "10 seconds")
.groupBy(window($"eventTime", "5 seconds") as 'window)
.agg(count("*") as 'count)
.select($"window".getField("start").cast("long").as[Long], $"count".as[Long])
testStream(aggWithWatermark)(
AddData(inputData2, 15),
CheckAnswer(),
assertEventStats { e =>
assert(e.get("max") === formatTimestamp(15))
assert(e.get("min") === formatTimestamp(15))
assert(e.get("avg") === formatTimestamp(15))
assert(e.get("watermark") === formatTimestamp(0))
},
AddData(inputData2, 10, 12, 14),
CheckAnswer(),
assertEventStats { e =>
assert(e.get("max") === formatTimestamp(14))
assert(e.get("min") === formatTimestamp(10))
assert(e.get("avg") === formatTimestamp(12))
assert(e.get("watermark") === formatTimestamp(5))
},
AddData(inputData2, 25),
CheckAnswer(),
assertEventStats { e =>
assert(e.get("max") === formatTimestamp(25))
assert(e.get("min") === formatTimestamp(25))
assert(e.get("avg") === formatTimestamp(25))
assert(e.get("watermark") === formatTimestamp(5))
},
AddData(inputData2, 25),
CheckAnswer((10, 3)),
assertEventStats { e =>
assert(e.get("max") === formatTimestamp(25))
assert(e.get("min") === formatTimestamp(25))
assert(e.get("avg") === formatTimestamp(25))
assert(e.get("watermark") === formatTimestamp(15))
}
)
}
test("append mode") {
val inputData = MemoryStream[Int]
val windowedAggregation = inputData.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.withWatermark("eventTime", "10 seconds")
.groupBy(window($"eventTime", "5 seconds") as 'window)
.agg(count("*") as 'count)
.select($"window".getField("start").cast("long").as[Long], $"count".as[Long])
testStream(windowedAggregation)(
AddData(inputData, 10, 11, 12, 13, 14, 15),
CheckLastBatch(),
AddData(inputData, 25), // Advance watermark to 15 seconds
CheckLastBatch(),
assertNumStateRows(3),
AddData(inputData, 25), // Emit items less than watermark and drop their state
CheckLastBatch((10, 5)),
assertNumStateRows(2),
AddData(inputData, 10), // Should not emit anything as data less than watermark
CheckLastBatch(),
assertNumStateRows(2)
)
}
test("update mode") {
val inputData = MemoryStream[Int]
spark.conf.set("spark.sql.shuffle.partitions", "10")
val windowedAggregation = inputData.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.withWatermark("eventTime", "10 seconds")
.groupBy(window($"eventTime", "5 seconds") as 'window)
.agg(count("*") as 'count)
.select($"window".getField("start").cast("long").as[Long], $"count".as[Long])
testStream(windowedAggregation, OutputMode.Update)(
AddData(inputData, 10, 11, 12, 13, 14, 15),
CheckLastBatch((10, 5), (15, 1)),
AddData(inputData, 25), // Advance watermark to 15 seconds
CheckLastBatch((25, 1)),
assertNumStateRows(3),
AddData(inputData, 10, 25), // Ignore 10 as its less than watermark
CheckLastBatch((25, 2)),
assertNumStateRows(2),
AddData(inputData, 10), // Should not emit anything as data less than watermark
CheckLastBatch(),
assertNumStateRows(2)
)
}
test("delay in months and years handled correctly") {
val currentTimeMs = System.currentTimeMillis
val currentTime = new Date(currentTimeMs)
val input = MemoryStream[Long]
val aggWithWatermark = input.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.withWatermark("eventTime", "2 years 5 months")
.groupBy(window($"eventTime", "5 seconds") as 'window)
.agg(count("*") as 'count)
.select($"window".getField("start").cast("long").as[Long], $"count".as[Long])
def monthsSinceEpoch(date: Date): Int = { date.getYear * 12 + date.getMonth }
testStream(aggWithWatermark)(
AddData(input, currentTimeMs / 1000),
CheckAnswer(),
AddData(input, currentTimeMs / 1000),
CheckAnswer(),
assertEventStats { e =>
assert(timestampFormat.parse(e.get("max")).getTime === (currentTimeMs / 1000) * 1000)
val watermarkTime = timestampFormat.parse(e.get("watermark"))
val monthDiff = monthsSinceEpoch(currentTime) - monthsSinceEpoch(watermarkTime)
// monthsSinceEpoch is like `math.floor(num)`, so monthDiff has two possible values.
assert(monthDiff === 29 || monthDiff === 30,
s"currentTime: $currentTime, watermarkTime: $watermarkTime")
}
)
}
test("recovery") {
val inputData = MemoryStream[Int]
val df = inputData.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.withWatermark("eventTime", "10 seconds")
.groupBy(window($"eventTime", "5 seconds") as 'window)
.agg(count("*") as 'count)
.select($"window".getField("start").cast("long").as[Long], $"count".as[Long])
testStream(df)(
AddData(inputData, 10, 11, 12, 13, 14, 15),
CheckLastBatch(),
AddData(inputData, 25), // Advance watermark to 15 seconds
StopStream,
StartStream(),
CheckLastBatch(),
AddData(inputData, 25), // Evict items less than previous watermark.
CheckLastBatch((10, 5)),
StopStream,
AssertOnQuery { q => // purge commit and clear the sink
val commit = q.batchCommitLog.getLatest().map(_._1).getOrElse(-1L) + 1L
q.batchCommitLog.purge(commit)
q.sink.asInstanceOf[MemorySink].clear()
true
},
StartStream(),
CheckLastBatch((10, 5)), // Recompute last batch and re-evict timestamp 10
AddData(inputData, 30), // Advance watermark to 20 seconds
CheckLastBatch(),
StopStream,
StartStream(), // Watermark should still be 15 seconds
AddData(inputData, 17),
CheckLastBatch(), // We still do not see next batch
AddData(inputData, 30), // Advance watermark to 20 seconds
CheckLastBatch(),
AddData(inputData, 30), // Evict items less than previous watermark.
CheckLastBatch((15, 2)) // Ensure we see next window
)
}
test("dropping old data") {
val inputData = MemoryStream[Int]
val windowedAggregation = inputData.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.withWatermark("eventTime", "10 seconds")
.groupBy(window($"eventTime", "5 seconds") as 'window)
.agg(count("*") as 'count)
.select($"window".getField("start").cast("long").as[Long], $"count".as[Long])
testStream(windowedAggregation)(
AddData(inputData, 10, 11, 12),
CheckAnswer(),
AddData(inputData, 25), // Advance watermark to 15 seconds
CheckAnswer(),
AddData(inputData, 25), // Evict items less than previous watermark.
CheckAnswer((10, 3)),
AddData(inputData, 10), // 10 is later than 15 second watermark
CheckAnswer((10, 3)),
AddData(inputData, 25),
CheckAnswer((10, 3)) // Should not emit an incorrect partial result.
)
}
test("complete mode") {
val inputData = MemoryStream[Int]
val windowedAggregation = inputData.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.withWatermark("eventTime", "10 seconds")
.groupBy(window($"eventTime", "5 seconds") as 'window)
.agg(count("*") as 'count)
.select($"window".getField("start").cast("long").as[Long], $"count".as[Long])
// No eviction when asked to compute complete results.
testStream(windowedAggregation, OutputMode.Complete)(
AddData(inputData, 10, 11, 12),
CheckAnswer((10, 3)),
AddData(inputData, 25),
CheckAnswer((10, 3), (25, 1)),
AddData(inputData, 25),
CheckAnswer((10, 3), (25, 2)),
AddData(inputData, 10),
CheckAnswer((10, 4), (25, 2)),
AddData(inputData, 25),
CheckAnswer((10, 4), (25, 3))
)
}
test("group by on raw timestamp") {
val inputData = MemoryStream[Int]
val windowedAggregation = inputData.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.withWatermark("eventTime", "10 seconds")
.groupBy($"eventTime")
.agg(count("*") as 'count)
.select($"eventTime".cast("long").as[Long], $"count".as[Long])
testStream(windowedAggregation)(
AddData(inputData, 10),
CheckAnswer(),
AddData(inputData, 25), // Advance watermark to 15 seconds
CheckAnswer(),
AddData(inputData, 25), // Evict items less than previous watermark.
CheckAnswer((10, 1))
)
}
test("delay threshold should not be negative.") {
val inputData = MemoryStream[Int].toDF()
var e = intercept[IllegalArgumentException] {
inputData.withWatermark("value", "-1 year")
}
assert(e.getMessage contains "should not be negative.")
e = intercept[IllegalArgumentException] {
inputData.withWatermark("value", "1 year -13 months")
}
assert(e.getMessage contains "should not be negative.")
e = intercept[IllegalArgumentException] {
inputData.withWatermark("value", "1 month -40 days")
}
assert(e.getMessage contains "should not be negative.")
e = intercept[IllegalArgumentException] {
inputData.withWatermark("value", "-10 seconds")
}
assert(e.getMessage contains "should not be negative.")
}
test("the new watermark should override the old one") {
val df = MemoryStream[(Long, Long)].toDF()
.withColumn("first", $"_1".cast("timestamp"))
.withColumn("second", $"_2".cast("timestamp"))
.withWatermark("first", "1 minute")
.withWatermark("second", "2 minutes")
val eventTimeColumns = df.logicalPlan.output
.filter(_.metadata.contains(EventTimeWatermark.delayKey))
assert(eventTimeColumns.size === 1)
assert(eventTimeColumns(0).name === "second")
}
test("EventTime watermark should be ignored in batch query.") {
val df = testData
.withColumn("eventTime", $"key".cast("timestamp"))
.withWatermark("eventTime", "1 minute")
.select("eventTime")
.as[Long]
checkDataset[Long](df, 1L to 100L: _*)
}
private def assertNumStateRows(numTotalRows: Long): AssertOnQuery = AssertOnQuery { q =>
val progressWithData = q.recentProgress.filter(_.numInputRows > 0).lastOption.get
assert(progressWithData.stateOperators(0).numRowsTotal === numTotalRows)
true
}
private def assertEventStats(body: ju.Map[String, String] => Unit): AssertOnQuery = {
AssertOnQuery { q =>
body(q.recentProgress.filter(_.numInputRows > 0).lastOption.get.eventTime)
true
}
}
private val timestampFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'") // ISO8601
timestampFormat.setTimeZone(ju.TimeZone.getTimeZone("UTC"))
private def formatTimestamp(sec: Long): String = {
timestampFormat.format(new ju.Date(sec * 1000))
}
}
| aokolnychyi/spark | sql/core/src/test/scala/org/apache/spark/sql/streaming/EventTimeWatermarkSuite.scala | Scala | apache-2.0 | 13,973 |
package com.example
import scala.util.Random
import akka.actor.Actor
import spray.routing._
import spray.http._
import spray.http.DateTime
import MediaTypes._
import spray.json._
import DefaultJsonProtocol._
import spray.httpx.SprayJsonSupport._
import com.example.model.Ticket
import java.util.UUID
import scala.collection.mutable
class MyServiceActor extends Actor with MyService {
def actorRefFactory = context
def receive = runRoute(routes)
}
trait MyService extends HttpService {
val tickets = mutable.Map[UUID, Ticket]()
var queue = mutable.Queue[Ticket]()
implicit val ticketFormat = jsonFormat4(Ticket)
val routes =
path("") {
get {
respondWithMediaType(`text/html`) {
complete {
"Running"
}
}
}
} ~
path("tickets") {
post {
respondWithMediaType(`application/json`) {
ctx =>
ctx.complete {
val uuid = UUID.randomUUID
val ticket = Ticket(uuid.toString, queue.size + 1, Random.nextInt(10).abs * queue.size, DateTime.now.toIsoDateTimeString)
tickets += (uuid -> ticket)
queue += ticket
ticket
}
}
}
} ~
path("tickets" / JavaUUID) {
uuid =>
get {
ctx =>
if (!tickets.contains(uuid)) {
ctx.complete(StatusCodes.NotFound)
} else {
ctx.complete(tickets(uuid))
}
} ~
delete {
ctx =>
if (tickets.contains(uuid)) {
val ticketToBeRemoved: Option[Ticket] = tickets.remove(uuid)
              queue = remove(ticketToBeRemoved.get, queue) // remove returns a new queue, so reassign to keep it in sync
}
ctx.complete(StatusCodes.NoContent)
}
}
// ~
// path("queue") {
// get {
// _.complete {
// queue.size.toJson
// }
// }
// }
def remove(num: Ticket, list: mutable.Queue[Ticket]) = list diff List(num)
} | enigma11/spray-can-server-heroku-sample | src/main/scala/com/example/MyService.scala | Scala | unlicense | 2,096 |
/*
* Copyright 2013-2016 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package laika.parse.rst.ext
import laika.tree.Elements._
import laika.parse.rst.Directives._
import laika.parse.rst.Directives.Parts._
import java.text.SimpleDateFormat
import java.util.Date
/** Defines all supported standard span directives of the reStructuredText reference parser.
* A span directive can be used in substitution definitions.
*
* The `replace` directive is fully supported. The other directives have the following
* adjustments or limitations compared to their counterparts in the reference parser:
*
* - `unicode`: does not support the various trim options, as that would require modifying adjacent elements
* (and no other directive has this requirement, therefore API/impl changes did not seem justified)
*
* - `date`: Uses the patterns of `java.text.SimpleDateFormat` instead of Python's `time.strftime` function.
*
* - `image`: Does not support the various layout options (`width`, `height`, `scale`, `align`), as no other
* tree nodes in Laika carry concrete layout information. It is recommended to use styles instead.
*
* @author Jens Halm
*/
trait StandardSpanDirectives {
/** All custom parsers needed by the directive implementations.
*/
val parse: StandardDirectiveParsers = new StandardDirectiveParsers {}
/** The name option which is supported by almost all reStructuredText directives.
*/
protected val nameOpt: DirectivePart[Option[String]] = optField("name")
/** The class option which is supported by almost all reStructuredText directives.
*/
protected val classOpt: DirectivePart[Option[String]] = optField("class")
/** The standard class and name options supported by most directives,
* combined in the result into an Options instance.
*/
protected val stdOpt: DirectivePart[Options] = (nameOpt ~ classOpt) { (id, styles) => toOptions(id, styles) }
/** Converts an optional id and an optional style parameter containing
* a space-delimited list of styles to an `Options` instance.
*/
protected def toOptions (id: Option[String], styles: Option[String]): Options =
Options(id, styles.map(_.split(" ").toSet).getOrElse(Set()))
/** The image directive for span elements,
* see [[http://docutils.sourceforge.net/docs/ref/rst/directives.html#image]] for details.
*/
lazy val image: DirectivePart[Span] = {
def multilineURI (text: String) = Right(text.split("\n").map(_.trim).mkString("\n").trim)
(argument(multilineURI, withWS = true) ~ optField("alt") ~ optField("target", parse.target) ~ stdOpt) { (uri, alt, target, opt) =>
val image = Image(alt.getOrElse(""), URI(uri), None, opt)
(target map {
case ref: ExternalLink => ref.copy(content = List(image))
case ref: LinkReference => ref.copy(content = List(image))
}).getOrElse(image)
}
}
/** The replace directive,
* see [[http://docutils.sourceforge.net/docs/ref/rst/directives.html#replacement-text]] for details.
*/
lazy val replace: DirectivePart[Span] = spanContent map (SpanSequence(_))
/** The unicode directive,
* see [[http://docutils.sourceforge.net/docs/ref/rst/directives.html#unicode-character-codes]] for details.
*/
lazy val unicode: DirectivePart[Span] = argument(parse.unicode, withWS = true) map (Text(_))
/** The date directive,
* see [[http://docutils.sourceforge.net/docs/ref/rst/directives.html#date]] for details.
*/
lazy val date: DirectivePart[Span] = {
optArgument(withWS = true) map { pattern =>
Text(new SimpleDateFormat(pattern.getOrElse("yyyy-MM-dd")).format(new Date))
}
}
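  // Hedged usage sketch (added for illustration, not part of the reference parser docs):
  // in reStructuredText the directive is used inside a substitution definition, e.g.
  //   .. |today| date:: yyyy-MM-dd
  // and `|today|` then expands to the current date formatted with the given
  // `SimpleDateFormat` pattern (defaulting to yyyy-MM-dd when no argument is given).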
  /** All standard reStructuredText span directives,
* to be used in substitution references.
*/
lazy val spanDirectives: List[Directive[Span]] = List(
SpanDirective("image")(image),
SpanDirective("replace")(replace),
SpanDirective("unicode")(unicode),
SpanDirective("date")(date)
)
}
| amuramatsu/Laika | core/src/main/scala/laika/parse/rst/ext/StandardSpanDirectives.scala | Scala | apache-2.0 | 4,573 |
package rocks.muki
import rocks.muki.graphql.GraphQLPlugin.autoImport.graphqlSchemas
import rocks.muki.graphql.schema.{GraphQLSchema, GraphQLSchemas}
import sbt._
import sbt.complete.DefaultParsers._
import sbt.complete.{FixedSetExamples, Parser}
package object graphql {
/**
* Throw an exception without a stacktrace.
*
* @param msg the error message
* @return nothing - throws an exception
*/
def quietError(msg: String): Nothing = {
val exc = new RuntimeException(msg)
exc.setStackTrace(Array.empty)
throw exc
}
/**
   * @return a parser that parses exactly one schema label
*/
val singleGraphQLSchemaParser: Def.Initialize[Parser[GraphQLSchema]] =
Def.setting {
val gqlSchema = graphqlSchemas.value
val labels = gqlSchema.schemas.map(_.label)
// create a dependent parser. A label can only be selected once
schemaLabelParser(labels).flatMap(label => schemaOrError(label, gqlSchema))
}
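  // Hedged usage sketch (the task name is hypothetical; assumes sbt's input-task macro,
  // which exposes `.parsed` on a Def.Initialize[Parser[A]]):
  //   val showSchema = inputKey[Unit]("prints the label of the selected schema")
  //   showSchema := {
  //     val schema = singleGraphQLSchemaParser.parsed
  //     sbt.Keys.streams.value.log.info(s"selected schema: ${schema.label}")
  //   }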
/**
* Parses two schema labels
*/
val tupleGraphQLSchemaParser: Def.Initialize[Parser[(GraphQLSchema, GraphQLSchema)]] =
Def.setting {
val gqlSchemas = graphqlSchemas.value
val labels = gqlSchemas.schemas.map(_.label)
      // create a dependent parser. A label can only be selected once
schemaLabelParser(labels).flatMap {
case selectedLabel if labels.contains(selectedLabel) =>
schemaOrError(selectedLabel, gqlSchemas) ~ schemaLabelParser(labels.filterNot(_ == selectedLabel))
.flatMap(label => schemaOrError(label, gqlSchemas))
case selectedLabel =>
failure(s"$selectedLabel is not available. Use: [${labels.mkString(" | ")}]")
}
}
/**
* @param labels list of available schemas by label
* @return a parser for the given labels
*/
private[this] def schemaLabelParser(labels: Iterable[String]): Parser[String] = {
val schemaParser = StringBasic.examples(FixedSetExamples(labels))
token(Space.? ~> schemaParser)
}
private def schemaOrError(label: String, graphQLSchema: GraphQLSchemas): Parser[GraphQLSchema] =
graphQLSchema.schemaByLabel
.get(label)
.map(success(_))
.getOrElse(failure(s"The schema '$label' is not defined in graphqlSchemas"))
}
| muuki88/sbt-graphql | src/main/scala/rocks/muki/graphql/package.scala | Scala | apache-2.0 | 2,259 |
package metal
package generic
import scala.reflect.ClassTag
import spire.syntax.cfor._
/* We do not extract a common `Seq` base type, because [[Buffer]] would be its only subtype.
* Let's make a `Seq` base trait when we have more input on its design.
*/
trait Buffer[@specialized V] extends Collection with NElements1[V] with Enumerable with Values[V] { self =>
implicit def ctV: ClassTag[V]
implicit def V: MetalTag[V]
type Mutable = mutable.Buffer[V]
type Immutable = immutable.Buffer[V]
type Scala = scala.collection.immutable.IndexedSeq[V]
private[metal] def array: Array[V]
/**
* Return the value at element i.
*
* If the index exceeds the length, the result is undefined; an exception could be
* thrown, but this is not guaranteed.
*/
def apply(idx: Int): V
def length: Int
final def longSize = length
final def isEmpty = length == 0
final def nonEmpty = length > 0
def stringPrefix = "Buffer"
final def ptr: Ptr[self.type] = if (length == 0) Ptr.Null(self) else VPtr(self, 0)
final def ptrNext(ptr: VPtr[self.type]): Ptr[self.type] =
if (ptr.raw == length - 1) Ptr.Null(self) else VPtr(self, ptr.raw + 1)
final def ptrElement1[@specialized E](ptr: VPtr[self.type]): E = this.asInstanceOf[Buffer[E]].apply(ptr.raw.toInt)
final def ptrValue[@specialized W](ptr: VPtr[self.type]): W = this.asInstanceOf[Buffer[W]].apply(ptr.raw.toInt)
final def toArray: Array[V] = {
val res = ctV.newArray(length.toInt)
Array.copy(array, 0, res, 0, length)
res
}
def mutableCopy = new metal.mutable.Buffer(array.clone, length)
override def equals(that: Any): Boolean = that match {
case s: Buffer[_] =>
if (V != s.V) return false
ctV match {
case ClassTag.Long =>
Buffer.specEquals[Long](self.asInstanceOf[Buffer[Long]], that.asInstanceOf[Buffer[Long]])
case ClassTag.Int =>
Buffer.specEquals[Int](self.asInstanceOf[Buffer[Int]], that.asInstanceOf[Buffer[Int]])
case ClassTag.Short =>
Buffer.specEquals[Short](self.asInstanceOf[Buffer[Short]], that.asInstanceOf[Buffer[Short]])
case ClassTag.Byte =>
Buffer.specEquals[Byte](self.asInstanceOf[Buffer[Byte]], that.asInstanceOf[Buffer[Byte]])
case ClassTag.Char =>
Buffer.specEquals[Char](self.asInstanceOf[Buffer[Char]], that.asInstanceOf[Buffer[Char]])
case ClassTag.Boolean =>
Buffer.specEquals[Boolean](self.asInstanceOf[Buffer[Boolean]], that.asInstanceOf[Buffer[Boolean]])
case ClassTag.Double =>
Buffer.specEquals[Double](self.asInstanceOf[Buffer[Double]], that.asInstanceOf[Buffer[Double]])
case ClassTag.Float =>
Buffer.specEquals[Float](self.asInstanceOf[Buffer[Float]], that.asInstanceOf[Buffer[Float]])
case ClassTag.Unit =>
Buffer.specEquals[Unit](self.asInstanceOf[Buffer[Unit]], that.asInstanceOf[Buffer[Unit]])
case _ =>
Buffer.specEquals[V](self.asInstanceOf[Buffer[V]], that.asInstanceOf[Buffer[V]])
}
case _ => false
}
override def hashCode: Int = ctV match {
case ClassTag.Long => Buffer.specHashCode[Long](self.asInstanceOf[Buffer[Long]])
case ClassTag.Int => Buffer.specHashCode[Int](self.asInstanceOf[Buffer[Int]])
case ClassTag.Short => Buffer.specHashCode[Short](self.asInstanceOf[Buffer[Short]])
case ClassTag.Byte => Buffer.specHashCode[Byte](self.asInstanceOf[Buffer[Byte]])
case ClassTag.Char => Buffer.specHashCode[Char](self.asInstanceOf[Buffer[Char]])
case ClassTag.Boolean => Buffer.specHashCode[Boolean](self.asInstanceOf[Buffer[Boolean]])
case ClassTag.Double => Buffer.specHashCode[Double](self.asInstanceOf[Buffer[Double]])
case ClassTag.Float => Buffer.specHashCode[Float](self.asInstanceOf[Buffer[Float]])
case ClassTag.Unit => Buffer.specHashCode[Unit](self.asInstanceOf[Buffer[Unit]])
case _ => Buffer.specHashCode[V](self)
}
override def toString: String = {
val sb = new StringBuilder
sb.append(stringPrefix)
sb.append("(")
var prefix = ""
val len = self.length
cforRange(0 until len) { i =>
sb.append(prefix)
sb.append(apply(i).toString)
prefix = ", "
}
sb.append(")")
sb.toString
}
}
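// Hedged sketch (illustration only; the real constructor may require extra implicits),
// reusing the constructor seen in `mutableCopy` above:
//   val a = new metal.mutable.Buffer(Array(1, 2, 3), 3)
//   val b = a.mutableCopy
//   a == b           // structural equality dispatches through Buffer.specEquals
//   a.toArray.toSeq  // Seq(1, 2, 3)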
object Buffer {
def specEquals[@specialized V](lhs: Buffer[V], rhs: Buffer[V]): Boolean = {
val len = lhs.length
if (len != rhs.length) return false
cforRange(0 until len) { i =>
if (lhs.apply(i) != rhs.apply(i)) return false
}
true
}
def specHashCode[@specialized V](lhs: Buffer[V]): Int = {
import lhs.V
import scala.util.hashing.MurmurHash3._
val len = lhs.length
var h = arraySeed
cforRange(0 until len) { i =>
h = mix(h, V.hash(lhs.apply(i)))
}
finalizeHash(h, len)
}
}
| denisrosset/ptrcoll | library/src/main/scala/metal/generic/Buffer.scala | Scala | mit | 4,824 |
/*
* Copyright 2020 Precog Data
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.frontend.logicalplan
import slamdata.Predef._
import quasar.common.data.Data
import quasar.contrib.matryoshka.implicits._
import quasar.std.StdLib._
import matryoshka._
sealed abstract class JoinDir(val name: String) {
import structural.MapProject
val data: Data = Data.Str(name)
def const[T](implicit T: Corecursive.Aux[T, LogicalPlan]): T =
constant[T](data).embed
def projectFrom[T](lp: T)(implicit T: Corecursive.Aux[T, LogicalPlan]): T =
MapProject(lp, const).embed
}
object JoinDir {
final case object Left extends JoinDir("left")
final case object Right extends JoinDir("right")
}
| quasar-analytics/quasar | frontend/src/main/scala/quasar/frontend/logicalplan/JoinDir.scala | Scala | apache-2.0 | 1,226 |
package beam.sim
import java.util.concurrent.TimeUnit
import akka.actor.Status.Success
import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Cancellable, DeadLetter, Props, Terminated}
import akka.pattern.ask
import akka.util.Timeout
import beam.agentsim.agents.BeamAgent.Finish
import beam.agentsim.agents.ridehail.RideHailManager.{BufferedRideHailRequestsTrigger, RideHailRepositioningTrigger}
import beam.agentsim.agents.ridehail.{RideHailIterationHistory, RideHailManager, RideHailSurgePricingManager}
import beam.agentsim.agents.vehicles.BeamVehicleType
import beam.agentsim.agents.{BeamAgent, InitializeTrigger, Population, TransitSystem}
import beam.agentsim.infrastructure.ZonalParkingManager
import beam.agentsim.scheduler.BeamAgentScheduler
import beam.agentsim.scheduler.BeamAgentScheduler.{CompletionNotice, ScheduleTrigger, StartSchedule}
import beam.router._
import beam.router.osm.TollCalculator
import beam.sim.common.GeoUtils
import beam.sim.config.BeamConfig.Beam
import beam.sim.metrics.{Metrics, MetricsSupport}
import beam.sim.monitoring.ErrorListener
import beam.sim.vehiclesharing.Fleets
import beam.utils._
import beam.utils.matsim_conversion.ShapeUtils.QuadTreeBounds
import com.conveyal.r5.transit.TransportNetwork
import com.google.inject.Inject
import com.typesafe.scalalogging.LazyLogging
import org.matsim.api.core.v01.population.{Activity, Person, Population => MATSimPopulation}
import org.matsim.api.core.v01.{Id, Scenario}
import org.matsim.core.api.experimental.events.EventsManager
import org.matsim.core.mobsim.framework.Mobsim
import org.matsim.core.utils.misc.Time
import scala.collection.JavaConverters._
import scala.concurrent.Await
import scala.concurrent.duration._
class BeamMobsim @Inject()(
val beamServices: BeamServices,
val beamScenario: BeamScenario,
val transportNetwork: TransportNetwork,
val tollCalculator: TollCalculator,
val scenario: Scenario,
val eventsManager: EventsManager,
val actorSystem: ActorSystem,
val rideHailSurgePricingManager: RideHailSurgePricingManager,
val rideHailIterationHistory: RideHailIterationHistory,
val routeHistory: RouteHistory,
val beamSkimmer: BeamSkimmer,
val travelTimeObserved: TravelTimeObserved,
val geo: GeoUtils,
val networkHelper: NetworkHelper
) extends Mobsim
with LazyLogging
with MetricsSupport {
private implicit val timeout: Timeout = Timeout(50000, TimeUnit.SECONDS)
override def run(): Unit = {
logger.info("Starting Iteration")
startMeasuringIteration(beamServices.matsimServices.getIterationNumber)
logger.info("Preparing new Iteration (Start)")
startSegment("iteration-preparation", "mobsim")
validateVehicleTypes()
if (beamServices.beamConfig.beam.debug.debugEnabled)
logger.info(DebugLib.gcAndGetMemoryLogMessage("run.start (after GC): "))
Metrics.iterationNumber = beamServices.matsimServices.getIterationNumber
eventsManager.initProcessing()
val iteration = actorSystem.actorOf(
Props(
new BeamMobsimIteration(
beamServices,
rideHailSurgePricingManager,
rideHailIterationHistory,
routeHistory,
beamSkimmer,
travelTimeObserved
)
),
"BeamMobsim.iteration"
)
Await.result(iteration ? "Run!", timeout.duration)
logger.info("Agentsim finished.")
eventsManager.finishProcessing()
logger.info("Events drained.")
endSegment("agentsim-events", "agentsim")
logger.info("Processing Agentsim Events (End)")
}
def validateVehicleTypes(): Unit = {
if (!beamScenario.vehicleTypes.contains(
Id.create(beamScenario.beamConfig.beam.agentsim.agents.bodyType, classOf[BeamVehicleType])
)) {
throw new RuntimeException(
"Vehicle type for human body: " + beamScenario.beamConfig.beam.agentsim.agents.bodyType + " is missing. Please add it to the vehicle types."
)
}
if (!beamScenario.vehicleTypes.contains(
Id.create(
beamScenario.beamConfig.beam.agentsim.agents.rideHail.initialization.procedural.vehicleTypeId,
classOf[BeamVehicleType]
)
)) {
throw new RuntimeException(
"Vehicle type for ride-hail: " + beamScenario.beamConfig.beam.agentsim.agents.rideHail.initialization.procedural.vehicleTypeId + " is missing. Please add it to the vehicle types."
)
}
}
}
class BeamMobsimIteration(
val beamServices: BeamServices,
val rideHailSurgePricingManager: RideHailSurgePricingManager,
val rideHailIterationHistory: RideHailIterationHistory,
val routeHistory: RouteHistory,
val beamSkimmer: BeamSkimmer,
val travelTimeObserved: TravelTimeObserved
) extends Actor
with ActorLogging
with MetricsSupport {
import beamServices._
private val config: Beam.Agentsim = beamConfig.beam.agentsim
var runSender: ActorRef = _
private val errorListener = context.actorOf(ErrorListener.props())
context.watch(errorListener)
context.system.eventStream.subscribe(errorListener, classOf[BeamAgent.TerminatedPrematurelyEvent])
private val scheduler = context.actorOf(
Props(
classOf[BeamAgentScheduler],
beamConfig,
Time.parseTime(beamConfig.matsim.modules.qsim.endTime).toInt,
config.schedulerParallelismWindow,
new StuckFinder(beamConfig.beam.debug.stuckAgentDetection)
).withDispatcher("beam-agent-scheduler-pinned-dispatcher"),
"scheduler"
)
context.system.eventStream.subscribe(errorListener, classOf[DeadLetter])
context.watch(scheduler)
private val envelopeInUTM = geo.wgs2Utm(beamScenario.transportNetwork.streetLayer.envelope)
envelopeInUTM.expandBy(beamConfig.beam.spatial.boundingBoxBuffer)
val activityQuadTreeBounds: QuadTreeBounds = buildActivityQuadTreeBounds(matsimServices.getScenario.getPopulation)
log.info(s"envelopeInUTM before expansion: $envelopeInUTM")
envelopeInUTM.expandToInclude(activityQuadTreeBounds.minx, activityQuadTreeBounds.miny)
envelopeInUTM.expandToInclude(activityQuadTreeBounds.maxx, activityQuadTreeBounds.maxy)
log.info(s"envelopeInUTM after expansion: $envelopeInUTM")
private val parkingManager = context.actorOf(
ZonalParkingManager
.props(beamScenario.beamConfig, beamScenario.tazTreeMap, geo, beamRouter, envelopeInUTM)
.withDispatcher("zonal-parking-manager-pinned-dispatcher"),
"ParkingManager"
)
context.watch(parkingManager)
private val rideHailManager = context.actorOf(
Props(
new RideHailManager(
Id.create("GlobalRHM", classOf[RideHailManager]),
beamServices,
beamScenario,
beamScenario.transportNetwork,
tollCalculator,
matsimServices.getScenario,
matsimServices.getEvents,
scheduler,
beamRouter,
parkingManager,
envelopeInUTM,
activityQuadTreeBounds,
rideHailSurgePricingManager,
rideHailIterationHistory.oscillationAdjustedTNCIterationStats,
beamSkimmer,
routeHistory
)
).withDispatcher("ride-hail-manager-pinned-dispatcher"),
"RideHailManager"
)
context.watch(rideHailManager)
scheduler ! ScheduleTrigger(InitializeTrigger(0), rideHailManager)
var memoryLoggingTimerActorRef: ActorRef = _
var memoryLoggingTimerCancellable: Cancellable = _
var debugActorWithTimerActorRef: ActorRef = _
var debugActorWithTimerCancellable: Cancellable = _
if (beamConfig.beam.debug.debugActorTimerIntervalInSec > 0) {
debugActorWithTimerActorRef = context.actorOf(Props(classOf[DebugActorWithTimer], rideHailManager, scheduler))
debugActorWithTimerCancellable = prepareMemoryLoggingTimerActor(
beamConfig.beam.debug.debugActorTimerIntervalInSec,
context.system,
debugActorWithTimerActorRef
)
}
private val sharedVehicleFleets = config.agents.vehicles.sharedFleets.map { fleetConfig =>
context.actorOf(
Fleets.lookup(fleetConfig).props(beamServices, beamSkimmer, scheduler, parkingManager),
fleetConfig.name
)
}
sharedVehicleFleets.foreach(context.watch)
sharedVehicleFleets.foreach(scheduler ! ScheduleTrigger(InitializeTrigger(0), _))
private val transitSystem = context.actorOf(
Props(
new TransitSystem(
beamScenario,
matsimServices.getScenario,
beamScenario.transportNetwork,
scheduler,
parkingManager,
tollCalculator,
geo,
networkHelper,
matsimServices.getEvents
)
),
"transit-system"
)
context.watch(transitSystem)
scheduler ! ScheduleTrigger(InitializeTrigger(0), transitSystem)
private val population = context.actorOf(
Population.props(
matsimServices.getScenario,
beamScenario,
beamServices,
scheduler,
beamScenario.transportNetwork,
tollCalculator,
beamRouter,
rideHailManager,
parkingManager,
sharedVehicleFleets,
matsimServices.getEvents,
routeHistory,
beamSkimmer,
travelTimeObserved,
envelopeInUTM
),
"population"
)
context.watch(population)
scheduler ! ScheduleTrigger(InitializeTrigger(0), population)
scheduleRideHailManagerTimerMessages()
def prepareMemoryLoggingTimerActor(
timeoutInSeconds: Int,
system: ActorSystem,
memoryLoggingTimerActorRef: ActorRef
): Cancellable = {
import system.dispatcher
val cancellable = system.scheduler.schedule(
0.milliseconds,
(timeoutInSeconds * 1000).milliseconds,
memoryLoggingTimerActorRef,
Tick
)
cancellable
}
override def receive: PartialFunction[Any, Unit] = {
case CompletionNotice(_, _) =>
log.info("Scheduler is finished.")
endSegment("agentsim-execution", "agentsim")
log.info("Ending Agentsim")
log.info("Processing Agentsim Events (Start)")
startSegment("agentsim-events", "agentsim")
population ! Finish
rideHailManager ! Finish
transitSystem ! Finish
context.stop(scheduler)
context.stop(errorListener)
context.stop(parkingManager)
sharedVehicleFleets.foreach(context.stop)
if (beamConfig.beam.debug.debugActorTimerIntervalInSec > 0) {
debugActorWithTimerCancellable.cancel()
context.stop(debugActorWithTimerActorRef)
}
case Terminated(_) =>
if (context.children.isEmpty) {
context.stop(self)
runSender ! Success("Ran.")
} else {
log.debug("Remaining: {}", context.children)
}
case "Run!" =>
runSender = sender
log.info("Running BEAM Mobsim")
endSegment("iteration-preparation", "mobsim")
log.info("Preparing new Iteration (End)")
log.info("Starting Agentsim")
startSegment("agentsim-execution", "agentsim")
scheduler ! StartSchedule(matsimServices.getIterationNumber)
}
private def scheduleRideHailManagerTimerMessages(): Unit = {
if (config.agents.rideHail.allocationManager.repositionTimeoutInSeconds > 0)
scheduler ! ScheduleTrigger(RideHailRepositioningTrigger(0), rideHailManager)
if (config.agents.rideHail.allocationManager.requestBufferTimeoutInSeconds > 0)
scheduler ! ScheduleTrigger(BufferedRideHailRequestsTrigger(0), rideHailManager)
}
def buildActivityQuadTreeBounds(population: MATSimPopulation): QuadTreeBounds = {
val persons = population.getPersons.values().asInstanceOf[java.util.Collection[Person]].asScala.view
val activities = persons.flatMap(p => p.getSelectedPlan.getPlanElements.asScala.view).collect {
case activity: Activity =>
activity
}
val coordinates = activities.map(_.getCoord)
// Force to compute xs and ys arrays
val xs = coordinates.map(_.getX).toArray
val ys = coordinates.map(_.getY).toArray
val xMin = xs.min
val xMax = xs.max
val yMin = ys.min
val yMax = ys.max
log.info(
s"QuadTreeBounds with X: [$xMin; $xMax], Y: [$yMin, $yMax]. boundingBoxBuffer: ${beamConfig.beam.spatial.boundingBoxBuffer}"
)
QuadTreeBounds(
xMin - beamConfig.beam.spatial.boundingBoxBuffer,
yMin - beamConfig.beam.spatial.boundingBoxBuffer,
xMax + beamConfig.beam.spatial.boundingBoxBuffer,
yMax + beamConfig.beam.spatial.boundingBoxBuffer
)
}
}
| colinsheppard/beam | src/main/scala/beam/sim/BeamMobsim.scala | Scala | gpl-3.0 | 12,280 |
/*
* Copyright 2007-2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package http
package js
import scala.xml.{NodeSeq, Group, Unparsed, Elem}
import net.liftweb.util.Helpers._
import net.liftweb.common._
import net.liftweb.util._
import scala.xml.Node
object JsCommands {
def create = new JsCommands(Nil)
def apply(in: Seq[JsCmd]) = new JsCommands(in.toList.reverse)
def apply(in: JsExp) = new JsCommands(List(in.cmd))
}
/**
* A container for accumulating `[[JsCmd]]`s that need to be sent to the client.
* When `[[toResponse]]` is called to finalize the response, in addition to the
* JS passed directly to this instance, the commands in `[[S.jsToAppend]]` are
* also read and included in the response. Also in this process, all of the
* `JsCmd` instances have their `toJsCmd` methods called to convert them to a
* string.
*
* @note The contents of `jsToAppend` are cleared in this process!
*/
class JsCommands(val reverseList: List[JsCmd]) {
def &(in: JsCmd) = new JsCommands(in :: reverseList)
def &(in: List[JsCmd]) = new JsCommands(in.reverse ::: reverseList)
def toResponse = {
// Evaluate all toJsCmds, which may in turn call S.append[Global]Js.
val containedJs = reverseList.reverse.map(_.toJsCmd)
val toAppend = S.jsToAppend(clearAfterReading = true).map(_.toJsCmd)
    val data = (containedJs ++ toAppend).mkString("\n").getBytes("UTF-8")
InMemoryResponse(data, List("Content-Length" -> data.length.toString, "Content-Type" -> "text/javascript; charset=utf-8"), S.responseCookies, 200)
}
}
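// Hedged usage sketch (added for illustration; the element id and messages are made up):
//   val cmds = JsCommands.create & JsCmds.SetHtml("status", <span>Saved</span>) & JsCmds.Alert("Done")
//   val resp = cmds.toResponse // text/javascript InMemoryResponse carrying the accumulated
//                              // commands plus whatever S.jsToAppend has queued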
case class JsonCall(funcId: String) {
def exp(exp: JsExp): JsCmd = JsCmds.Run(funcId + "(" + exp.toJsCmd + ");")
def apply(command: String): JsCmd = apply(JE.Str(command))
def apply(command: JsExp): JsCmd =
JsCmds.Run(funcId + "({'command': " + command.toJsCmd + ", 'params': false});")
def apply(command: String, params: JsExp) =
JsCmds.Run(funcId + "({'command': " + command.encJs + ", 'params':" +
params.toJsCmd + "});")
def apply(command: String, target: String, params: JsExp) =
JsCmds.Run(funcId + "({'command': " + command.encJs + ", 'target': " +
target.encJs +
", 'params':" +
params.toJsCmd + "});")
def apply(command: JsExp, params: JsExp) =
JsCmds.Run(funcId + "({'command': " + command.toJsCmd + ", 'params':" +
params.toJsCmd + "});")
def apply(command: JsExp, target: JsExp, params: JsExp) =
JsCmds.Run(funcId + "({'command': " + command.toJsCmd + ", 'target': " +
target.toJsCmd +
", 'params':" +
params.toJsCmd + "});")
}
trait JsObj extends JsExp {
def props: List[(String, JsExp)]
def toJsCmd = props.map {case (n, v) => n.encJs + ": " + v.toJsCmd}.mkString("{", ", ", "}")
override def toString(): String = toJsCmd
override def equals(other: Any): Boolean = {
other match {
case jsObj: JsObj => {
import scala.annotation.tailrec
@tailrec def test(me: Map[String, JsExp], them: List[(String, JsExp)]): Boolean = {
them match {
case Nil => me.isEmpty
case _ if me.isEmpty => false
case (k, v) :: xs =>
me.get(k) match {
case None => false
case Some(mv) if mv != v => false
case _ => test(me - k, xs)
}
}
}
test(Map(props :_*), jsObj.props)
}
case x => super.equals(x)
}
}
def +*(other: JsObj) = {
val np = props ::: other.props
new JsObj {
def props = np
}
}
/**
* Overwrites any existing keys and adds the rest.
*/
def extend(other: JsObj) = {
// existing, non-existing props
val (ep, nep) = other.props.partition { case (key, exp) => props.exists { case (k, e) => k == key }}
// replaced props
val rp = props.map { case (key, exp) =>
ep.find { case (k, e) => k == key }.getOrElse(key -> exp)
}
new JsObj {
def props = rp ::: nep
}
}
}
/**
* The companion object to JsExp that has some
* helpful conversions to/from Lift's JSON library
*/
object JsExp {
import json._
implicit def jValueToJsExp(jv: JValue): JsExp = new JsExp {
lazy val toJsCmd = compactRender(jv)
}
implicit def strToJsExp(str: String): JE.Str = JE.Str(str)
implicit def boolToJsExp(b: Boolean): JsExp = JE.boolToJsExp(b)
implicit def intToJsExp(in: Int): JE.Num = JE.Num(in)
implicit def longToJsExp(in: Long): JE.Num = JE.Num(in)
implicit def doubleToJsExp(in: Double): JE.Num = JE.Num(in)
implicit def floatToJsExp(in: Float): JE.Num = JE.Num(in)
implicit def numToJValue(in: JE.Num): JValue = in match {
case JE.Num(n) => JDouble(n.doubleValue())
}
implicit def strToJValue(in: JE.Str): JValue = JString(in.str)
}
/**
* The basic JavaScript expression
*/
trait JsExp extends HtmlFixer with ToJsCmd {
def toJsCmd: String
override def equals(other: Any): Boolean = {
other match {
case jx: JsExp => this.toJsCmd == jx.toJsCmd
case _ => super.equals(other)
}
}
override def toString = "JsExp("+toJsCmd+")"
def appendToParent(parentName: String): JsCmd = {
val ran = "v" + Helpers.nextFuncName
JsCmds.JsCrVar(ran, this) &
JE.JsRaw("if (" + ran + ".parentNode) " + ran + " = " + ran + ".cloneNode(true)").cmd &
JE.JsRaw("if (" + ran + ".nodeType) {" + parentName + ".appendChild(" + ran + ");} else {" +
parentName + ".appendChild(document.createTextNode(" + ran + "));}").cmd
}
/**
* ~> accesses a property in the current JsExp
*/
def ~>(right: JsMember): JsExp = new JsExp {
def toJsCmd = JsExp.this.toJsCmd + "." + right.toJsCmd
}
def ~>(right: Box[JsMember]): JsExp = right.dmap(this)(r => ~>(r))
def cmd: JsCmd = JsCmds.Run(toJsCmd + ";")
def +(right: JsExp): JsExp = new JsExp {
def toJsCmd = JsExp.this.toJsCmd + " + " + right.toJsCmd
}
def ===(right: JsExp): JsExp = new JsExp {
def toJsCmd = JsExp.this.toJsCmd + " = " + right.toJsCmd
}
}
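// Hedged illustration (the element id is hypothetical):
//   val exp = JE.ElemById("status") ~> JE.JsVal("innerHTML")
//   exp.toJsCmd // document.getElementById("status").innerHTML
//   exp.cmd     // the same expression wrapped in a JsCmd, terminated with ";"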
trait JsMember {
def toJsCmd: String
}
/**
* JavaScript Expressions. To see these in action, check out
* sites/example/src/webapp/json.html
*/
object JE {
def boolToJsExp(in: Boolean): JsExp = if (in) JsTrue else JsFalse
/**
* The companion object to Num which has some helpful
* constructors
*/
object Num {
def apply(i: Int): Num = new Num(i)
def apply(lng: Long): Num = new Num(lng)
def apply(d: Double): Num = new Num(d)
def apply(f: Float): Num = new Num(f)
}
case class Num(n: Number) extends JsExp {
def toJsCmd = n.toString
}
case class Stringify(in: JsExp) extends JsExp {
def toJsCmd = "JSON.stringify(" + in.toJsCmd + ")"
}
case class JsArray(in: JsExp*) extends JsExp {
def toJsCmd = new JsExp {
      def toJsCmd = in.map(_.toJsCmd).mkString("[", ", ", "]\n")
}.toJsCmd
def this(in: List[JsExp]) = this (in: _*)
}
object JsArray {
def apply(in: List[JsExp]) = new JsArray(in: _*)
}
case class ValById(id: String) extends JsExp {
def toJsCmd = "(function() {if (document.getElementById(" + id.encJs + ")) {return document.getElementById(" + id.encJs + ").value;} else {return null;}})()"
}
/**
* Given the id of a checkbox, see if it's checked
*/
case class CheckedById(id: String) extends JsExp {
def toJsCmd = "(function() {if (document.getElementById(" + id.encJs + ")) {return document.getElementById(" + id.encJs + ").checked} else {return false;}})()"
}
/**
* gets the element by ID
*/
case class ElemById(id: String, thenStr: String*) extends JsExp {
override def toJsCmd = "document.getElementById(" + id.encJs + ")" + (
if (thenStr.isEmpty) "" else thenStr.mkString(".", ".", "")
)
}
/**
* Gives the parent node of the node denominated by the id
*
* @param id - the id of the node
*/
case class ParentOf(id: String) extends JsExp {
def toJsCmd = (ElemById(id) ~> Parent).toJsCmd
}
object LjSwappable {
def apply(visible: JsExp, hidden: JsExp): JxBase = {
new JxNodeBase {
def child = Nil
def appendToParent(name: String): JsCmd =
JsRaw(name + ".appendChild(lift$.swappable(" + visible.toJsCmd
+ ", " + hidden.toJsCmd + "))").cmd
}
}
def apply(visible: NodeSeq, hidden: NodeSeq): JxBase = {
new JxNodeBase {
def child = Nil
def appendToParent(name: String): JsCmd =
JsRaw(name + ".appendChild(lift$.swappable(" + AnonFunc(
JsCmds.JsCrVar("df", JsRaw("document.createDocumentFragment()")) &
addToDocFrag("df", visible.toList) &
JE.JsRaw("return df").cmd
).toJsCmd
+ "(), " + AnonFunc(JsCmds.JsCrVar("df", JsRaw("document.createDocumentFragment()")) &
addToDocFrag("df", hidden.toList) &
JE.JsRaw("return df").cmd).toJsCmd + "()))").cmd
}
}
}
object LjBuildIndex {
def apply(obj: String,
indexName: String, tables: (String, String)*): JsExp = new JsExp {
def toJsCmd = "lift$.buildIndex(" + obj + ", " + indexName.encJs +
(if (tables.isEmpty) "" else ", " +
tables.map {case (l, r) => "[" + l.encJs + ", " + r.encJs + "]"}.mkString(", ")) +
")"
}
def apply(obj: JsExp,
indexName: String, tables: (String, String)*): JsExp = new JsExp {
def toJsCmd = "lift$.buildIndex(" + obj.toJsCmd + ", " + indexName.encJs +
(if (tables.isEmpty) "" else ", " +
tables.map {case (l, r) => "[" + l.encJs + ", " + r.encJs + "]"}.mkString(", ")) +
")"
}
}
protected trait MostLjFuncs {
def funcName: String
def apply(obj: String, func: String): JsExp = new JsExp {
def toJsCmd = "lift$." + funcName + "(" + obj + ", " + func.encJs + ")"
}
def apply(obj: JsExp, func: JsExp): JsExp = new JsExp {
def toJsCmd = "lift$." + funcName + "(" + obj.toJsCmd + ", " + func.toJsCmd + ")"
}
}
object LjAlt {
def apply(obj: String, func: String, alt: String): JsExp = new JsExp {
def toJsCmd = "lift$.alt(" + obj + ", " + func.encJs + ", " + alt.encJs + ")"
}
def apply(obj: JsExp, func: JsExp, alt: String): JsExp = new JsExp {
def toJsCmd = "lift$.alt(" + obj.toJsCmd + ", " + func.toJsCmd + ", " + alt.encJs + ")"
}
def apply(obj: JsExp, func: JsExp, alt: JsExp): JsExp = new JsExp {
def toJsCmd = "lift$.alt(" + obj.toJsCmd + ", " + func.toJsCmd + ", " + alt.toJsCmd + ")"
}
}
object LjMagicUpdate {
def apply(obj: String, field: String, idField: String, toUpdate: JsExp): JsExp = new JsExp {
def toJsCmd = "lift$.magicUpdate(" + obj + ", " + field.encJs + ", " + idField.encJs + ", " + toUpdate.toJsCmd + ")"
}
def apply(obj: JsExp, field: String, idField: String, toUpdate: JsExp): JsExp = new JsExp {
def toJsCmd = "lift$.magicUpdate(" + obj.toJsCmd + ", " + field.encJs + ", " + idField.encJs + ", " + toUpdate.toJsCmd + ")"
}
}
object LjForeach extends MostLjFuncs {
def funcName: String = "foreach"
}
object LjFilter extends MostLjFuncs {
def funcName: String = "filter"
}
object LjMap extends MostLjFuncs {
def funcName: String = "map"
}
object LjFold {
def apply(what: JsExp, init1: JsExp, func: String): JsExp = new JsExp {
def toJsCmd = "lift$.fold(" + what.toJsCmd + ", " + init1.toJsCmd + ", " + func.encJs + ")"
}
def apply(what: JsExp, init1: JsExp, func: AnonFunc): JsExp = new JsExp {
def toJsCmd = "lift$.fold(" + what.toJsCmd + ", " + init1.toJsCmd + ", " + func.toJsCmd + ")"
}
}
object LjFlatMap extends MostLjFuncs {
def funcName: String = "flatMap"
}
object LjSort extends MostLjFuncs {
def funcName: String = "sort"
def apply(obj: String): JsExp = new JsExp {
def toJsCmd = "lift$." + funcName + "(" + obj + ")"
}
def apply(obj: JsExp): JsExp = new JsExp {
def toJsCmd = "lift$." + funcName + "(" + obj.toJsCmd + ")"
}
}
object FormToJSON {
def apply(formId: String) = new JsExp {
def toJsCmd = LiftRules.jsArtifacts.formToJSON(formId).toJsCmd;
}
}
/**
* A String (JavaScript encoded)
*/
case class Str(str: String) extends JsExp {
def toJsCmd = str.encJs
}
/**
* A JavaScript method that takes parameters
*
* JsFunc is very similar to Call but only the latter will be implicitly converted to a JsCmd.
* @see Call
*/
case class JsFunc(method: String, params: JsExp*) extends JsMember {
def toJsCmd = params.map(_.toJsCmd).mkString(method + "(", ", ", ")")
def cmd: JsCmd = JsCmds.Run(toJsCmd + ";")
}
/**
* Put any JavaScript expression you want in here and the result will be
* evaluated.
*/
case class JsRaw(rawJsCmd: String) extends JsExp {
def toJsCmd = rawJsCmd
}
case class JsVar(varName: String, andThen: String*) extends JsExp {
def toJsCmd = varName + (if (andThen.isEmpty) ""
else andThen.mkString(".", ".", ""))
}
/**
* A value that can be retrieved from an expression
*/
case class JsVal(valueName: String) extends JsMember {
def toJsCmd = valueName
}
case object Id extends JsMember {
def toJsCmd = "id"
}
case object Parent extends JsMember {
def toJsCmd = "parentNode"
}
case object Style extends JsMember {
def toJsCmd = "style"
}
case object Value extends JsMember {
def toJsCmd = "value"
}
case object JsFalse extends JsExp {
def toJsCmd = "false"
}
case object JsNull extends JsExp {
def toJsCmd = "null"
}
case object JsTrue extends JsExp {
def toJsCmd = "true"
}
/**
* A JavaScript method that takes parameters
*
* Call is very similar to JsFunc but only the former will be implicitly converted to a JsCmd.
* @see JsFunc
*/
case class Call(function: String, params: JsExp*) extends JsExp {
def toJsCmd = function + "(" + params.map(_.toJsCmd).mkString(",") + ")"
}
trait AnonFunc extends JsExp {
def applied: JsExp = new JsExp {
def toJsCmd = "(" + AnonFunc.this.toJsCmd + ")" + "()"
}
def applied(params: JsExp*): JsExp = new JsExp {
def toJsCmd = "(" + AnonFunc.this.toJsCmd + ")" +
params.map(_.toJsCmd).mkString("(", ",", ")")
}
}
object AnonFunc {
def apply(in: JsCmd): AnonFunc = new JsExp with AnonFunc {
def toJsCmd = "function() {" + in.toJsCmd + "}"
}
def apply(params: String, in: JsCmd): AnonFunc = new JsExp with AnonFunc {
def toJsCmd = "function(" + params + ") {" + in.toJsCmd + "}"
}
}
object JsObj {
def apply(members: (String, JsExp)*): JsObj =
new JsObj {
def props = members.toList
}
}
case class JsLt(left: JsExp, right: JsExp) extends JsExp {
def toJsCmd = left.toJsCmd + " < " + right.toJsCmd
}
case class JsGt(left: JsExp, right: JsExp) extends JsExp {
def toJsCmd = left.toJsCmd + " > " + right.toJsCmd
}
case class JsEq(left: JsExp, right: JsExp) extends JsExp {
def toJsCmd = left.toJsCmd + " == " + right.toJsCmd
}
case class JsNotEq(left: JsExp, right: JsExp) extends JsExp {
def toJsCmd = left.toJsCmd + " != " + right.toJsCmd
}
case class JsLtEq(left: JsExp, right: JsExp) extends JsExp {
def toJsCmd = left.toJsCmd + " <= " + right.toJsCmd
}
case class JsGtEq(left: JsExp, right: JsExp) extends JsExp {
def toJsCmd = left.toJsCmd + " >= " + right.toJsCmd
}
case class JsOr(left: JsExp, right: JsExp) extends JsExp {
def toJsCmd = left.toJsCmd + " || " + right.toJsCmd
}
case class JsAnd(left: JsExp, right: JsExp) extends JsExp {
def toJsCmd = left.toJsCmd + " && " + right.toJsCmd
}
case class JsNot(exp: JsExp) extends JsExp {
def toJsCmd = "!" + exp.toJsCmd
}
}
trait HtmlFixer {
/**
   * Calls fixHtmlAndJs and, if there are embedded script tags,
   * constructs a function that executes the contents of the scripts and
   * then evaluates to the expression. For use when converting
* a JsExp that contains HTML.
*/
def fixHtmlFunc(uid: String, content: NodeSeq)(f: String => String) =
fixHtmlAndJs(uid, content) match {
case (str, Nil) => f(str)
case (str, cmds) => "((function() {"+cmds.reduceLeft{_ & _}.toJsCmd+" return "+f(str)+";})())"
}
/**
   * Calls fixHtmlAndJs and, if there are embedded script tags,
   * appends the JsCmds to the String returned from applying
* the function to the enclosed HTML.
* For use when converting
* a JsCmd that contains HTML.
*/
def fixHtmlCmdFunc(uid: String, content: NodeSeq)(f: String => String) =
fixHtmlAndJs(uid, content) match {
case (str, Nil) => f(str)
case (str, cmds) => f(str)+"; "+cmds.reduceLeft(_ & _).toJsCmd
}
/**
* Super important... call fixHtml at instance creation time and only once
* This method must be run in the context of the thing creating the XHTML
* to capture the bound functions
*/
protected def fixHtmlAndJs(uid: String, content: NodeSeq): (String, List[JsCmd]) = {
import Helpers._
val w = new java.io.StringWriter
val xhtml = S.session.
map(s =>
s.fixHtml(s.processSurroundAndInclude("JS SetHTML id: "
+ uid,
content))).
openOr(content)
import scala.collection.mutable.ListBuffer
val lb = new ListBuffer[JsCmd]
val revised = ("script" #> nsFunc(ns => {
ns match {
case FindScript(e) => {
lb += JE.JsRaw(ns.text).cmd
NodeSeq.Empty
}
case x => x
}
})).apply(xhtml)
S.htmlProperties.htmlWriter(Group(revised), w)
(w.toString.encJs, lb.toList)
}
private object FindScript {
def unapply(in: NodeSeq): Option[Elem] = in match {
case e: Elem => {
e.attribute("type").map(_.text).filter(_ == "text/javascript").flatMap {
a =>
if (e.attribute("src").isEmpty) Some(e) else None
}
}
case _ => None
}
}
}
trait JsCmd extends HtmlFixer with ToJsCmd {
def &(other: JsCmd): JsCmd = JsCmds.CmdPair(this, other)
def toJsCmd: String
override def toString() = "JsCmd("+toJsCmd+")"
}
object JsCmd {
/**
* If you've got Unit and need a JsCmd, return a Noop
*/
implicit def unitToJsCmd(in: Unit): JsCmd = JsCmds.Noop
}
object JsCmds {
implicit def seqJsToJs(in: Seq[JsCmd]): JsCmd = in.foldLeft[JsCmd](Noop)(_ & _)
object Script {
def apply(script: JsCmd): Node = <script type="text/javascript">{Unparsed("""
// <![CDATA[
""" + fixEndScriptTag(script.toJsCmd) + """
// ]]>
""")}</script>
private def fixEndScriptTag(in: String): String =
"""\\<\\/script\\>""".r.replaceAllIn(in, """<\\\\/script>""")
}
def JsHideId(what: String): JsCmd = LiftRules.jsArtifacts.hide(what).cmd
def JsShowId(what: String): JsCmd = LiftRules.jsArtifacts.show(what).cmd
/**
* Replaces the node having the provided id with the markup given by node
*
* @param id - the id of the node that will be replaced
* @param node - the new node
*/
case class Replace(id: String, content: NodeSeq) extends JsCmd {
val toJsCmd = LiftRules.jsArtifacts.replace(id, Helpers.stripHead(content)).toJsCmd
}
/**
* Replaces the content of the node with the provided id with the markup given by content
*
   * This is analogous to assigning a new value to a DOM object's innerHTML property in JavaScript.
*
* @param id - the id of the node whose content will be replaced
* @param content - the new content
*/
case class SetHtml(uid: String, content: NodeSeq) extends JsCmd {
// we want eager evaluation of the snippets so they get evaluated in context
val toJsCmd = LiftRules.jsArtifacts.setHtml(uid, Helpers.stripHead(content)).toJsCmd
}
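  // Hedged illustration (the element id is made up):
  //   SetHtml("status", <span>Saved</span>)             // replaces the *contents* of #status
  //   Replace("status", <div id="status">Saved</div>)   // replaces the node itself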
/**
* Makes the parameter the selected HTML element on load of the page
*
* @param in the element that should have focus
*
* @return the element and a script that will give the element focus
*/
object FocusOnLoad {
def apply(in: Elem): NodeSeq = {
val (elem, id) = findOrAddId(in)
elem ++ Script(LiftRules.jsArtifacts.onLoad(Run("if (document.getElementById(" + id.encJs + ")) {document.getElementById(" + id.encJs + ").focus();};")))
}
}
/**
* Sets the value of an element and sets the focus
*/
case class SetValueAndFocus(id: String, value: String) extends JsCmd {
def toJsCmd = "if (document.getElementById(" + id.encJs + ")) {document.getElementById(" + id.encJs + ").value = " +
value.encJs +
"; document.getElementById(" + id.encJs + ").focus();};"
}
/**
* Sets the focus on the element denominated by the id
*/
case class Focus(id: String) extends JsCmd {
def toJsCmd = "if (document.getElementById(" + id.encJs + ")) {document.getElementById(" + id.encJs + ").focus();};"
}
/**
* Creates a JavaScript function with a name, a parameters list and
* a function body
*/
object Function {
def apply(name: String, params: List[String], body: JsCmd): JsCmd =
new JsCmd {
def toJsCmd = "function " + name + "(" +
params.mkString(", ") + """) {
""" + body.toJsCmd + """
}
"""
}
}
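  // Hedged example (function and parameter names are invented):
  //   Function("showGreeting", List("name"), Run("alert('Hello ' + name);"))
  // renders roughly as: function showGreeting(name) { alert('Hello ' + name); }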
/**
* Execute the 'what' code when the page is ready for use
*/
object OnLoad {
def apply(what: JsCmd): JsCmd = LiftRules.jsArtifacts.onLoad(what)
}
/**
   * Sets the value of the element with the given 'id' attribute to
   * the result of the 'right' expression
*/
case class SetValById(id: String, right: JsExp) extends JsCmd {
def toJsCmd = "if (document.getElementById(" + id.encJs + ")) {document.getElementById(" + id.encJs + ").value = " +
right.toJsCmd + ";};"
}
/**
* Assigns the value computed by the 'right' expression to the
* 'left' expression.
*/
case class SetExp(left: JsExp, right: JsExp) extends JsCmd {
def toJsCmd = left.toJsCmd + " = " + right.toJsCmd + ";"
}
/**
* Creates a JavaScript var named by 'name' and assigns it the
* value of 'right' expression.
*/
case class JsCrVar(name: String, right: JsExp) extends JsCmd {
def toJsCmd = "var " + name + " = " + right.toJsCmd + ";"
}
/**
* Assigns the value of 'right' to the members of the element
* having this 'id', chained by 'then' sequences
*/
case class SetElemById(id: String, right: JsExp, thenStr: String*) extends JsCmd {
def toJsCmd = "if (document.getElementById(" + id.encJs + ")) {document.getElementById(" + id.encJs + ")" + (
if (thenStr.isEmpty) "" else thenStr.mkString(".", ".", "")
) + " = " + right.toJsCmd + ";};"
}
implicit def jsExpToJsCmd(in: JsExp) = in.cmd
case class CmdPair(left: JsCmd, right: JsCmd) extends JsCmd {
import scala.collection.mutable.ListBuffer;
def toJsCmd: String = {
val acc = new ListBuffer[JsCmd]()
appendDo(acc, left :: right :: Nil)
      acc.map(_.toJsCmd).mkString("\n")
}
@scala.annotation.tailrec
private def appendDo(acc: ListBuffer[JsCmd], cmds: List[JsCmd]) {
cmds match {
case Nil =>
case CmdPair(l, r) :: rest => appendDo(acc, l :: r :: rest)
case a :: rest => acc.append(a); appendDo(acc, rest)
}
}
}
trait HasTime {
def time: Box[TimeSpan]
def timeStr = time.map(_.millis.toString) openOr ""
}
case class After(time: TimeSpan, toDo: JsCmd) extends JsCmd {
def toJsCmd = "setTimeout(function() {" + toDo.toJsCmd + "}, " + time.millis + ");"
}
case class Alert(text: String) extends JsCmd {
def toJsCmd = "alert(" + text.encJs + ");"
}
case class Prompt(text: String, default: String = "") extends JsExp {
def toJsCmd = "prompt(" + text.encJs + "," + default.encJs + ")"
}
case class Confirm(text: String, yes: JsCmd) extends JsCmd {
def toJsCmd = "if (confirm(" + text.encJs + ")) {" + yes.toJsCmd + "}"
}
case class Run(text: String) extends JsCmd {
def toJsCmd = text
}
case object _Noop extends JsCmd {
def toJsCmd = ""
}
implicit def cmdToString(in: JsCmd): String = in.toJsCmd
def Noop: JsCmd = _Noop
case class JsTry(what: JsCmd, alert: Boolean) extends JsCmd {
def toJsCmd = "try { " + what.toJsCmd + " } catch (e) {" + (if (alert) "alert(e);" else "") + "}"
}
/**
* JsSchedule the execution of the JsCmd using setTimeout()
* @param what the code to execute
*/
case class JsSchedule(what: JsCmd) extends JsCmd {
def toJsCmd = s"""setTimeout(function()
{
${what.toJsCmd}
} , 0);"""
}
/**
* A companion object with a helpful alternative constructor
*/
object RedirectTo {
/**
* Redirect to a page and execute the function
* when the page is loaded (only if the page is on the
* same server, not going to some other server on the internet)
*/
def apply(where: String, func: () => Unit): RedirectTo =
S.session match {
case Full(liftSession) =>
new RedirectTo(liftSession.attachRedirectFunc(where, Full(func)))
case _ => new RedirectTo(where)
}
}
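  // Hedged example (the path and callback are made up): redirect within the same server
  // and run a server-side function once the target page is requested:
  //   RedirectTo("/dashboard", () => println("user landed on /dashboard"))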
case class RedirectTo(where: String) extends JsCmd {
private val where2 = // issue 176
if (where.startsWith("/") &&
!LiftRules.excludePathFromContextPathRewriting.vend(where)) (S.contextPath + where) else where
def toJsCmd = "window.location = " + S.encodeURL(where2).encJs + ";"
}
/**
* Reload the current page
*/
case object Reload extends JsCmd {
def toJsCmd = "window.location.reload();"
}
/**
* Update a Select with new Options
*/
case class ReplaceOptions(select: String, opts: List[(String, String)], dflt: Box[String]) extends JsCmd {
def toJsCmd = """var x=document.getElementById(""" + select.encJs + """);
if (x) {
while (x.length > 0) {x.remove(0);}
var y = null;
""" +
opts.map {
case (value, text) =>
"y=document.createElement('option'); " +
"y.text = " + text.encJs + "; " +
"y.value = " + value.encJs + "; " +
(if (Full(value) == dflt) "y.selected = true; " else "") +
" try {x.add(y, null);} catch(e) {if (typeof(e) == 'object' && typeof(e.number) == 'number' && (e.number & 0xFFFF) == 5){ x.add(y,x.options.length); } } "
      }.mkString("\n")+"};"
}
case object JsIf {
def apply(condition: JsExp, body: JsCmd): JsCmd = JE.JsRaw("if ( " + condition.toJsCmd + " ) { " + body.toJsCmd + " }")
def apply(condition: JsExp, bodyTrue: JsCmd, bodyFalse: JsCmd): JsCmd =
JE.JsRaw("if ( " + condition.toJsCmd + " ) { " + bodyTrue.toJsCmd + " } else { " + bodyFalse.toJsCmd + " }")
def apply(condition: JsExp, body: JsExp): JsCmd = JE.JsRaw("if ( " + condition.toJsCmd + " ) { " + body.toJsCmd + " }")
def apply(condition: JsExp, bodyTrue: JsExp, bodyFalse: JsExp): JsCmd =
JE.JsRaw("if ( " + condition.toJsCmd + " ) { " + bodyTrue.toJsCmd + " } else { " + bodyFalse.toJsCmd + " }")
}
case class JsWhile(condition: JsExp, body: JsExp) extends JsCmd {
def toJsCmd = "while ( " + condition.toJsCmd + " ) { " + body.toJsCmd + " }"
}
case class JsWith(reference: String, body: JsExp) extends JsCmd {
def toJsCmd = "with ( " + reference + " ) { " + body.toJsCmd + " }"
}
case class JsDoWhile(body: JsExp, condition: JsExp) extends JsCmd {
def toJsCmd = "do { " + body.toJsCmd + " } while ( " + condition.toJsCmd + " )"
}
case class JsFor(initialExp: JsExp, condition: JsExp, incrementExp: JsExp, body: JsExp) extends JsCmd {
def toJsCmd = "for ( " + initialExp.toJsCmd + "; " +
condition.toJsCmd + "; " +
incrementExp.toJsCmd + " ) { " + body.toJsCmd + " }"
}
case class JsForIn(initialExp: JsExp, reference: String, body: JsCmd) extends JsCmd {
def toJsCmd = "for ( " + initialExp.toJsCmd + " in " + reference + ") { " + body.toJsCmd + " }"
}
case object JsBreak extends JsCmd {
def toJsCmd = "break"
}
case object JsContinue extends JsCmd {
def toJsCmd = "continue"
}
object JsReturn {
def apply(in: JsExp): JsCmd = new JsCmd {
def toJsCmd = "return " + in.toJsCmd
}
def apply(): JsCmd = new JsCmd {
def toJsCmd = "return "
}
}
}
/**
* A collection of defaults for JavaScript related stuff
*/
object JsRules {
/**
* The default duration for displaying FadeOut and FadeIn
* messages.
*/
//@deprecated
@volatile var prefadeDuration: Helpers.TimeSpan = 5.seconds
/**
* The default fade time for fading FadeOut and FadeIn
* messages.
*/
//@deprecated
@volatile var fadeTime: Helpers.TimeSpan = 1.second
}
| lzpfmh/framework-2 | web/webkit/src/main/scala/net/liftweb/http/js/JsCommands.scala | Scala | apache-2.0 | 29,323 |
package com.raquo.dombuilder.generic.modifiers
import com.raquo.domtypes.generic.Modifier
class ModifierSeq[N](
val modifiers: Iterable[Modifier[N]]
) extends Modifier[N] {
override def apply(node: N): Unit = {
modifiers.foreach(_(node))
}
}
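// Hedged sketch (the node type and the individual modifiers are hypothetical):
//   val mods = new ModifierSeq[MyNode](List(setTitle("box"), addClass("wide")))
//   mods(node) // applies each contained modifier to `node`, in order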
| raquo/scala-dom-builder | shared/src/main/scala/com/raquo/dombuilder/generic/modifiers/ModifierSeq.scala | Scala | mit | 255 |
/*
* Copyright (C) 2013 Alcatel-Lucent.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Licensed to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package molecule
package process
import platform.UThread
import channel.ROChan
import stream.{ IChan, OChan }
/**
* Instance of a ProcessType that has been bound to input and
* output channels but not yet running.
*
*/
trait Process[+R] { outer =>
/**
* Type of this process.
*
* @return the process type
*/
def ptype: ProcessType
/**
* Start this process by assigning it to a new user-level thread and a return channel.
*
* @param t the user-level thread
* @param rchan the return channel
* @return unit
*/
def start(t: UThread, rchan: ROChan[R]): Unit
/**
* Apply a function `f` to the result of this process.
*
* @param f the function to apply to result.
* @return a new process with the result
*/
def map[B](f: R => B): Process[B] = new Process[B] {
def ptype = outer.ptype
def start(t: UThread, rchan: ROChan[B]): Unit =
outer.start(t, rchan.map(f))
}
/**
* Apply a function `f` that starts a new process on the result of this process.
*
* @param f the function to apply to result.
* @return the new process created by `f`
*/
def flatMap[B](f: R => Process[B]): Process[B] = new Process[B] {
def ptype = outer.ptype
def start(t: UThread, rchan: ROChan[B]): Unit =
outer.start(t, new ROChan[R] {
def done(v: Either[Signal, R]) = v match {
case Right(r) => f(r).start(t, rchan)
case Left(signal) => rchan.done(Left(signal))
}
})
}
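  // Hedged illustration (the `parse` and `validate` processes are hypothetical):
  //   val pipeline: Process[Int] = parse(ichan).flatMap(n => validate(n)).map(_ * 2)
  // `flatMap` starts the next process on the same user-level thread once the first one
  // completes with Right(r); a Left(signal) short-circuits straight to the return channel.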
override final def toString =
ptype.name + "/" + System.identityHashCode(this)
}
abstract class Process0x0[R](
val ptype: ProcessType0x0[R]) extends Process[R]
/**
* A Process bound to channels that may be serialized and sent
* remotely (TBD)
*/
abstract class Process1x0[A, R](
val ptype: ProcessType1x0[A, R],
val ichan1: IChan[A]) extends Process[R]
abstract class Process0x1[A, R](
val ptype: ProcessType0x1[A, R],
val ochan1: OChan[A]) extends Process[R]
abstract class Process1x1[A, B, R](
val ptype: ProcessType1x1[A, B, R],
val ichan1: IChan[A],
val ochan1: OChan[B]) extends Process[R]
abstract class Process2x0[A, B, R](
val ptype: ProcessType2x0[A, B, R],
val ichan1: IChan[A],
val ichan2: IChan[B]) extends Process[R]
abstract class Process2x1[A, B, C, R](
val ptype: ProcessType2x1[A, B, C, R],
val ichan1: IChan[A],
val ichan2: IChan[B],
val ochan1: OChan[C]) extends Process[R]
abstract class Process0x2[C, D, R](
val ptype: ProcessType0x2[C, D, R],
val ochan1: OChan[C],
val ochan2: OChan[D]) extends Process[R]
abstract class Process1x2[A, C, D, R](
val ptype: ProcessType1x2[A, C, D, R],
val ichan1: IChan[A],
val ochan1: OChan[C],
val ochan2: OChan[D]) extends Process[R]
abstract class Process2x2[A, B, C, D, R](
val ptype: ProcessType2x2[A, B, C, D, R],
val ichan1: IChan[A],
val ichan2: IChan[B],
val ochan1: OChan[C],
val ochan2: OChan[D]) extends Process[R]
abstract class Process3x0[A, B, C, R](
val ptype: ProcessType3x0[A, B, C, R],
val ichan1: IChan[A],
val ichan2: IChan[B],
val ichan3: IChan[C]) extends Process[R]
abstract class Process3x1[A, B, C, D, R](
val ptype: ProcessType3x1[A, B, C, D, R],
val ichan1: IChan[A],
val ichan2: IChan[B],
val ichan3: IChan[C],
val ochan1: OChan[D]) extends Process[R]
abstract class Process3x2[A, B, C, D, E, R](
val ptype: ProcessType3x2[A, B, C, D, E, R],
val ichan1: IChan[A],
val ichan2: IChan[B],
val ichan3: IChan[C],
val ochan1: OChan[D],
val ochan2: OChan[E]) extends Process[R]
abstract class Process0x3[D, E, F, R](
val ptype: ProcessType0x3[D, E, F, R],
val ochan1: OChan[D],
val ochan2: OChan[E],
val ochan3: OChan[F]) extends Process[R]
abstract class Process1x3[A, D, E, F, R](
val ptype: ProcessType1x3[A, D, E, F, R],
val ichan1: IChan[A],
val ochan1: OChan[D],
val ochan2: OChan[E],
val ochan3: OChan[F]) extends Process[R]
abstract class Process2x3[A, B, D, E, F, R](
val ptype: ProcessType2x3[A, B, D, E, F, R],
val ichan1: IChan[A],
val ichan2: IChan[B],
val ochan1: OChan[D],
val ochan2: OChan[E],
val ochan3: OChan[F]) extends Process[R]
abstract class Process3x3[A, B, C, D, E, F, R](
val ptype: ProcessType3x3[A, B, C, D, E, F, R],
val ichan1: IChan[A],
val ichan2: IChan[B],
val ichan3: IChan[C],
val ochan1: OChan[D],
val ochan2: OChan[E],
val ochan3: OChan[F]) extends Process[R]
abstract class Process4x0[A, B, C, D, R](
val ptype: ProcessType4x0[A, B, C, D, R],
val ichan1: IChan[A],
val ichan2: IChan[B],
val ichan3: IChan[C],
val ichan4: IChan[D]) extends Process[R]
abstract class Process4x1[A, B, C, D, E, R](
val ptype: ProcessType4x1[A, B, C, D, E, R],
val ichan1: IChan[A],
val ichan2: IChan[B],
val ichan3: IChan[C],
val ichan4: IChan[D],
val ochan1: OChan[E]) extends Process[R]
abstract class Process4x2[A, B, C, D, E, F, R](
val ptype: ProcessType4x2[A, B, C, D, E, F, R],
val ichan1: IChan[A],
val ichan2: IChan[B],
val ichan3: IChan[C],
val ichan4: IChan[D],
val ochan1: OChan[E],
val ochan2: OChan[F]) extends Process[R]
abstract class Process4x3[A, B, C, D, E, F, G, R](
val ptype: ProcessType4x3[A, B, C, D, E, F, G, R],
val ichan1: IChan[A],
val ichan2: IChan[B],
val ichan3: IChan[C],
val ichan4: IChan[D],
val ochan1: OChan[E],
val ochan2: OChan[F],
val ochan3: OChan[G]) extends Process[R]
abstract class Process0x4[E, F, G, H, R](
val ptype: ProcessType0x4[E, F, G, H, R],
val ochan1: OChan[E],
val ochan2: OChan[F],
val ochan3: OChan[G],
val ochan4: OChan[H]) extends Process[R]
abstract class Process1x4[A, E, F, G, H, R](
val ptype: ProcessType1x4[A, E, F, G, H, R],
val ichan1: IChan[A],
val ochan1: OChan[E],
val ochan2: OChan[F],
val ochan3: OChan[G],
val ochan4: OChan[H]) extends Process[R]
abstract class Process2x4[A, B, E, F, G, H, R](
val ptype: ProcessType2x4[A, B, E, F, G, H, R],
val ichan1: IChan[A],
val ichan2: IChan[B],
val ochan1: OChan[E],
val ochan2: OChan[F],
val ochan3: OChan[G],
val ochan4: OChan[H]) extends Process[R]
abstract class Process3x4[A, B, C, E, F, G, H, R](
val ptype: ProcessType3x4[A, B, C, E, F, G, H, R],
val ichan1: IChan[A],
val ichan2: IChan[B],
val ichan3: IChan[C],
val ochan1: OChan[E],
val ochan2: OChan[F],
val ochan3: OChan[G],
val ochan4: OChan[H]) extends Process[R]
abstract class Process4x4[A, B, C, D, E, F, G, H, R](
val ptype: ProcessType4x4[A, B, C, D, E, F, G, H, R],
val ichan1: IChan[A],
val ichan2: IChan[B],
val ichan3: IChan[C],
val ichan4: IChan[D],
val ochan1: OChan[E],
val ochan2: OChan[F],
val ochan3: OChan[G],
val ochan4: OChan[H]) extends Process[R]
| molecule-labs/molecule | molecule-core/src/main/scala/molecule/process/Process.scala | Scala | apache-2.0 | 7,442 |
/*
* -╥⌐⌐⌐⌐ -⌐⌐⌐⌐-
* ≡╢░░░░⌐\\░░░φ ╓╝░░░░⌐░░░░╪╕
* ╣╬░░` `░░░╢┘ φ▒╣╬╝╜ ░░╢╣Q
* ║╣╬░⌐ ` ╤▒▒▒Å` ║╢╬╣
* ╚╣╬░⌐ ╔▒▒▒▒`«╕ ╢╢╣▒
* ╫╬░░╖ .░ ╙╨╨ ╣╣╬░φ ╓φ░╢╢Å
* ╙╢░░░░⌐"░░░╜ ╙Å░░░░⌐░░░░╝`
* ``˚¬ ⌐ ˚˚⌐´
*
* Copyright © 2016 Flipkart.com
*/
package com.flipkart.connekt.commons.services
import com.flipkart.connekt.commons.iomodels.ConnektRequest
import com.flipkart.connekt.commons.metrics.MetricRegistry
import com.flipkart.connekt.commons.services.SchedulerService.ScheduledRequest
import com.flipkart.connekt.commons.utils.StringUtils._
import flipkart.cp.convert.chronosQ.client.SchedulerClient
import flipkart.cp.convert.chronosQ.core.SchedulerEntry
import flipkart.cp.convert.chronosQ.core.impl.{MurmurHashPartioner, SecondGroupedTimeBucket}
import flipkart.cp.convert.chronosQ.impl.hbase.HbaseSchedulerStore
import org.apache.hadoop.hbase.client.Connection
class SchedulerService( hConnection:Connection) extends TService {
lazy val schedulerStore = ConnektConfig.getString("tables.hbase.scheduler.store").get
lazy val schedulerStore_CF = ConnektConfig.getOrElse("scheduler.hbase.store.columnFamily", "d")
val client = new SchedulerClient.Builder[ScheduledRequest]()
.withStore(new HbaseSchedulerStore(hConnection,schedulerStore,schedulerStore_CF,"lo")) //app name as short as possible as this is part of row key
    .withTimeBucket(new SecondGroupedTimeBucket(ConnektConfig.getInt("scheduler.priority.lo.time.bucket").getOrElse(600))) // bucket size in seconds (default 600)
.withPartitioner(new MurmurHashPartioner(ConnektConfig.getInt("scheduler.priority.lo.partitions").getOrElse(96))) //Increasing partitions is not issue but for decreasing we need to move scheduled entry in higher partitions to new partition distribution
.withMetricRegistry(MetricRegistry.REGISTRY)
.buildOrGet
}
object SchedulerService {
private[commons] val delimiter = "$"
case class ScheduledRequest(request: ConnektRequest, queueName: String) extends SchedulerEntry {
override def getStringValue: String = {
queueName + delimiter + request.getJson
}
}
}
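/**
 * Illustrative sketch (not part of the original source): recovering the queue name
 * and the request JSON from a value produced by ScheduledRequest.getStringValue.
 * Splitting on the literal "$" delimiter with a limit of 2 keeps any "$" characters
 * inside the JSON payload intact.
 */
object ScheduledRequestFormatSketch {
  def decode(storedValue: String): (String, String) = {
    val Array(queueName, requestJson) = storedValue.split("\\$", 2)
    (queueName, requestJson)
  }
}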
| Flipkart/connekt | commons/src/main/scala/com/flipkart/connekt/commons/services/SchedulerService.scala | Scala | mit | 2,444 |
/**
* Swaggy Jenkins
* Jenkins API clients generated from Swagger / Open API specification
*
* The version of the OpenAPI document: 1.1.2-pre.0
* Contact: [email protected]
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
package org.openapitools.client.model
case class ListView(
`class`: Option[String] = None,
description: Option[String] = None,
jobs: Option[Seq[FreeStyleProject]] = None,
name: Option[String] = None,
url: Option[String] = None
)
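/**
 * Illustrative sketch (not part of the generated model): constructing a ListView
 * with only the fields of interest; the remaining optional fields default to None.
 * The view name and URL are made up for the example.
 */
object ListViewSketch {
  val view: ListView = ListView(
    name = Some("build-pipelines"),
    url = Some("https://jenkins.example.org/view/build-pipelines/")
  )
}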
| cliffano/swaggy-jenkins | clients/scala-sttp/generated/src/main/scala/org/openapitools/client/model/ListView.scala | Scala | mit | 589 |
object Foo {
enum MyEnum {
case Red
case Blue(msg: String)
}
export MyEnum._
}
object Bar {
type Blue = Foo.Blue
}
import Foo._
def foo(a: MyEnum): Seq[Bar.Blue] = a match {
case Red => Seq.empty
case m: Foo.Blue => Seq(m)
}
| som-snytt/dotty | tests/pos-special/fatal-warnings/i7219.scala | Scala | apache-2.0 | 248 |
package com.arcusys.valamis.lesson.generator
import java.util.UUID
import com.arcusys.valamis.file.service.FileService
import com.arcusys.valamis.lesson.generator.scorm.ScormPackageGenerator
import com.arcusys.valamis.lesson.generator.tincan.TinCanPackageGeneratorProperties
import com.arcusys.valamis.lesson.generator.tincan.file.TinCanQuizPackageGenerator
import com.arcusys.valamis.lesson.scorm.storage.ScormPackagesStorage
import com.arcusys.valamis.lesson.service.{PackageService, PackageUploadManager}
import com.arcusys.valamis.lesson.tincan.storage.TincanPackageStorage
import com.arcusys.valamis.quiz.model.QuizInfo
import com.arcusys.valamis.quiz.service.QuizService
import com.arcusys.valamis.quiz.storage.QuizQuestionCategoryStorage
import com.arcusys.valamis.uri.model.ValamisURIType
import com.arcusys.valamis.uri.service.URIServiceContract
import com.arcusys.valamis.util.serialization.JsonHelper
import com.escalatesoft.subcut.inject.{ BindingModule, Injectable }
class QuizPublishManager(implicit val bindingModule: BindingModule) extends Injectable {
private def categoryStorage = inject[QuizQuestionCategoryStorage]
private def fileService = inject[FileService]
private def scormPackageRepository = inject[ScormPackagesStorage]
private def tincanPackageRepository = inject[TincanPackageStorage]
private def quizService = inject[QuizService]
private val packageService = inject[PackageService]
lazy val uploadManager = new PackageUploadManager
private val uriService = inject[URIServiceContract]
def publishQuizAsTincan(quizId: Int, userId: Long, courseId: Long, properties: TinCanPackageGeneratorProperties) = {
val quiz = quizService.getQuiz(quizId)
if (quizService.getQuestionsCount(quizId) == 0) {
      throw new Exception("can't publish empty quiz, quizId: " + quizId)
}
val uriContent = Option(JsonHelper.toJson(new QuizInfo(quiz)))
val rootActivityId = uriService.getOrCreate(uriService.getLocalURL(), UUID.randomUUID.toString, ValamisURIType.Course, uriContent)
val generator = new TinCanQuizPackageGenerator(quiz, rootActivityId.uri, properties)
val packageZipFile = generator.generateZip(quiz.courseID)
val quizLogo = quiz.logo
val packageId = uploadManager.uploadTincanPackage(quiz.title, quiz.description, packageZipFile, courseId, userId)
if (quizLogo.nonEmpty) {
fileService.copyFile("quiz_logo_" + quizId, quizLogo, "package_logo_" + packageId, quizLogo)
tincanPackageRepository.setLogo(packageId, Option(quizLogo))
}
packageZipFile.delete()
}
def publishQuizAsScorm(quizId: Int, userId: Long, courseId: Long) = {
val quiz = quizService.getQuiz(quizId)
if (quizService.getQuestionsCount(quizId) == 0) {
      throw new Exception("can't publish empty quiz, quizId: " + quizId)
}
val generator = new ScormPackageGenerator(quiz)
val packageZipFile = generator.generateZip(quiz.courseID)
val quizLogo = quiz.logo
val packageId = uploadManager.uploadScormPackage(quiz.title, quiz.description, packageZipFile, courseId, userId)
if (quizLogo.nonEmpty) {
fileService.copyFile("quiz_logo_" + quizId, quizLogo, "package_logo_" + packageId, quizLogo)
scormPackageRepository.setLogo(packageId, Option(quizLogo))
}
packageZipFile.delete()
}
}
| ViLPy/Valamis | valamis-lesson-generator/src/main/scala/com/arcusys/valamis/lesson/generator/QuizPublishManager.scala | Scala | lgpl-3.0 | 3,320 |
package unfiltered.request
import org.specs2.mutable._
class MimeSpec extends Specification {
"Mime" should {
"match strings with known extensions" in {
("test.json" match {
case Mime(mime) => Some(mime)
case _ => None
}) must beSome("application/json")
}
"match strings with multiple extensions" in {
("test.ext.json" match {
case Mime(mime) => Some(mime)
case _ => None
}) must beSome("application/json")
}
"not match strings with no extensions" in {
("test" match {
case Mime(mime) => Some(mime)
case _ => None
}) must beNone
}
"not match strings with unknown extensions" in {
("test.dson" match {
case Mime(mime) => Some(mime)
case _ => None
}) must beNone
}
}
}
| omarkilani/unfiltered | library/src/test/scala/MimeSpec.scala | Scala | mit | 817 |
package io.iohk.ethereum.network.p2p.messages
import akka.util.ByteString
import io.iohk.ethereum.domain._
import io.iohk.ethereum.mpt.{MptNode, MptTraversals}
import io.iohk.ethereum.network.p2p.{Message, MessageSerializableImplicit}
import io.iohk.ethereum.rlp.RLPImplicitConversions._
import io.iohk.ethereum.rlp.RLPImplicits._
import io.iohk.ethereum.rlp._
import org.bouncycastle.util.encoders.Hex
object PV63 {
object GetNodeData {
implicit class GetNodeDataEnc(val underlyingMsg: GetNodeData)
extends MessageSerializableImplicit[GetNodeData](underlyingMsg)
with RLPSerializable {
override def code: Int = Codes.GetNodeDataCode
override def toRLPEncodable: RLPEncodeable = toRlpList(msg.mptElementsHashes)
}
implicit class GetNodeDataDec(val bytes: Array[Byte]) extends AnyVal {
def toGetNodeData: GetNodeData = rawDecode(bytes) match {
case rlpList: RLPList => GetNodeData(fromRlpList[ByteString](rlpList))
case _ => throw new RuntimeException("Cannot decode GetNodeData")
}
}
}
case class GetNodeData(mptElementsHashes: Seq[ByteString]) extends Message {
override def code: Int = Codes.GetNodeDataCode
override def toString: String =
s"GetNodeData{ hashes: ${mptElementsHashes.map(e => Hex.toHexString(e.toArray[Byte]))} }"
}
object AccountImplicits {
import UInt256RLPImplicits._
implicit class AccountEnc(val account: Account) extends RLPSerializable {
override def toRLPEncodable: RLPEncodeable = {
import account._
RLPList(nonce.toRLPEncodable, balance.toRLPEncodable, storageRoot, codeHash)
}
}
implicit class AccountDec(val bytes: Array[Byte]) extends AnyVal {
def toAccount: Account = rawDecode(bytes) match {
case RLPList(nonce, balance, storageRoot, codeHash) =>
Account(nonce.toUInt256, balance.toUInt256, storageRoot, codeHash)
case _ => throw new RuntimeException("Cannot decode Account")
}
}
}
object MptNodeEncoders {
val BranchNodeChildLength = 16
val BranchNodeIndexOfValue = 16
val ExtensionNodeLength = 2
val LeafNodeLength = 2
val MaxNodeValueSize = 31
val HashLength = 32
implicit class MptNodeEnc(obj: MptNode) extends RLPSerializable {
def toRLPEncodable: RLPEncodeable = MptTraversals.encode(obj)
}
implicit class MptNodeDec(val bytes: Array[Byte]) extends AnyVal {
def toMptNode: MptNode = MptTraversals.decodeNode(bytes)
}
implicit class MptNodeRLPEncodableDec(val rlp: RLPEncodeable) extends AnyVal {
def toMptNode: MptNode = MptTraversals.decodeNode(rlp)
}
}
object NodeData {
implicit class NodeDataEnc(val underlyingMsg: NodeData)
extends MessageSerializableImplicit[NodeData](underlyingMsg)
with RLPSerializable {
import MptNodeEncoders._
override def code: Int = Codes.NodeDataCode
override def toRLPEncodable: RLPEncodeable = msg.values
@throws[RLPException]
def getMptNode(index: Int): MptNode = msg.values(index).toArray[Byte].toMptNode
}
implicit class NodeDataDec(val bytes: Array[Byte]) extends AnyVal {
def toNodeData: NodeData = rawDecode(bytes) match {
case rlpList: RLPList => NodeData(rlpList.items.map { e => e: ByteString })
case _ => throw new RuntimeException("Cannot decode NodeData")
}
}
}
case class NodeData(values: Seq[ByteString]) extends Message {
override def code: Int = Codes.NodeDataCode
override def toString: String =
s"NodeData{ values: ${values.map(b => Hex.toHexString(b.toArray[Byte]))} }"
}
object GetReceipts {
implicit class GetReceiptsEnc(val underlyingMsg: GetReceipts)
extends MessageSerializableImplicit[GetReceipts](underlyingMsg)
with RLPSerializable {
override def code: Int = Codes.GetReceiptsCode
override def toRLPEncodable: RLPEncodeable = msg.blockHashes: RLPList
}
implicit class GetReceiptsDec(val bytes: Array[Byte]) extends AnyVal {
def toGetReceipts: GetReceipts = rawDecode(bytes) match {
case rlpList: RLPList => GetReceipts(fromRlpList[ByteString](rlpList))
case _ => throw new RuntimeException("Cannot decode GetReceipts")
}
}
}
case class GetReceipts(blockHashes: Seq[ByteString]) extends Message {
override def code: Int = Codes.GetReceiptsCode
override def toString: String = {
s"GetReceipts{ blockHashes: ${blockHashes.map(e => Hex.toHexString(e.toArray[Byte]))} } "
}
}
object TxLogEntryImplicits {
implicit class TxLogEntryEnc(logEntry: TxLogEntry) extends RLPSerializable {
override def toRLPEncodable: RLPEncodeable = {
import logEntry._
RLPList(loggerAddress.bytes, logTopics, data)
}
}
implicit class TxLogEntryDec(rlp: RLPEncodeable) {
def toTxLogEntry: TxLogEntry = rlp match {
case RLPList(loggerAddress, logTopics: RLPList, data) =>
TxLogEntry(Address(loggerAddress: ByteString), fromRlpList[ByteString](logTopics), data)
case _ => throw new RuntimeException("Cannot decode TransactionLog")
}
}
}
object ReceiptImplicits {
import TxLogEntryImplicits._
implicit class ReceiptEnc(receipt: Receipt) extends RLPSerializable {
override def toRLPEncodable: RLPEncodeable = {
import receipt._
val stateHash: RLPEncodeable = postTransactionStateHash match {
case HashOutcome(hash) => hash
case SuccessOutcome => 1.toByte
case _ => 0.toByte
}
RLPList(stateHash, cumulativeGasUsed, logsBloomFilter, RLPList(logs.map(_.toRLPEncodable): _*))
}
}
implicit class ReceiptSeqEnc(receipts: Seq[Receipt]) extends RLPSerializable {
override def toRLPEncodable: RLPEncodeable = RLPList(receipts.map(_.toRLPEncodable): _*)
}
implicit class ReceiptDec(val bytes: Array[Byte]) extends AnyVal {
def toReceipt: Receipt = ReceiptRLPEncodableDec(rawDecode(bytes)).toReceipt
def toReceipts: Seq[Receipt] = rawDecode(bytes) match {
case RLPList(items @ _*) => items.map(_.toReceipt)
case _ => throw new RuntimeException("Cannot decode Receipts")
}
}
implicit class ReceiptRLPEncodableDec(val rlpEncodeable: RLPEncodeable) extends AnyVal {
def toReceipt: Receipt = rlpEncodeable match {
case RLPList(postTransactionStateHash, cumulativeGasUsed, logsBloomFilter, logs: RLPList) =>
val stateHash = postTransactionStateHash match {
case RLPValue(bytes) if bytes.length > 1 => HashOutcome(ByteString(bytes))
case RLPValue(bytes) if bytes.length == 1 && bytes.head == 1 => SuccessOutcome
case RLPValue(bytes) if bytes.isEmpty => FailureOutcome
}
Receipt(stateHash, cumulativeGasUsed, logsBloomFilter, logs.items.map(_.toTxLogEntry))
case _ => throw new RuntimeException("Cannot decode Receipt")
}
}
}
object Receipts {
implicit class ReceiptsEnc(val underlyingMsg: Receipts)
extends MessageSerializableImplicit[Receipts](underlyingMsg)
with RLPSerializable {
import ReceiptImplicits._
override def code: Int = Codes.ReceiptsCode
override def toRLPEncodable: RLPEncodeable = RLPList(
msg.receiptsForBlocks.map((rs: Seq[Receipt]) => RLPList(rs.map((r: Receipt) => r.toRLPEncodable): _*)): _*
)
}
implicit class ReceiptsDec(val bytes: Array[Byte]) extends AnyVal {
import ReceiptImplicits._
def toReceipts: Receipts = rawDecode(bytes) match {
case rlpList: RLPList => Receipts(rlpList.items.collect { case r: RLPList => r.items.map(_.toReceipt) })
case _ => throw new RuntimeException("Cannot decode Receipts")
}
}
}
case class Receipts(receiptsForBlocks: Seq[Seq[Receipt]]) extends Message {
override def code: Int = Codes.ReceiptsCode
}
}
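/**
 * Illustrative sketch (not part of the original source): building a GetReceipts
 * request for a single (dummy) 32-byte block hash. Encoding to and from RLP is
 * provided by the implicit enrichment classes defined inside PV63 above.
 */
object PV63UsageSketch {
  val getReceipts: PV63.GetReceipts = PV63.GetReceipts(Seq(ByteString(Array.fill[Byte](32)(0x1f.toByte))))
}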
| input-output-hk/etc-client | src/main/scala/io/iohk/ethereum/network/p2p/messages/PV63.scala | Scala | mit | 7,963 |
/*
* Copyright (c) 2014 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics
package snowplow
package enrich
package common
package enrichments
package registry
// Java
import java.net.URI
// Scala
import scala.collection.JavaConversions._
import scala.reflect.BeanProperty
// Utils
import org.apache.http.client.utils.URLEncodedUtils
// Maven Artifact
import org.apache.maven.artifact.versioning.DefaultArtifactVersion
// Scalaz
import scalaz._
import Scalaz._
// json4s
import org.json4s.JValue
// Iglu
import iglu.client.SchemaKey
// This project
import utils.{ConversionUtils => CU}
import utils.MapTransformer.{
SourceMap,
TransformFunc
}
import utils.ScalazJson4sUtils
/**
* Companion object. Lets us create a
* CampaignAttributionEnrichment from a JValue
*/
object CampaignAttributionEnrichment extends ParseableEnrichment {
val supportedSchemaKey = SchemaKey("com.snowplowanalytics.snowplow", "campaign_attribution", "jsonschema", "1-0-0")
/**
* Creates a CampaignAttributionEnrichment instance from a JValue.
*
* @param config The referer_parser enrichment JSON
* @param schemaKey The SchemaKey provided for the enrichment
* Must be a supported SchemaKey for this enrichment
* @return a configured CampaignAttributionEnrichment instance
*/
def parse(config: JValue, schemaKey: SchemaKey): ValidatedNelMessage[CampaignAttributionEnrichment] = {
isParseable(config, schemaKey).flatMap( conf => {
(for {
medium <- ScalazJson4sUtils.extract[List[String]](config, "parameters", "fields", "mktMedium")
source <- ScalazJson4sUtils.extract[List[String]](config, "parameters", "fields", "mktSource")
term <- ScalazJson4sUtils.extract[List[String]](config, "parameters", "fields", "mktTerm")
content <- ScalazJson4sUtils.extract[List[String]](config, "parameters", "fields", "mktContent")
campaign <- ScalazJson4sUtils.extract[List[String]](config, "parameters", "fields", "mktCampaign")
enrich = CampaignAttributionEnrichment(medium, source, term, content, campaign)
} yield enrich).toValidationNel
})
}
}
/**
* Class for a marketing campaign
*
* @param medium Campaign medium
* @param source Campaign source
* @param term Campaign term
* @param content Campaign content
* @param campaign Campaign name
*
*/
case class MarketingCampaign(
medium: Option[String],
source: Option[String],
term: Option[String],
content: Option[String],
campaign: Option[String]
)
/**
* Config for a campaign_attribution enrichment
*
* @param mktMedium List of marketing medium parameters
* @param mktSource List of marketing source parameters
* @param mktTerm List of marketing term parameters
* @param mktContent List of marketing content parameters
* @param mktCampaign List of marketing campaign parameters
*/
case class CampaignAttributionEnrichment(
mktMedium: List[String],
mktSource: List[String],
mktTerm: List[String],
mktContent: List[String],
mktCampaign: List[String]
) extends Enrichment {
val version = new DefaultArtifactVersion("0.1.0")
/**
* Find the first string in parameterList which is a key of
* sourceMap and return the value of that key in sourceMap.
*
* @param parameterList List of accepted campaign parameter
* names in order of decreasing precedence
* @param sourceMap Map of key-value pairs in URI querystring
* @return Option boxing the value of the campaign parameter
*/
private def getFirstParameter(parameterList: List[String], sourceMap: SourceMap): Option[String] =
parameterList.find(sourceMap.contains(_)).map(sourceMap(_))
/**
* Extract the marketing fields from a URL.
*
* @param uri The URI to extract
* marketing fields from
* @param encoding The encoding of
* the URI being parsed
* @return the MarketingCampaign
* or an error message,
* boxed in a Scalaz
* Validation
*/
def extractMarketingFields(uri: URI, encoding: String): ValidationNel[String, MarketingCampaign] = {
val parameters = try {
URLEncodedUtils.parse(uri, encoding)
} catch {
case _ => return "Could not parse uri [%s]".format(uri).failNel[MarketingCampaign]
}
// Querystring map
val sourceMap: SourceMap = parameters.map(p => (p.getName -> p.getValue)).toList.toMap
val decodeString: TransformFunc = CU.decodeString(encoding, _, _)
val medium = getFirstParameter(mktMedium, sourceMap)
val source = getFirstParameter(mktSource, sourceMap)
val term = getFirstParameter(mktTerm, sourceMap)
val content = getFirstParameter(mktContent, sourceMap)
val campaign = getFirstParameter(mktCampaign, sourceMap)
MarketingCampaign(medium, source, term, content, campaign).success.toValidationNel
}
}
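/**
 * Illustrative sketch (not part of the original enrichment): extracting campaign
 * fields from a landing-page URI using the conventional utm_* parameter names.
 * The URI and parameter lists are made up for the example.
 */
object CampaignAttributionEnrichmentSketch {
  val enrichment = CampaignAttributionEnrichment(
    mktMedium = List("utm_medium"),
    mktSource = List("utm_source"),
    mktTerm = List("utm_term"),
    mktContent = List("utm_content"),
    mktCampaign = List("utm_campaign")
  )
  // Evaluates to Success(MarketingCampaign(Some("email"), Some("newsletter"), None, None, Some("spring")))
  val campaign = enrichment.extractMarketingFields(
    new URI("http://www.example.com/landing?utm_source=newsletter&utm_medium=email&utm_campaign=spring"),
    "UTF-8"
  )
}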
| artsy/snowplow | 3-enrich/scala-common-enrich/src/main/scala/com.snowplowanalytics.snowplow.enrich/common/enrichments/registry/CampaignAttributionEnrichment.scala | Scala | apache-2.0 | 5,525 |
///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2014 Adobe Systems Incorporated. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////////
package com.adobe.spark_parquet_thrift
// Scala collections.
import scala.collection.mutable.ArrayBuffer
// Spark.
import org.apache.spark
import spark.{SparkConf,SparkContext}
import spark.rdd.RDD
import org.apache.spark.SparkContext._
// Map Reduce.
import org.apache.hadoop.{conf,fs,mapreduce}
import fs.{FileSystem,Path}
import mapreduce.Job
import conf.Configuration
// File.
import com.google.common.io.Files
import java.io.File
// Parquet and Thrift support.
import parquet.hadoop.{ParquetOutputFormat, ParquetInputFormat}
import parquet.hadoop.thrift.{
ParquetThriftInputFormat,ParquetThriftOutputFormat,
ThriftReadSupport,ThriftWriteSupport
}
object SparkParquetThriftApp {
def main(args: Array[String]) {
val mem = "30g"
println("Initializing Spark context.")
println(" Memory: " + mem)
val sparkConf = new SparkConf()
.setAppName("SparkParquetThrift")
.setMaster("local[1]")
.setSparkHome("/usr/lib/spark")
.setJars(Seq())
.set("spark.executor.memory", mem)
val sc = new SparkContext(sparkConf)
println("Creating sample Thrift data.")
val sampleData = Range(1,10).toSeq.map{ v: Int =>
new SampleThriftObject("a"+v,"b"+v,"c"+v)
}
println(sampleData.map(" - " + _).mkString("\\n"))
val job = new Job()
val parquetStore = "hdfs://server_address.com:8020/sample_store"
println("Writing sample data to Parquet.")
println(" - ParquetStore: " + parquetStore)
ParquetThriftOutputFormat.setThriftClass(job, classOf[SampleThriftObject])
ParquetOutputFormat.setWriteSupportClass(job, classOf[SampleThriftObject])
sc.parallelize(sampleData)
.map(obj => (null, obj))
.saveAsNewAPIHadoopFile(
parquetStore,
classOf[Void],
classOf[SampleThriftObject],
classOf[ParquetThriftOutputFormat[SampleThriftObject]],
job.getConfiguration
)
println("Reading 'col_a' and 'col_b' from Parquet data store.")
ParquetInputFormat.setReadSupportClass(
job,
classOf[ThriftReadSupport[SampleThriftObject]]
)
job.getConfiguration.set("parquet.thrift.column.filter", "col_a;col_b")
val parquetData = sc.newAPIHadoopFile(
parquetStore,
classOf[ParquetThriftInputFormat[SampleThriftObject]],
classOf[Void],
classOf[SampleThriftObject],
job.getConfiguration
).map{case (void,obj) => obj}
println(parquetData.collect().map(" - " + _).mkString("\\n"))
}
}
| alexland/spark-parquet-thrift-example | src/main/scala/SparkParquetThriftApp.scala | Scala | apache-2.0 | 3,257 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.niflheim
import quasar.precog.common._
import quasar.precog.common.ingest.EventId
import quasar.precog.util._
import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.pattern.{AskSupport, GracefulStopSupport}
import akka.util.Timeout
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration._
import org.slf4s.Logging
import quasar.blueeyes.json._
import quasar.blueeyes.json.serialization._
import quasar.blueeyes.json.serialization.DefaultSerialization._
import quasar.blueeyes.json.serialization.IsoSerialization._
import quasar.blueeyes.json.serialization.Extractor._
import scalaz._
import scalaz.effect.IO
import scalaz.Validation._
import scalaz.syntax.monad._
import java.io.File
import java.util.concurrent.ScheduledExecutorService
import java.util.concurrent.atomic._
import scala.collection.immutable.SortedMap
import shapeless._
case class Insert(batch: Seq[NIHDB.Batch], responseRequested: Boolean)
case object GetSnapshot
case class Block(id: Long, segments: Seq[Segment], stable: Boolean)
case object GetStatus
case class Status(cooked: Int, pending: Int, rawSize: Int)
case object GetStructure
case class Structure(columns: Set[(CPath, CType)])
sealed trait InsertResult
case class Inserted(offset: Long, size: Int) extends InsertResult
case object Skipped extends InsertResult
case object Cook
case object Quiesce
object NIHDB {
import scala.concurrent.ExecutionContext.Implicits.global // TODO!!!!
case class Batch(offset: Long, values: Seq[JValue])
final val projectionIdGen = new AtomicInteger()
final def create(chef: ActorRef, baseDir: File, cookThreshold: Int, timeout: FiniteDuration, txLogScheduler: ScheduledExecutorService)(implicit actorSystem: ActorSystem): IO[Validation[Error, NIHDB]] =
NIHDBActor.create(chef, baseDir, cookThreshold, timeout, txLogScheduler) map { _ map { actor => new NIHDBImpl(actor, timeout) } }
final def open(chef: ActorRef, baseDir: File, cookThreshold: Int, timeout: FiniteDuration, txLogScheduler: ScheduledExecutorService)(implicit actorSystem: ActorSystem) =
NIHDBActor.open(chef, baseDir, cookThreshold, timeout, txLogScheduler) map { _ map { _ map { actor => new NIHDBImpl(actor, timeout) } } }
final def hasProjection(dir: File) = NIHDBActor.hasProjection(dir)
}
trait NIHDB {
def insert(batch: Seq[NIHDB.Batch]): IO[Unit]
def insertVerified(batch: Seq[NIHDB.Batch]): Future[InsertResult]
def getSnapshot(): Future[NIHDBSnapshot]
def getBlockAfter(id: Option[Long], cols: Option[Set[ColumnRef]]): Future[Option[Block]]
def getBlock(id: Option[Long], cols: Option[Set[CPath]]): Future[Option[Block]]
def length: Future[Long]
def projectionId: Int
def status: Future[Status]
def structure: Future[Set[ColumnRef]]
/**
* Returns the total number of defined objects for a given `CPath` *mask*.
* Since this punches holes in our rows, it is not simply the length of the
* block. Instead we count the number of rows that have at least one defined
* value at each path (and their children).
*/
def count(paths0: Option[Set[CPath]]): Future[Long]
/**
* Forces the chef to cook the current outstanding commit log. This should only
* be called in the event that an ingestion is believed to be 100% complete, since
* it will result in a "partial" block (i.e. a block that is not of maximal length).
* Note that the append log is visible to snapshots, meaning that this function
* should be unnecessary in nearly all circumstances.
*/
def cook: Future[Unit]
def quiesce: Future[Unit]
def close(implicit actorSystem: ActorSystem): Future[Unit]
}
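/**
 * Illustrative sketch (not part of the original source): paging through every block
 * of a projection by repeatedly asking for the block after the last id seen.
 */
object NIHDBPagingSketch {
  def allBlocks(db: NIHDB)(implicit ec: ExecutionContext): Future[Vector[Block]] = {
    def loop(lastId: Option[Long], acc: Vector[Block]): Future[Vector[Block]] =
      db.getBlockAfter(lastId, None).flatMap {
        case Some(block) => loop(Some(block.id), acc :+ block)
        case None        => Future.successful(acc)
      }
    loop(None, Vector.empty)
  }
}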
private[niflheim] class NIHDBImpl private[niflheim] (actor: ActorRef, timeout: Timeout)(implicit executor: ExecutionContext) extends NIHDB with GracefulStopSupport with AskSupport {
private implicit val impFiniteDuration = timeout
val projectionId = NIHDB.projectionIdGen.getAndIncrement
def insert(batch: Seq[NIHDB.Batch]): IO[Unit] =
IO(actor ! Insert(batch, false))
def insertVerified(batch: Seq[NIHDB.Batch]): Future[InsertResult] =
(actor ? Insert(batch, true)).mapTo[InsertResult]
def getSnapshot(): Future[NIHDBSnapshot] =
(actor ? GetSnapshot).mapTo[NIHDBSnapshot]
def getBlockAfter(id: Option[Long], cols: Option[Set[ColumnRef]]): Future[Option[Block]] =
getSnapshot().map(_.getBlockAfter(id, cols))
def getBlock(id: Option[Long], cols: Option[Set[CPath]]): Future[Option[Block]] =
getSnapshot().map(_.getBlock(id, cols))
def length: Future[Long] =
getSnapshot().map(_.count())
def status: Future[Status] =
(actor ? GetStatus).mapTo[Status]
def structure: Future[Set[ColumnRef]] =
getSnapshot().map(_.structure)
def count(paths0: Option[Set[CPath]]): Future[Long] =
getSnapshot().map(_.count(paths0))
def cook: Future[Unit] =
(actor ? Cook).mapTo[Unit]
def quiesce: Future[Unit] =
(actor ? Quiesce).mapTo[Unit]
def close(implicit actorSystem: ActorSystem): Future[Unit] =
gracefulStop(actor, timeout.duration).map(_ => ())
}
private[niflheim] object NIHDBActor extends Logging {
final val descriptorFilename = "NIHDBDescriptor.json"
final val cookedSubdir = "cooked_blocks"
final val rawSubdir = "raw_blocks"
final val lockName = "NIHDBProjection"
private[niflheim] final val internalDirs =
Set(cookedSubdir, rawSubdir, descriptorFilename, CookStateLog.logName + "_1.log", CookStateLog.logName + "_2.log", lockName + ".lock", CookStateLog.lockName + ".lock")
final def create(chef: ActorRef, baseDir: File, cookThreshold: Int, timeout: FiniteDuration, txLogScheduler: ScheduledExecutorService)(implicit actorSystem: ActorSystem): IO[Validation[Error, ActorRef]] = {
val descriptorFile = new File(baseDir, descriptorFilename)
val currentState: IO[Validation[Error, ProjectionState]] =
if (descriptorFile.exists) {
ProjectionState.fromFile(descriptorFile)
} else {
val state = ProjectionState.empty
for {
_ <- IO { log.info("No current descriptor found for " + baseDir + ", creating fresh descriptor") }
_ <- ProjectionState.toFile(state, descriptorFile)
} yield {
success(state)
}
}
currentState map { _ map { s => actorSystem.actorOf(Props(new NIHDBActor(s, baseDir, chef, cookThreshold, txLogScheduler))) } }
}
final def readDescriptor(baseDir: File): IO[Option[Validation[Error, ProjectionState]]] = {
val descriptorFile = new File(baseDir, descriptorFilename)
if (descriptorFile.exists) {
ProjectionState.fromFile(descriptorFile) map { Some(_) }
} else {
log.warn("No projection found at " + baseDir)
IO { None }
}
}
final def open(chef: ActorRef, baseDir: File, cookThreshold: Int, timeout: FiniteDuration, txLogScheduler: ScheduledExecutorService)(implicit actorSystem: ActorSystem): IO[Option[Validation[Error, ActorRef]]] = {
val currentState: IO[Option[Validation[Error, ProjectionState]]] = readDescriptor(baseDir)
currentState map { _ map { _ map { s => actorSystem.actorOf(Props(new NIHDBActor(s, baseDir, chef, cookThreshold, txLogScheduler))) } } }
}
final def hasProjection(dir: File) = (new File(dir, descriptorFilename)).exists
private case class BlockState(cooked: List[CookedReader], pending: Map[Long, StorageReader], rawLog: RawHandler)
private class State(val txLog: CookStateLog, var blockState: BlockState, var currentBlocks: SortedMap[Long, StorageReader])
}
private[niflheim] class NIHDBActor private (private var currentState: ProjectionState, baseDir: File, chef: ActorRef, cookThreshold: Int, txLogScheduler: ScheduledExecutorService)
extends Actor
with Logging {
import NIHDBActor._
assert(cookThreshold > 0)
assert(cookThreshold < (1 << 16))
private[this] val workLock = FileLock(baseDir, lockName)
private[this] val cookedDir = new File(baseDir, cookedSubdir)
private[this] val rawDir = new File(baseDir, rawSubdir)
private[this] val descriptorFile = new File(baseDir, descriptorFilename)
private[this] val cookSequence = new AtomicLong
private[this] var actorState: Option[State] = None
private def state = {
import scalaz.syntax.effect.id._
actorState getOrElse open.flatMap(_.tap(s => IO(actorState = Some(s)))).unsafePerformIO
}
private def initDirs(f: File) = IO {
if (!f.isDirectory) {
if (!f.mkdirs) {
throw new Exception("Failed to create dir: " + f)
}
}
}
private def initActorState = IO {
log.debug("Opening log in " + baseDir)
val txLog = new CookStateLog(baseDir, txLogScheduler)
log.debug("Current raw block id = " + txLog.currentBlockId)
// We'll need to update our current thresholds based on what we read out of any raw logs we open
var maxOffset = currentState.maxOffset
val currentRawFile = rawFileFor(txLog.currentBlockId)
val (currentLog, rawLogOffsets) = if (currentRawFile.exists) {
val (handler, offsets, ok) = RawHandler.load(txLog.currentBlockId, currentRawFile)
if (!ok) {
log.warn("Corruption detected and recovery performed on " + currentRawFile)
}
(handler, offsets)
} else {
(RawHandler.empty(txLog.currentBlockId, currentRawFile), Seq.empty[Long])
}
rawLogOffsets.sortBy(- _).headOption.foreach { newMaxOffset =>
maxOffset = maxOffset max newMaxOffset
}
val pendingCooks = txLog.pendingCookIds.map { id =>
val (reader, offsets, ok) = RawHandler.load(id, rawFileFor(id))
if (!ok) {
        log.warn("Corruption detected and recovery performed on " + rawFileFor(id))
}
maxOffset = math.max(maxOffset, offsets.max)
(id, reader)
}.toMap
this.currentState = currentState.copy(maxOffset = maxOffset)
// Restore the cooked map
val cooked = currentState.readers(cookedDir)
val blockState = BlockState(cooked, pendingCooks, currentLog)
val currentBlocks = computeBlockMap(blockState)
log.debug("Initial block state = " + blockState)
// Re-fire any restored pending cooks
blockState.pending.foreach {
case (id, reader) =>
log.debug("Restarting pending cook on block %s:%d".format(baseDir, id))
chef ! Prepare(id, cookSequence.getAndIncrement, cookedDir, reader, () => ())
}
new State(txLog, blockState, currentBlocks)
}
private def open = actorState.map(IO(_)) getOrElse {
for {
_ <- initDirs(cookedDir)
_ <- initDirs(rawDir)
state <- initActorState
} yield state
}
private def cook(responseRequested: Boolean) = IO {
state.blockState.rawLog.close
val toCook = state.blockState.rawLog
val newRaw = RawHandler.empty(toCook.id + 1, rawFileFor(toCook.id + 1))
state.blockState = state.blockState.copy(pending = state.blockState.pending + (toCook.id -> toCook), rawLog = newRaw)
state.txLog.startCook(toCook.id)
val target = sender
val onComplete = if (responseRequested)
() => target ! (())
else
() => ()
chef ! Prepare(toCook.id, cookSequence.getAndIncrement, cookedDir, toCook, onComplete)
}
private def quiesce = IO {
actorState foreach { s =>
log.debug("Releasing resources for projection in " + baseDir)
s.blockState.rawLog.close
s.txLog.close
ProjectionState.toFile(currentState, descriptorFile)
actorState = None
}
}
private def close = {
IO(log.debug("Closing projection in " + baseDir)) >> quiesce
} except { case t: Throwable =>
IO { log.error("Error during close", t) }
} ensuring {
IO { workLock.release }
}
override def postStop() = {
close.unsafePerformIO
}
def getSnapshot(): NIHDBSnapshot = NIHDBSnapshot(state.currentBlocks)
private def rawFileFor(seq: Long) = new File(rawDir, "%06x.raw".format(seq))
private def computeBlockMap(current: BlockState) = {
val allBlocks: List[StorageReader] = (current.cooked ++ current.pending.values :+ current.rawLog)
SortedMap(allBlocks.map { r => r.id -> r }.toSeq: _*)
}
def updatedThresholds(current: Map[Int, Int], ids: Seq[Long]): Map[Int, Int] = {
(current.toSeq ++ ids.map {
i => val EventId(p, s) = EventId.fromLong(i); (p -> s)
}).groupBy(_._1).map { case (p, ids) => (p -> ids.map(_._2).max) }
}
override def receive = {
case GetSnapshot =>
sender ! getSnapshot()
case Spoilt(_, _, onComplete) =>
onComplete()
case Cooked(id, _, _, file, onComplete) =>
// This could be a replacement for an existing id, so we
      // need to remove/close any existing cooked block with the same
// ID
//TODO: LENSES!!!!!!!~
state.blockState = state.blockState.copy(
cooked = CookedReader.load(cookedDir, file) :: state.blockState.cooked.filterNot(_.id == id),
pending = state.blockState.pending - id
)
state.currentBlocks = computeBlockMap(state.blockState)
currentState = currentState.copy(
cookedMap = currentState.cookedMap + (id -> file.getPath)
)
log.debug("Cook complete on %d".format(id))
ProjectionState.toFile(currentState, descriptorFile).unsafePerformIO
state.txLog.completeCook(id)
onComplete()
case Insert(batch, responseRequested) =>
if (batch.isEmpty) {
log.warn("Skipping insert with an empty batch on %s".format(baseDir.getCanonicalPath))
if (responseRequested) sender ! Skipped
} else {
val (skipValues, keepValues) = batch.partition(_.offset <= currentState.maxOffset)
if (keepValues.isEmpty) {
log.warn("Skipping entirely seen batch of %d rows prior to offset %d".format(batch.flatMap(_.values).size, currentState.maxOffset))
if (responseRequested) sender ! Skipped
} else {
val values = keepValues.flatMap(_.values)
val offset = keepValues.map(_.offset).max
log.debug("Inserting %d rows, skipping %d rows at offset %d for %s".format(values.length, skipValues.length, offset, baseDir.getCanonicalPath))
state.blockState.rawLog.write(offset, values)
          // Track the highest offset seen for this projection; offset is the max of the batch kept above
currentState = currentState.copy(maxOffset = offset)
if (state.blockState.rawLog.length >= cookThreshold) {
log.debug("Starting cook on %s after threshold exceeded".format(baseDir.getCanonicalPath))
cook(false).unsafePerformIO
}
log.debug("Insert complete on %d rows at offset %d for %s".format(values.length, offset, baseDir.getCanonicalPath))
if (responseRequested) sender ! Inserted(offset, values.length)
}
}
case Cook =>
cook(true).unsafePerformIO
case GetStatus =>
sender ! Status(state.blockState.cooked.length, state.blockState.pending.size, state.blockState.rawLog.length)
case Quiesce =>
quiesce.unsafePerformIO
sender ! (())
}
}
private[niflheim] case class ProjectionState(maxOffset: Long, cookedMap: Map[Long, String]) {
def readers(baseDir: File): List[CookedReader] =
cookedMap.map {
case (id, metadataFile) =>
CookedReader.load(baseDir, new File(metadataFile))
}.toList
}
private[niflheim] object ProjectionState {
import Extractor.Error
def empty = ProjectionState(-1L, Map.empty)
// FIXME: Add version for this format
val v1Schema = "maxOffset" :: "cookedMap" :: HNil
implicit val stateDecomposer = decomposer[ProjectionState](v1Schema)
implicit val stateExtractor = extractor[ProjectionState](v1Schema)
def fromFile(input: File): IO[Validation[Error, ProjectionState]] = IO {
JParser.parseFromFile(input).bimap(Extractor.Thrown(_): Extractor.Error, x => x).flatMap { jv =>
jv.validated[ProjectionState]
}
}
def toFile(state: ProjectionState, output: File): IO[Boolean] = {
IOUtils.safeWriteToFile(state.serialize.renderCompact, output)
}
}
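/**
 * Illustrative sketch (not part of the original source): persisting projection
 * metadata and reading it back. The descriptor location and cooked-block path are
 * made up for the example.
 */
object ProjectionStateRoundTripSketch {
  def roundTrip(descriptorFile: File): IO[Validation[Extractor.Error, ProjectionState]] = {
    val state = ProjectionState(maxOffset = 42L, cookedMap = Map(0L -> "cooked_blocks/block-000000.meta"))
    ProjectionState.toFile(state, descriptorFile).flatMap(_ => ProjectionState.fromFile(descriptorFile))
  }
}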
| jedesah/Quasar | niflheim/src/main/scala/quasar/niflheim/NIHDBActor.scala | Scala | apache-2.0 | 16,632 |
package examples.rules
//https://github.com/davegurnell/functional-data-validation
object Validation {
sealed trait Result[+A] {
def ap[B](fn: Result[A => B]): Result[B] =
(this, fn) match {
case (Pass(a), Pass(b)) => Pass(b(a))
case (Fail(a), Pass(b)) => Fail(a)
case (Pass(a), Fail(b)) => Fail(b)
case (Fail(a), Fail(b)) => Fail(a ++ b)
}
def &[B, C](that: Result[B])(func: (A, B) => C): Result[C] =
ap(that.map((b: B) => (a: A) => func(a, b)))
def map[B](func: A => B) = this match {
case Pass(a) => Pass(func(a))
case Fail(a) => Fail(a)
}
def flatMap[B](func: A => Result[B]) = this match {
case Pass(a) => func(a)
case Fail(a) => Fail(a)
}
}
final case class Pass[A](value: A) extends Result[A]
final case class Fail(messages: List[String]) extends Result[Nothing]
type Rule[-A, +B] = A => Result[B]
implicit class RuleOps[A, B](rule: Rule[A, B]) {
def map[C](func: B => C): Rule[A, C] =
(a: A) => rule(a) map func
def flatMap[C](rule2: Rule[B, C]): Rule[A, C] =
(a: A) => rule(a) flatMap rule2
def and[C, D](rule2: Rule[A, C])(func: (B, C) => D): Rule[A, D] =
(a: A) => (rule(a) & rule2(a))(func)
}
def validator[A]: Rule[A, A] = (input: A) => Pass(input)
def main(args: Array[String]) = {
case class Event(number: Int, source: String, sink: String)
val nonEmpty: Rule[String, String] = (str: String) =>
if (str.isEmpty) Fail(List("Empty string")) else Pass(str)
def capitalize(str: String): String = str(0).toUpper +: str.substring(1)
def gte(min: Int) =
(num: Int) =>
if (num < min) Fail(List("Too small number")) else Pass(num)
val checkNumber: Rule[Event, Int] =
validator[Event] map (_.number) flatMap gte(124)
val checkStreet: Rule[Event, String] =
validator[Event] map (_.source) flatMap nonEmpty map capitalize
val checkZip: Rule[Event, String] =
validator[Event] map (_.sink) flatMap nonEmpty
val checkAddress: Rule[Event, Event] = (address: Event) =>
checkZip(address).ap {
checkStreet(address).ap {
checkNumber(address).ap {
Pass { (number: Int) => (street: String) => (zipCode: String) =>
Event(number, street, zipCode)
}
}
}
}
/*
val read: Rule[Event, Event] =
(address: Event) =>
//could be read from somewhere
Pass(address)
read(Event(123, "Twitter", "Cassandra")).flatMap(checkAddress)
*/
println(checkAddress(Event(123, "NewYork", "")))
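    // Illustrative alternative (not part of the original example): the `and`
    // combinator runs two rules against the same input and accumulates the
    // failures of both before combining the successes.
    val checkSourceAndSink: Rule[Event, (String, String)] =
      checkStreet.and(checkZip)((street, sink) => (street, sink))
    println(checkSourceAndSink(Event(200, "Twitter", "Cassandra")))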
}
}
| haghard/shapeless-playbook | src/main/scala/examples/rules/Validation.scala | Scala | apache-2.0 | 2,631 |
package com.enkidu.lignum.parsers.ast.statement.parameter
import com.enkidu.lignum.parsers.ast.expression.types.Type
import com.enkidu.lignum.parsers.ast.expression.types.annotations.Annotation
case class VariableArityParameter(annotations: Seq[Annotation], isFinal: Boolean,
`type`: Type, name: String) extends Parameter {
override def dispatch(visitor: Visitor): Unit = {
annotations.dispatch(visitor)
`type`.dispatch(visitor)
apply(visitor)
}
} | marek1840/java-parser | src/main/scala/com/enkidu/lignum/parsers/ast/statement/parameter/VariableArityParameter.scala | Scala | mit | 499 |
/*
* Copyright 2011-2021 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import io.gatling.core.Predef._
import io.gatling.http.Predef._
import io.gatling.http.funspec.GatlingHttpFunSpec
import GatlingFunSpecExample._
//#example-test
class GatlingFunSpecExample extends GatlingHttpFunSpec { // (1)
val baseUrl = "http://example.com" // (2)
override def httpProtocol = super.httpProtocol.header("MyHeader", "MyValue") // (3)
spec { // (4)
http("Example index.html test") // (5)
.get("/index.html") // (6)
.check(pageHeader.exists) // (7)
}
}
object GatlingFunSpecExample {
def pageHeader = css("h1") // (8)
}
//#example-test
| gatling/gatling | src/docs/content/reference/current/extensions/functional_specs/code/GatlingFunSpecSample.scala | Scala | apache-2.0 | 1,202 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.joins
import java.util.NoSuchElementException
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.physical._
import org.apache.spark.sql.execution.{BinaryNode, SparkPlan}
import org.apache.spark.util.collection.CompactBuffer
/**
* :: DeveloperApi ::
* Performs an sort merge join of two child relations.
*/
@DeveloperApi
case class SortMergeJoin(
leftKeys: Seq[Expression],
rightKeys: Seq[Expression],
left: SparkPlan,
right: SparkPlan) extends BinaryNode {
override def output: Seq[Attribute] = left.output ++ right.output
override def outputPartitioning: Partitioning = left.outputPartitioning
override def requiredChildDistribution: Seq[Distribution] =
ClusteredDistribution(leftKeys) :: ClusteredDistribution(rightKeys) :: Nil
// this is to manually construct an ordering that can be used to compare keys from both sides
private val keyOrdering: RowOrdering = RowOrdering.forSchema(leftKeys.map(_.dataType))
override def outputOrdering: Seq[SortOrder] = requiredOrders(leftKeys)
override def requiredChildOrdering: Seq[Seq[SortOrder]] =
requiredOrders(leftKeys) :: requiredOrders(rightKeys) :: Nil
@transient protected lazy val leftKeyGenerator = newProjection(leftKeys, left.output)
@transient protected lazy val rightKeyGenerator = newProjection(rightKeys, right.output)
private def requiredOrders(keys: Seq[Expression]): Seq[SortOrder] =
keys.map(SortOrder(_, Ascending))
protected override def doExecute(): RDD[Row] = {
val leftResults = left.execute().map(_.copy())
val rightResults = right.execute().map(_.copy())
leftResults.zipPartitions(rightResults) { (leftIter, rightIter) =>
new Iterator[Row] {
// Mutable per row objects.
private[this] val joinRow = new JoinedRow5
private[this] var leftElement: Row = _
private[this] var rightElement: Row = _
private[this] var leftKey: Row = _
private[this] var rightKey: Row = _
private[this] var rightMatches: CompactBuffer[Row] = _
private[this] var rightPosition: Int = -1
private[this] var stop: Boolean = false
private[this] var matchKey: Row = _
// initialize iterator
initialize()
override final def hasNext: Boolean = nextMatchingPair()
override final def next(): Row = {
if (hasNext) {
            // we are using the buffered right rows and run down the left iterator
val joinedRow = joinRow(leftElement, rightMatches(rightPosition))
rightPosition += 1
if (rightPosition >= rightMatches.size) {
rightPosition = 0
fetchLeft()
if (leftElement == null || keyOrdering.compare(leftKey, matchKey) != 0) {
stop = false
rightMatches = null
}
}
joinedRow
} else {
// no more result
throw new NoSuchElementException
}
}
private def fetchLeft() = {
if (leftIter.hasNext) {
leftElement = leftIter.next()
leftKey = leftKeyGenerator(leftElement)
} else {
leftElement = null
}
}
private def fetchRight() = {
if (rightIter.hasNext) {
rightElement = rightIter.next()
rightKey = rightKeyGenerator(rightElement)
} else {
rightElement = null
}
}
private def initialize() = {
fetchLeft()
fetchRight()
}
/**
         * Searches the right iterator for the next rows that have matches in the left side, and stores
* them in a buffer.
*
* @return true if the search is successful, and false if the right iterator runs out of
* tuples.
*/
private def nextMatchingPair(): Boolean = {
if (!stop && rightElement != null) {
// run both side to get the first match pair
while (!stop && leftElement != null && rightElement != null) {
val comparing = keyOrdering.compare(leftKey, rightKey)
// for inner join, we need to filter those null keys
stop = comparing == 0 && !leftKey.anyNull
if (comparing > 0 || rightKey.anyNull) {
fetchRight()
} else if (comparing < 0 || leftKey.anyNull) {
fetchLeft()
}
}
rightMatches = new CompactBuffer[Row]()
if (stop) {
stop = false
            // iterate the right side to buffer all rows that match;
            // as the records should be ordered, exit when we meet the first that does not match
while (!stop && rightElement != null) {
rightMatches += rightElement
fetchRight()
stop = keyOrdering.compare(leftKey, rightKey) != 0
}
if (rightMatches.size > 0) {
rightPosition = 0
matchKey = leftKey
}
}
}
rightMatches != null && rightMatches.size > 0
}
}
}
}
}
| andrewor14/iolap | sql/core/src/main/scala/org/apache/spark/sql/execution/joins/SortMergeJoin.scala | Scala | apache-2.0 | 6,227 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.parquet
import java.util.Locale
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.reflect.ClassTag
import scala.reflect.runtime.universe.TypeTag
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.mapreduce.{JobContext, TaskAttemptContext}
import org.apache.parquet.column.{Encoding, ParquetProperties}
import org.apache.parquet.example.data.{Group, GroupWriter}
import org.apache.parquet.example.data.simple.SimpleGroup
import org.apache.parquet.hadoop._
import org.apache.parquet.hadoop.api.WriteSupport
import org.apache.parquet.hadoop.api.WriteSupport.WriteContext
import org.apache.parquet.hadoop.metadata.CompressionCodecName
import org.apache.parquet.io.api.RecordConsumer
import org.apache.parquet.schema.{MessageType, MessageTypeParser}
import org.apache.spark.{SPARK_VERSION_SHORT, SparkException}
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.{InternalRow, ScalaReflection}
import org.apache.spark.sql.catalyst.expressions.{GenericInternalRow, UnsafeRow}
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.execution.datasources.SQLHadoopMapReduceCommitProtocol
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
// Write support class for nested groups: ParquetWriter initializes GroupWriteSupport
// with an empty configuration (it is after all not intended to be used in this way?)
// and members are private so we need to make our own in order to pass the schema
// to the writer.
private[parquet] class TestGroupWriteSupport(schema: MessageType) extends WriteSupport[Group] {
var groupWriter: GroupWriter = null
override def prepareForWrite(recordConsumer: RecordConsumer): Unit = {
groupWriter = new GroupWriter(recordConsumer, schema)
}
override def init(configuration: Configuration): WriteContext = {
new WriteContext(schema, new java.util.HashMap[String, String]())
}
override def write(record: Group): Unit = {
groupWriter.write(record)
}
}
/**
* A test suite that tests basic Parquet I/O.
*/
class ParquetIOSuite extends QueryTest with ParquetTest with SharedSparkSession {
import testImplicits._
/**
* Writes `data` to a Parquet file, reads it back and check file contents.
*/
protected def checkParquetFile[T <: Product : ClassTag: TypeTag](data: Seq[T]): Unit = {
withParquetDataFrame(data)(r => checkAnswer(r, data.map(Row.fromTuple)))
}
test("basic data types (without binary)") {
val data = (1 to 4).map { i =>
(i % 2 == 0, i, i.toLong, i.toFloat, i.toDouble)
}
checkParquetFile(data)
}
test("raw binary") {
val data = (1 to 4).map(i => Tuple1(Array.fill(3)(i.toByte)))
withParquetDataFrame(data) { df =>
assertResult(data.map(_._1.mkString(",")).sorted) {
df.collect().map(_.getAs[Array[Byte]](0).mkString(",")).sorted
}
}
}
test("SPARK-11694 Parquet logical types are not being tested properly") {
val parquetSchema = MessageTypeParser.parseMessageType(
"""message root {
| required int32 a(INT_8);
| required int32 b(INT_16);
| required int32 c(DATE);
| required int32 d(DECIMAL(1,0));
| required int64 e(DECIMAL(10,0));
| required binary f(UTF8);
| required binary g(ENUM);
| required binary h(DECIMAL(32,0));
| required fixed_len_byte_array(32) i(DECIMAL(32,0));
| required int64 j(TIMESTAMP_MILLIS);
| required int64 k(TIMESTAMP_MICROS);
|}
""".stripMargin)
val expectedSparkTypes = Seq(ByteType, ShortType, DateType, DecimalType(1, 0),
DecimalType(10, 0), StringType, StringType, DecimalType(32, 0), DecimalType(32, 0),
TimestampType, TimestampType)
withTempPath { location =>
val path = new Path(location.getCanonicalPath)
val conf = spark.sessionState.newHadoopConf()
writeMetadata(parquetSchema, path, conf)
readParquetFile(path.toString)(df => {
val sparkTypes = df.schema.map(_.dataType)
assert(sparkTypes === expectedSparkTypes)
})
}
}
test("string") {
val data = (1 to 4).map(i => Tuple1(i.toString))
// Property spark.sql.parquet.binaryAsString shouldn't affect Parquet files written by Spark SQL
// as we store Spark SQL schema in the extra metadata.
withSQLConf(SQLConf.PARQUET_BINARY_AS_STRING.key -> "false")(checkParquetFile(data))
withSQLConf(SQLConf.PARQUET_BINARY_AS_STRING.key -> "true")(checkParquetFile(data))
}
testStandardAndLegacyModes("fixed-length decimals") {
def makeDecimalRDD(decimal: DecimalType): DataFrame = {
spark
.range(1000)
// Parquet doesn't allow column names with spaces, have to add an alias here.
// Minus 500 here so that negative decimals are also tested.
.select((('id - 500) / 100.0) cast decimal as 'dec)
.coalesce(1)
}
val combinations = Seq((5, 2), (1, 0), (1, 1), (18, 10), (18, 17), (19, 0), (38, 37))
for ((precision, scale) <- combinations) {
withTempPath { dir =>
val data = makeDecimalRDD(DecimalType(precision, scale))
data.write.parquet(dir.getCanonicalPath)
readParquetFile(dir.getCanonicalPath) { df => {
checkAnswer(df, data.collect().toSeq)
}}
}
}
}
test("date type") {
def makeDateRDD(): DataFrame =
sparkContext
.parallelize(0 to 1000)
.map(i => Tuple1(DateTimeUtils.toJavaDate(i)))
.toDF()
.select($"_1")
withTempPath { dir =>
val data = makeDateRDD()
data.write.parquet(dir.getCanonicalPath)
readParquetFile(dir.getCanonicalPath) { df =>
checkAnswer(df, data.collect().toSeq)
}
}
}
testStandardAndLegacyModes("map") {
val data = (1 to 4).map(i => Tuple1(Map(i -> s"val_$i")))
checkParquetFile(data)
}
testStandardAndLegacyModes("array") {
val data = (1 to 4).map(i => Tuple1(Seq(i, i + 1)))
checkParquetFile(data)
}
testStandardAndLegacyModes("array and double") {
val data = (1 to 4).map(i => (i.toDouble, Seq(i.toDouble, (i + 1).toDouble)))
checkParquetFile(data)
}
testStandardAndLegacyModes("struct") {
val data = (1 to 4).map(i => Tuple1((i, s"val_$i")))
withParquetDataFrame(data) { df =>
// Structs are converted to `Row`s
checkAnswer(df, data.map { case Tuple1(struct) =>
Row(Row(struct.productIterator.toSeq: _*))
})
}
}
testStandardAndLegacyModes("array of struct") {
val data = (1 to 4).map { i =>
Tuple1(
Seq(
Tuple1(s"1st_val_$i"),
Tuple1(s"2nd_val_$i")
)
)
}
withParquetDataFrame(data) { df =>
// Structs are converted to `Row`s
checkAnswer(df, data.map { case Tuple1(array) =>
Row(array.map(struct => Row(struct.productIterator.toSeq: _*)))
})
}
}
testStandardAndLegacyModes("array of nested struct") {
val data = (1 to 4).map { i =>
Tuple1(
Seq(
Tuple1(
Tuple1(s"1st_val_$i")),
Tuple1(
Tuple1(s"2nd_val_$i"))
)
)
}
withParquetDataFrame(data) { df =>
// Structs are converted to `Row`s
checkAnswer(df, data.map { case Tuple1(array) =>
Row(array.map { case Tuple1(Tuple1(str)) => Row(Row(str))})
})
}
}
testStandardAndLegacyModes("nested struct with array of array as field") {
val data = (1 to 4).map(i => Tuple1((i, Seq(Seq(s"val_$i")))))
withParquetDataFrame(data) { df =>
// Structs are converted to `Row`s
checkAnswer(df, data.map { case Tuple1(struct) =>
Row(Row(struct.productIterator.toSeq: _*))
})
}
}
testStandardAndLegacyModes("nested map with struct as key type") {
val data = (1 to 4).map { i =>
Tuple1(
Map(
(i, s"kA_$i") -> s"vA_$i",
(i, s"kB_$i") -> s"vB_$i"
)
)
}
withParquetDataFrame(data) { df =>
// Structs are converted to `Row`s
checkAnswer(df, data.map { case Tuple1(m) =>
Row(m.map { case (k, v) => Row(k.productIterator.toSeq: _*) -> v })
})
}
}
testStandardAndLegacyModes("nested map with struct as value type") {
val data = (1 to 4).map { i =>
Tuple1(
Map(
s"kA_$i" -> ((i, s"vA_$i")),
s"kB_$i" -> ((i, s"vB_$i"))
)
)
}
withParquetDataFrame(data) { df =>
// Structs are converted to `Row`s
checkAnswer(df, data.map { case Tuple1(m) =>
Row(m.mapValues(struct => Row(struct.productIterator.toSeq: _*)))
})
}
}
test("nulls") {
val allNulls = (
null.asInstanceOf[java.lang.Boolean],
null.asInstanceOf[Integer],
null.asInstanceOf[java.lang.Long],
null.asInstanceOf[java.lang.Float],
null.asInstanceOf[java.lang.Double])
withParquetDataFrame(allNulls :: Nil) { df =>
val rows = df.collect()
assert(rows.length === 1)
assert(rows.head === Row(Seq.fill(5)(null): _*))
}
}
test("nones") {
val allNones = (
None.asInstanceOf[Option[Int]],
None.asInstanceOf[Option[Long]],
None.asInstanceOf[Option[String]])
withParquetDataFrame(allNones :: Nil) { df =>
val rows = df.collect()
assert(rows.length === 1)
assert(rows.head === Row(Seq.fill(3)(null): _*))
}
}
test("SPARK-10113 Support for unsigned Parquet logical types") {
val parquetSchema = MessageTypeParser.parseMessageType(
"""message root {
| required int32 c(UINT_32);
|}
""".stripMargin)
withTempPath { location =>
val path = new Path(location.getCanonicalPath)
val conf = spark.sessionState.newHadoopConf()
writeMetadata(parquetSchema, path, conf)
val errorMessage = intercept[Throwable] {
spark.read.parquet(path.toString).printSchema()
}.toString
assert(errorMessage.contains("Parquet type not supported"))
}
}
test("SPARK-11692 Support for Parquet logical types, JSON and BSON (embedded types)") {
val parquetSchema = MessageTypeParser.parseMessageType(
"""message root {
| required binary a(JSON);
| required binary b(BSON);
|}
""".stripMargin)
val expectedSparkTypes = Seq(StringType, BinaryType)
withTempPath { location =>
val path = new Path(location.getCanonicalPath)
val conf = spark.sessionState.newHadoopConf()
writeMetadata(parquetSchema, path, conf)
val sparkTypes = spark.read.parquet(path.toString).schema.map(_.dataType)
assert(sparkTypes === expectedSparkTypes)
}
}
test("compression codec") {
val hadoopConf = spark.sessionState.newHadoopConf()
def compressionCodecFor(path: String, codecName: String): String = {
val codecs = for {
footer <- readAllFootersWithoutSummaryFiles(new Path(path), hadoopConf)
block <- footer.getParquetMetadata.getBlocks.asScala
column <- block.getColumns.asScala
} yield column.getCodec.name()
assert(codecs.distinct === Seq(codecName))
codecs.head
}
val data = (0 until 10).map(i => (i, i.toString))
def checkCompressionCodec(codec: CompressionCodecName): Unit = {
withSQLConf(SQLConf.PARQUET_COMPRESSION.key -> codec.name()) {
withParquetFile(data) { path =>
assertResult(spark.conf.get(SQLConf.PARQUET_COMPRESSION).toUpperCase(Locale.ROOT)) {
compressionCodecFor(path, codec.name())
}
}
}
}
// Checks default compression codec
checkCompressionCodec(
CompressionCodecName.fromConf(spark.conf.get(SQLConf.PARQUET_COMPRESSION)))
checkCompressionCodec(CompressionCodecName.UNCOMPRESSED)
checkCompressionCodec(CompressionCodecName.GZIP)
checkCompressionCodec(CompressionCodecName.SNAPPY)
}
test("read raw Parquet file") {
def makeRawParquetFile(path: Path): Unit = {
val schema = MessageTypeParser.parseMessageType(
"""
|message root {
| required boolean _1;
| required int32 _2;
| required int64 _3;
| required float _4;
| required double _5;
|}
""".stripMargin)
val testWriteSupport = new TestGroupWriteSupport(schema)
/**
* Provide a builder for constructing a parquet writer - after PARQUET-248 directly
* constructing the writer is deprecated and should be done through a builder. The default
* builders include Avro - but for raw Parquet writing we must create our own builder.
*/
class ParquetWriterBuilder() extends
ParquetWriter.Builder[Group, ParquetWriterBuilder](path) {
override def getWriteSupport(conf: Configuration) = testWriteSupport
override def self() = this
}
val writer = new ParquetWriterBuilder().build()
(0 until 10).foreach { i =>
val record = new SimpleGroup(schema)
record.add(0, i % 2 == 0)
record.add(1, i)
record.add(2, i.toLong)
record.add(3, i.toFloat)
record.add(4, i.toDouble)
writer.write(record)
}
writer.close()
}
withTempDir { dir =>
val path = new Path(dir.toURI.toString, "part-r-0.parquet")
makeRawParquetFile(path)
readParquetFile(path.toString) { df =>
checkAnswer(df, (0 until 10).map { i =>
Row(i % 2 == 0, i, i.toLong, i.toFloat, i.toDouble) })
}
}
}
test("write metadata") {
val hadoopConf = spark.sessionState.newHadoopConf()
withTempPath { file =>
val path = new Path(file.toURI.toString)
val fs = FileSystem.getLocal(hadoopConf)
val schema = StructType.fromAttributes(ScalaReflection.attributesFor[(Int, String)])
writeMetadata(schema, path, hadoopConf)
assert(fs.exists(new Path(path, ParquetFileWriter.PARQUET_COMMON_METADATA_FILE)))
assert(fs.exists(new Path(path, ParquetFileWriter.PARQUET_METADATA_FILE)))
val expectedSchema = new SparkToParquetSchemaConverter().convert(schema)
val actualSchema = readFooter(path, hadoopConf).getFileMetaData.getSchema
actualSchema.checkContains(expectedSchema)
expectedSchema.checkContains(actualSchema)
}
}
test("save - overwrite") {
withParquetFile((1 to 10).map(i => (i, i.toString))) { file =>
val newData = (11 to 20).map(i => (i, i.toString))
newData.toDF().write.format("parquet").mode(SaveMode.Overwrite).save(file)
readParquetFile(file) { df =>
checkAnswer(df, newData.map(Row.fromTuple))
}
}
}
test("save - ignore") {
val data = (1 to 10).map(i => (i, i.toString))
withParquetFile(data) { file =>
val newData = (11 to 20).map(i => (i, i.toString))
newData.toDF().write.format("parquet").mode(SaveMode.Ignore).save(file)
readParquetFile(file) { df =>
checkAnswer(df, data.map(Row.fromTuple))
}
}
}
test("save - throw") {
val data = (1 to 10).map(i => (i, i.toString))
withParquetFile(data) { file =>
val newData = (11 to 20).map(i => (i, i.toString))
val errorMessage = intercept[Throwable] {
newData.toDF().write.format("parquet").mode(SaveMode.ErrorIfExists).save(file)
}.getMessage
assert(errorMessage.contains("already exists"))
}
}
test("save - append") {
val data = (1 to 10).map(i => (i, i.toString))
withParquetFile(data) { file =>
val newData = (11 to 20).map(i => (i, i.toString))
newData.toDF().write.format("parquet").mode(SaveMode.Append).save(file)
readParquetFile(file) { df =>
checkAnswer(df, (data ++ newData).map(Row.fromTuple))
}
}
}
test("SPARK-6315 regression test") {
    // Spark 1.1 and prior versions write the Spark schema as a case class string into Parquet metadata.
    // This has been superseded by the JSON format since 1.2. Note that 1.3 further refactored the data
    // types API and made StructType.fields an array. This makes the result of StructType.toString
    // different from prior versions: there's no "Seq" wrapping the fields part in the string now.
val sparkSchema =
"StructType(Seq(StructField(a,BooleanType,false),StructField(b,IntegerType,false)))"
    // The Parquet schema is intentionally made different from the Spark schema, because the new
    // Parquet data source simply falls back to the Parquet schema once it fails to parse the Spark
    // schema. By making these two different, we are able to assert that the old-style case class
    // string is parsed successfully.
val parquetSchema = MessageTypeParser.parseMessageType(
"""message root {
| required int32 c;
|}
""".stripMargin)
withTempPath { location =>
val extraMetadata = Map(ParquetReadSupport.SPARK_METADATA_KEY -> sparkSchema.toString)
val path = new Path(location.getCanonicalPath)
val conf = spark.sessionState.newHadoopConf()
writeMetadata(parquetSchema, path, conf, extraMetadata)
readParquetFile(path.toString) { df =>
assertResult(df.schema) {
StructType(
StructField("a", BooleanType, nullable = true) ::
StructField("b", IntegerType, nullable = true) ::
Nil)
}
}
}
}
test("SPARK-8121: spark.sql.parquet.output.committer.class shouldn't be overridden") {
withSQLConf(SQLConf.FILE_COMMIT_PROTOCOL_CLASS.key ->
classOf[SQLHadoopMapReduceCommitProtocol].getCanonicalName) {
val extraOptions = Map(
SQLConf.OUTPUT_COMMITTER_CLASS.key -> classOf[ParquetOutputCommitter].getCanonicalName,
SQLConf.PARQUET_OUTPUT_COMMITTER_CLASS.key ->
classOf[JobCommitFailureParquetOutputCommitter].getCanonicalName
)
withTempPath { dir =>
val message = intercept[SparkException] {
spark.range(0, 1).write.options(extraOptions).parquet(dir.getCanonicalPath)
}.getCause.getMessage
assert(message === "Intentional exception for testing purposes")
}
}
}
test("SPARK-6330 regression test") {
// In 1.3.0, save to fs other than file: without configuring core-site.xml would get:
// IllegalArgumentException: Wrong FS: hdfs://..., expected: file:///
intercept[Throwable] {
spark.read.parquet("file:///nonexistent")
}
val errorMessage = intercept[Throwable] {
spark.read.parquet("hdfs://nonexistent")
}.toString
assert(errorMessage.contains("UnknownHostException"))
}
test("SPARK-7837 Do not close output writer twice when commitTask() fails") {
withSQLConf(SQLConf.FILE_COMMIT_PROTOCOL_CLASS.key ->
classOf[SQLHadoopMapReduceCommitProtocol].getCanonicalName) {
      // Using an output committer that always fails when committing a task, so that both
// `commitTask()` and `abortTask()` are invoked.
val extraOptions = Map[String, String](
SQLConf.PARQUET_OUTPUT_COMMITTER_CLASS.key ->
classOf[TaskCommitFailureParquetOutputCommitter].getCanonicalName
)
// Before fixing SPARK-7837, the following code results in an NPE because both
// `commitTask()` and `abortTask()` try to close output writers.
withTempPath { dir =>
val m1 = intercept[SparkException] {
spark.range(1).coalesce(1).write.options(extraOptions).parquet(dir.getCanonicalPath)
}.getCause.getMessage
assert(m1.contains("Intentional exception for testing purposes"))
}
withTempPath { dir =>
val m2 = intercept[SparkException] {
val df = spark.range(1).select('id as 'a, 'id as 'b).coalesce(1)
df.write.partitionBy("a").options(extraOptions).parquet(dir.getCanonicalPath)
}.getCause.getMessage
assert(m2.contains("Intentional exception for testing purposes"))
}
}
}
test("SPARK-11044 Parquet writer version fixed as version1 ") {
withSQLConf(SQLConf.FILE_COMMIT_PROTOCOL_CLASS.key ->
classOf[SQLHadoopMapReduceCommitProtocol].getCanonicalName) {
// For dictionary encoding, Parquet changes the encoding types according to its writer
// version. So, this test checks one of the encoding types in order to ensure that
// the file is written with writer version2.
val extraOptions = Map[String, String](
// Write a Parquet file with writer version2.
ParquetOutputFormat.WRITER_VERSION -> ParquetProperties.WriterVersion.PARQUET_2_0.toString,
        // Dictionary encoding is enabled by default since Parquet 1.2.0, but
        // it is enabled explicitly here just in case.
ParquetOutputFormat.ENABLE_DICTIONARY -> "true"
)
val hadoopConf = spark.sessionState.newHadoopConfWithOptions(extraOptions)
withSQLConf(ParquetOutputFormat.JOB_SUMMARY_LEVEL -> "ALL") {
withTempPath { dir =>
val path = s"${dir.getCanonicalPath}/part-r-0.parquet"
spark.range(1 << 16).selectExpr("(id % 4) AS i")
.coalesce(1).write.options(extraOptions).mode("overwrite").parquet(path)
val blockMetadata = readFooter(new Path(path), hadoopConf).getBlocks.asScala.head
val columnChunkMetadata = blockMetadata.getColumns.asScala.head
          // If the file is written with writer version2, the encodings should include
          // Encoding.RLE_DICTIONARY. For version1, it would be Encoding.PLAIN_DICTIONARY.
assert(columnChunkMetadata.getEncodings.contains(Encoding.RLE_DICTIONARY))
}
}
}
}
test("null and non-null strings") {
    // Create a dataset where the first values are NULL, followed by some non-null values. The
    // number of non-null values needs to be bigger than the ParquetReader batch size.
val data: Dataset[String] = spark.range(200).map (i =>
if (i < 150) null
else "a"
)
val df = data.toDF("col")
assert(df.agg("col" -> "count").collect().head.getLong(0) == 50)
withTempPath { dir =>
val path = s"${dir.getCanonicalPath}/data"
df.write.parquet(path)
readParquetFile(path) { df2 =>
assert(df2.agg("col" -> "count").collect().head.getLong(0) == 50)
}
}
}
test("read dictionary encoded decimals written as INT32") {
withAllParquetReaders {
checkAnswer(
// Decimal column in this file is encoded using plain dictionary
readResourceParquetFile("test-data/dec-in-i32.parquet"),
spark.range(1 << 4).select('id % 10 cast DecimalType(5, 2) as 'i32_dec))
}
}
test("read dictionary encoded decimals written as INT64") {
withAllParquetReaders {
checkAnswer(
// Decimal column in this file is encoded using plain dictionary
readResourceParquetFile("test-data/dec-in-i64.parquet"),
spark.range(1 << 4).select('id % 10 cast DecimalType(10, 2) as 'i64_dec))
}
}
test("read dictionary encoded decimals written as FIXED_LEN_BYTE_ARRAY") {
withAllParquetReaders {
checkAnswer(
// Decimal column in this file is encoded using plain dictionary
readResourceParquetFile("test-data/dec-in-fixed-len.parquet"),
spark.range(1 << 4).select('id % 10 cast DecimalType(10, 2) as 'fixed_len_dec))
}
}
test("read dictionary and plain encoded timestamp_millis written as INT64") {
withAllParquetReaders {
checkAnswer(
        // The timestamp column in this file is encoded using a combination of plain
        // and dictionary encodings.
readResourceParquetFile("test-data/timemillis-in-i64.parquet"),
(1 to 3).map(i => Row(new java.sql.Timestamp(10))))
}
}
test("SPARK-12589 copy() on rows returned from reader works for strings") {
withTempPath { dir =>
val data = (1, "abc") ::(2, "helloabcde") :: Nil
data.toDF().write.parquet(dir.getCanonicalPath)
var hash1: Int = 0
var hash2: Int = 0
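      // The same file is read back twice below: first with the vectorized reader disabled, then
      // with it enabled. Rows are defensively copied via copy(), and the hashes recorded from the
      // non-vectorized pass must match the hashes produced by the vectorized pass.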
(false :: true :: Nil).foreach { v =>
withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> v.toString) {
val df = spark.read.parquet(dir.getCanonicalPath)
val rows = df.queryExecution.toRdd.map(_.copy()).collect()
val unsafeRows = rows.map(_.asInstanceOf[UnsafeRow])
if (!v) {
hash1 = unsafeRows(0).hashCode()
hash2 = unsafeRows(1).hashCode()
} else {
assert(hash1 == unsafeRows(0).hashCode())
assert(hash2 == unsafeRows(1).hashCode())
}
}
}
}
}
test("VectorizedParquetRecordReader - direct path read") {
val data = (0 to 10).map(i => (i, (i + 'a').toChar.toString))
withTempPath { dir =>
spark.createDataFrame(data).repartition(1).write.parquet(dir.getCanonicalPath)
val file = SpecificParquetRecordReaderBase.listDirectory(dir).get(0);
{
val conf = sqlContext.conf
val reader = new VectorizedParquetRecordReader(
conf.offHeapColumnVectorEnabled, conf.parquetVectorizedReaderBatchSize)
try {
reader.initialize(file, null)
val result = mutable.ArrayBuffer.empty[(Int, String)]
while (reader.nextKeyValue()) {
val row = reader.getCurrentValue.asInstanceOf[InternalRow]
val v = (row.getInt(0), row.getString(1))
result += v
}
assert(data.toSet == result.toSet)
} finally {
reader.close()
}
}
// Project just one column
{
val conf = sqlContext.conf
val reader = new VectorizedParquetRecordReader(
conf.offHeapColumnVectorEnabled, conf.parquetVectorizedReaderBatchSize)
try {
reader.initialize(file, ("_2" :: Nil).asJava)
val result = mutable.ArrayBuffer.empty[(String)]
while (reader.nextKeyValue()) {
val row = reader.getCurrentValue.asInstanceOf[InternalRow]
result += row.getString(0)
}
assert(data.map(_._2).toSet == result.toSet)
} finally {
reader.close()
}
}
// Project columns in opposite order
{
val conf = sqlContext.conf
val reader = new VectorizedParquetRecordReader(
conf.offHeapColumnVectorEnabled, conf.parquetVectorizedReaderBatchSize)
try {
reader.initialize(file, ("_2" :: "_1" :: Nil).asJava)
val result = mutable.ArrayBuffer.empty[(String, Int)]
while (reader.nextKeyValue()) {
val row = reader.getCurrentValue.asInstanceOf[InternalRow]
val v = (row.getString(0), row.getInt(1))
result += v
}
assert(data.map { x => (x._2, x._1) }.toSet == result.toSet)
} finally {
reader.close()
}
}
// Empty projection
{
val conf = sqlContext.conf
val reader = new VectorizedParquetRecordReader(
conf.offHeapColumnVectorEnabled, conf.parquetVectorizedReaderBatchSize)
try {
reader.initialize(file, List[String]().asJava)
var result = 0
while (reader.nextKeyValue()) {
result += 1
}
assert(result == data.length)
} finally {
reader.close()
}
}
}
}
test("VectorizedParquetRecordReader - partition column types") {
withTempPath { dir =>
Seq(1).toDF().repartition(1).write.parquet(dir.getCanonicalPath)
val dataTypes =
Seq(StringType, BooleanType, ByteType, BinaryType, ShortType, IntegerType, LongType,
FloatType, DoubleType, DecimalType(25, 5), DateType, TimestampType)
val constantValues =
Seq(
UTF8String.fromString("a string"),
true,
1.toByte,
"Spark SQL".getBytes,
2.toShort,
3,
Long.MaxValue,
0.25.toFloat,
0.75D,
Decimal("1234.23456"),
DateTimeUtils.fromJavaDate(java.sql.Date.valueOf("2015-01-01")),
DateTimeUtils.fromJavaTimestamp(java.sql.Timestamp.valueOf("2015-01-01 23:50:59.123")))
dataTypes.zip(constantValues).foreach { case (dt, v) =>
val schema = StructType(StructField("pcol", dt) :: Nil)
val conf = sqlContext.conf
val vectorizedReader = new VectorizedParquetRecordReader(
conf.offHeapColumnVectorEnabled, conf.parquetVectorizedReaderBatchSize)
val partitionValues = new GenericInternalRow(Array(v))
val file = SpecificParquetRecordReaderBase.listDirectory(dir).get(0)
try {
vectorizedReader.initialize(file, null)
vectorizedReader.initBatch(schema, partitionValues)
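          // initBatch appends the partition column described by `schema` after the columns read
          // from the file; since the file written above has a single data column, the partition
          // value is exposed at ordinal 1 when the row is inspected below.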
vectorizedReader.nextKeyValue()
val row = vectorizedReader.getCurrentValue.asInstanceOf[InternalRow]
// Use `GenericMutableRow` by explicitly copying rather than `ColumnarBatch`
          // in order to use the get(...) method, which is not implemented in `ColumnarBatch`.
val actual = row.copy().get(1, dt)
val expected = v
if (dt.isInstanceOf[BinaryType]) {
assert(actual.asInstanceOf[Array[Byte]] sameElements expected.asInstanceOf[Array[Byte]])
} else {
assert(actual == expected)
}
} finally {
vectorizedReader.close()
}
}
}
}
test("SPARK-18433: Improve DataSource option keys to be more case-insensitive") {
withSQLConf(SQLConf.PARQUET_COMPRESSION.key -> "snappy") {
val option = new ParquetOptions(Map("Compression" -> "uncompressed"), spark.sessionState.conf)
assert(option.compressionCodecClassName == "UNCOMPRESSED")
}
}
test("SPARK-23173 Writing a file with data converted from JSON with and incorrect user schema") {
withTempPath { file =>
val jsonData =
"""{
| "a": 1,
| "c": "foo"
|}
|""".stripMargin
val jsonSchema = new StructType()
.add("a", LongType, nullable = false)
.add("b", StringType, nullable = false)
.add("c", StringType, nullable = false)
spark.range(1).select(from_json(lit(jsonData), jsonSchema) as "input")
.write.parquet(file.getAbsolutePath)
checkAnswer(spark.read.parquet(file.getAbsolutePath), Seq(Row(Row(1, null, "foo"))))
}
}
test("Write Spark version into Parquet metadata") {
withTempPath { dir =>
spark.range(1).repartition(1).write.parquet(dir.getAbsolutePath)
assert(getMetaData(dir)(SPARK_VERSION_METADATA_KEY) === SPARK_VERSION_SHORT)
}
}
}
class JobCommitFailureParquetOutputCommitter(outputPath: Path, context: TaskAttemptContext)
extends ParquetOutputCommitter(outputPath, context) {
override def commitJob(jobContext: JobContext): Unit = {
sys.error("Intentional exception for testing purposes")
}
}
class TaskCommitFailureParquetOutputCommitter(outputPath: Path, context: TaskAttemptContext)
extends ParquetOutputCommitter(outputPath, context) {
override def commitTask(context: TaskAttemptContext): Unit = {
sys.error("Intentional exception for testing purposes")
}
}
| witgo/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala | Scala | apache-2.0 | 32,139 |
package reactivemongo.api.commands
import reactivemongo.api.SerializationPack
trait DistinctCommand[P <: SerializationPack] extends ImplicitCommandHelpers[P] {
case class Distinct(
//{ distinct: <collection>, key: <key>, query: <query> }
keyString: String,
query: Option[pack.Document]) extends CollectionCommand with CommandWithPack[pack.type] with CommandWithResult[DistinctResult]
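  // Illustrative sketch (not part of the original file): the case class above mirrors MongoDB's
  // distinct command document, e.g. for a hypothetical "users" collection and key "age":
  //
  //   { distinct: "users", key: "age", query: { active: true } }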
case class DistinctResult(values: List[pack.Value])
}
| viktortnk/ReactiveMongo | driver/src/main/scala/api/commands/distinct.scala | Scala | apache-2.0 | 457 |
package com.karasiq.bootstrap.table
import scala.language.postfixOps
import rx.{Rx, Var}
import com.karasiq.bootstrap.context.{ClassModifiers, RenderingContext}
import com.karasiq.bootstrap.utils.Utils
trait Tables extends TableRows with TableStyles { self: RenderingContext with ClassModifiers with Utils ⇒
import scalaTags.all._
type Table <: AbstractTable with BootstrapHtmlComponent
val Table: TableFactory
trait AbstractTable {
def heading: Rx[Seq[Modifier]]
def content: Rx[Seq[TableRow]]
}
trait TableFactory {
def apply(heading: Rx[Seq[Modifier]], content: Rx[Seq[TableRow]]): Table
def static(heading: Seq[Modifier], content: Seq[TableRow]): Table = {
apply(Var(heading), Var(content))
}
}
}
| Karasiq/scalajs-bootstrap | library/shared/src/main/scala/com/karasiq/bootstrap/table/Tables.scala | Scala | mit | 751 |
/*
* Copyright (c) 2013-2015 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0 which
* accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*/
package org.locationtech.geomesa.convert.xml
import java.io.StringWriter
import javax.xml.transform.dom.DOMSource
import javax.xml.transform.stream.StreamResult
import javax.xml.transform.{OutputKeys, TransformerFactory, Transformer}
import org.apache.avro.generic.GenericRecord
import org.locationtech.geomesa.convert.Transformers.EvaluationContext
import org.locationtech.geomesa.convert.{TransformerFn, TransformerFunctionFactory}
import org.locationtech.geomesa.utils.cache.SoftThreadLocal
import org.w3c.dom.Element
class XmlFunctionFactory extends TransformerFunctionFactory {
override def functions = Seq(xml2string)
val xml2string = new TransformerFn {
private val transformers = new SoftThreadLocal[Transformer]
override val names = Seq("xml2string", "xmlToString")
override def eval(args: Array[Any])(implicit ctx: EvaluationContext): Any = {
val element = args.head.asInstanceOf[Element]
val transformer = transformers.getOrElseUpdate {
val t = TransformerFactory.newInstance().newTransformer()
t.setOutputProperty(OutputKeys.ENCODING, "utf-8")
t.setOutputProperty(OutputKeys.INDENT, "no")
t.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, "yes")
t
}
val result = new StreamResult(new StringWriter())
val source = new DOMSource(element)
transformer.transform(source, result)
result.getWriter.toString
}
}
}
| vpipkt/geomesa | geomesa-convert/geomesa-convert-xml/src/main/scala/org/locationtech/geomesa/convert/xml/XmlFunctionFactory.scala | Scala | apache-2.0 | 1,757 |
/**
* Copyright (c) 2013 Saddle Development Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
package org.saddle.scalar
import org.saddle._
import org.saddle.vec.VecDouble
import org.saddle.mat.MatDouble
import org.saddle.buffer.BufferDouble
import org.saddle.index.IndexDouble
import org.saddle.locator.LocatorDouble
import org.saddle.array.Sorter
/**
* Double ScalarTag
*/
object ScalarTagDouble extends ScalarTag[Double] {
def missing: Double = Double.NaN
def isMissing(v: Double): Boolean = (v != v)
def notMissing(v: Double): Boolean = (v == v)
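  // Explanatory note: Double.NaN serves as the missing-value sentinel, and NaN is the only double
  // for which `v != v` holds, so isMissing(Double.NaN) is true while isMissing(1.0) is false.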
// note, consider N/A's equal
def compare(x: Double, y: Double)(implicit ev: ORD[Double]) =
if (x == y) 0 else if (x > y) 1 else if (x < y) -1 else 0
def toDouble(t: Double)(implicit ev: NUM[Double]): Double = t
override def isDouble = true
def zero(implicit ev: NUM[Double]) = 0d
def one(implicit ev: NUM[Double]) = 1d
def inf(implicit ev: NUM[Double]) = Double.PositiveInfinity
def negInf(implicit ev: NUM[Double]) = Double.NegativeInfinity
def show(v: Double) = if (isMissing(v)) "%s" format "NA" else "%.4f" format(v)
override def runtimeClass = classOf[Double]
def makeBuf(sz: Int = Buffer.INIT_CAPACITY) = new BufferDouble(sz)
def makeLoc(sz: Int = Buffer.INIT_CAPACITY) = new LocatorDouble(sz)
def makeVec(arr: Array[Double]) = new VecDouble(arr)
def makeMat(r: Int, c: Int, arr: Array[Double]) = new MatDouble(r, c, arr)
def makeIndex(vec: Vec[Double])(implicit ord: ORD[Double]): Index[Double] = new IndexDouble(vec)
def makeSorter(implicit ord: ORD[Double]): Sorter[Double] = Sorter.doubleSorter
def concat(arrs: IndexedSeq[Vec[Double]]): Vec[Double] = Vec(array.flatten(arrs.map(_.toArray)))
override def toString = "ScalarTagDouble"
} | jyt109/saddle | saddle-core/src/main/scala/org/saddle/scalar/ScalarTagDouble.scala | Scala | apache-2.0 | 2,274 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.errors
import org.apache.spark.{SparkException, SparkIllegalArgumentException, SparkRuntimeException, SparkUnsupportedOperationException}
import org.apache.spark.sql.{DataFrame, QueryTest}
import org.apache.spark.sql.functions.{lit, lower, struct, sum}
import org.apache.spark.sql.test.SharedSparkSession
class QueryExecutionErrorsSuite extends QueryTest with SharedSparkSession {
import testImplicits._
private def getAesInputs(): (DataFrame, DataFrame) = {
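    // Note: the base64 strings below appear to be AES-encrypted forms of the two plaintexts in
    // df1 ("Spark" and the empty string), produced with 16-, 24- and 32-byte keys respectively,
    // matching the key lengths exercised by the tests further down.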
val encryptedText16 = "4Hv0UKCx6nfUeAoPZo1z+w=="
val encryptedText24 = "NeTYNgA+PCQBN50DA//O2w=="
val encryptedText32 = "9J3iZbIxnmaG+OIA9Amd+A=="
val encryptedEmptyText16 = "jmTOhz8XTbskI/zYFFgOFQ=="
val encryptedEmptyText24 = "9RDK70sHNzqAFRcpfGM5gQ=="
val encryptedEmptyText32 = "j9IDsCvlYXtcVJUf4FAjQQ=="
val df1 = Seq("Spark", "").toDF
val df2 = Seq(
(encryptedText16, encryptedText24, encryptedText32),
(encryptedEmptyText16, encryptedEmptyText24, encryptedEmptyText32)
).toDF("value16", "value24", "value32")
(df1, df2)
}
test("INVALID_PARAMETER_VALUE: invalid key lengths in AES functions") {
val (df1, df2) = getAesInputs()
def checkInvalidKeyLength(df: => DataFrame): Unit = {
val e = intercept[SparkException] {
df.collect
}.getCause.asInstanceOf[SparkRuntimeException]
assert(e.getErrorClass === "INVALID_PARAMETER_VALUE")
assert(e.getSqlState === "22023")
assert(e.getMessage.matches(
"The value of parameter\\\\(s\\\\) 'key' in the aes_encrypt/aes_decrypt function is invalid: " +
"expects a binary value with 16, 24 or 32 bytes, but got \\\\d+ bytes."))
}
// Encryption failure - invalid key length
checkInvalidKeyLength(df1.selectExpr("aes_encrypt(value, '12345678901234567')"))
checkInvalidKeyLength(df1.selectExpr("aes_encrypt(value, binary('123456789012345'))"))
checkInvalidKeyLength(df1.selectExpr("aes_encrypt(value, binary(''))"))
// Decryption failure - invalid key length
Seq("value16", "value24", "value32").foreach { colName =>
checkInvalidKeyLength(df2.selectExpr(
s"aes_decrypt(unbase64($colName), '12345678901234567')"))
checkInvalidKeyLength(df2.selectExpr(
s"aes_decrypt(unbase64($colName), binary('123456789012345'))"))
checkInvalidKeyLength(df2.selectExpr(
s"aes_decrypt(unbase64($colName), '')"))
checkInvalidKeyLength(df2.selectExpr(
s"aes_decrypt(unbase64($colName), binary(''))"))
}
}
test("INVALID_PARAMETER_VALUE: AES decrypt failure - key mismatch") {
val (_, df2) = getAesInputs()
Seq(
("value16", "1234567812345678"),
("value24", "123456781234567812345678"),
("value32", "12345678123456781234567812345678")).foreach { case (colName, key) =>
val e = intercept[SparkException] {
df2.selectExpr(s"aes_decrypt(unbase64($colName), binary('$key'), 'ECB')").collect
}.getCause.asInstanceOf[SparkRuntimeException]
assert(e.getErrorClass === "INVALID_PARAMETER_VALUE")
assert(e.getSqlState === "22023")
assert(e.getMessage ===
"The value of parameter(s) 'expr, key' in the aes_encrypt/aes_decrypt function " +
"is invalid: Detail message: " +
"Given final block not properly padded. " +
"Such issues can arise if a bad key is used during decryption.")
}
}
test("INVALID_PARAMETER_VALUE: invalid unit passed to timestampadd/timestampdiff") {
Seq(
"timestampadd" ->
"select timestampadd('nanosecond', 100, timestamp'2022-02-13 18:00:00')",
"timestampdiff" ->
"""select timestampdiff(
| 'nanosecond',
| timestamp'2022-02-13 18:00:00',
| timestamp'2022-02-22 12:52:00')""".stripMargin
).foreach { case (funcName, sqlStmt) =>
val e = intercept[SparkIllegalArgumentException] {
sql(sqlStmt).collect()
}
assert(e.getErrorClass === "INVALID_PARAMETER_VALUE")
assert(e.getSqlState === "22023")
assert(e.getMessage ===
s"The value of parameter(s) 'unit' in $funcName is invalid: nanosecond")
}
}
test("UNSUPPORTED_FEATURE: unsupported combinations of AES modes and padding") {
val key16 = "abcdefghijklmnop"
val key32 = "abcdefghijklmnop12345678ABCDEFGH"
val (df1, df2) = getAesInputs()
def checkUnsupportedMode(df: => DataFrame): Unit = {
val e = intercept[SparkException] {
df.collect
}.getCause.asInstanceOf[SparkRuntimeException]
assert(e.getErrorClass === "UNSUPPORTED_FEATURE")
assert(e.getSqlState === "0A000")
assert(e.getMessage.matches("""The feature is not supported: AES-\\w+ with the padding \\w+""" +
" by the aes_encrypt/aes_decrypt function."))
}
// Unsupported AES mode and padding in encrypt
checkUnsupportedMode(df1.selectExpr(s"aes_encrypt(value, '$key16', 'CBC')"))
checkUnsupportedMode(df1.selectExpr(s"aes_encrypt(value, '$key16', 'ECB', 'NoPadding')"))
// Unsupported AES mode and padding in decrypt
checkUnsupportedMode(df2.selectExpr(s"aes_decrypt(value16, '$key16', 'GSM')"))
checkUnsupportedMode(df2.selectExpr(s"aes_decrypt(value16, '$key16', 'GCM', 'PKCS')"))
checkUnsupportedMode(df2.selectExpr(s"aes_decrypt(value32, '$key32', 'ECB', 'None')"))
}
test("UNSUPPORTED_FEATURE: unsupported types (map and struct) in lit()") {
def checkUnsupportedTypeInLiteral(v: Any): Unit = {
val e1 = intercept[SparkRuntimeException] { lit(v) }
assert(e1.getErrorClass === "UNSUPPORTED_FEATURE")
assert(e1.getSqlState === "0A000")
      assert(e1.getMessage.matches("""The feature is not supported: literal for '.+' of .+\."""))
}
checkUnsupportedTypeInLiteral(Map("key1" -> 1, "key2" -> 2))
checkUnsupportedTypeInLiteral(("mike", 29, 1.0))
val e2 = intercept[SparkRuntimeException] {
trainingSales
.groupBy($"sales.year")
.pivot(struct(lower(trainingSales("sales.course")), trainingSales("training")))
.agg(sum($"sales.earnings"))
.collect()
}
assert(e2.getMessage === "The feature is not supported: pivoting by the value" +
""" '[dotnet,Dummies]' of the column data type 'struct<col1:string,training:string>'.""")
}
test("UNSUPPORTED_FEATURE: unsupported pivot operations") {
val e1 = intercept[SparkUnsupportedOperationException] {
trainingSales
.groupBy($"sales.year")
.pivot($"sales.course")
.pivot($"training")
.agg(sum($"sales.earnings"))
.collect()
}
assert(e1.getErrorClass === "UNSUPPORTED_FEATURE")
assert(e1.getSqlState === "0A000")
assert(e1.getMessage === "The feature is not supported: Repeated pivots.")
val e2 = intercept[SparkUnsupportedOperationException] {
trainingSales
.rollup($"sales.year")
.pivot($"training")
.agg(sum($"sales.earnings"))
.collect()
}
assert(e2.getErrorClass === "UNSUPPORTED_FEATURE")
assert(e2.getSqlState === "0A000")
assert(e2.getMessage === "The feature is not supported: Pivot not after a groupBy.")
}
}
| srowen/spark | sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala | Scala | apache-2.0 | 7,947 |
package io.buoyant.router.http
import com.twitter.finagle.buoyant.Dst
import com.twitter.finagle.http.{Request, Version}
import com.twitter.finagle.{Dtab, Path}
import com.twitter.util.Future
import io.buoyant.router.RoutingFactory
import io.buoyant.router.RoutingFactory.{IdentifiedRequest, RequestIdentification, UnidentifiedRequest}
object MethodAndHostIdentifier {
def mk(
prefix: Path,
baseDtab: () => Dtab = () => Dtab.base
): RoutingFactory.Identifier[Request] = MethodAndHostIdentifier(prefix, false, baseDtab)
}
case class MethodAndHostIdentifier(
prefix: Path,
uris: Boolean = false,
baseDtab: () => Dtab = () => Dtab.base
) extends RoutingFactory.Identifier[Request] {
private[this] def suffix(req: Request): Path =
if (uris) Path.read(req.path) else Path.empty
private[this] def mkPath(path: Path): Dst.Path =
Dst.Path(prefix ++ path, baseDtab(), Dtab.local)
def apply(req: Request): Future[RequestIdentification[Request]] = req.version match {
case Version.Http10 =>
val dst = mkPath(Path.Utf8("1.0", req.method.toString) ++ suffix(req))
Future.value(new IdentifiedRequest(dst, req))
case Version.Http11 =>
req.host match {
case Some(host) if host.nonEmpty =>
val dst = mkPath(Path.Utf8("1.1", req.method.toString, host.toLowerCase) ++ suffix(req))
Future.value(new IdentifiedRequest(dst, req))
case _ =>
Future.value(
new UnidentifiedRequest(
s"${Version.Http11} request missing hostname"
)
)
}
}
}
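// Illustrative sketch (not part of the original source): with prefix "/svc" and `uris` left at its
// default of false, an HTTP/1.1 request "GET /users/1" carrying "Host: Example.com" would be
// identified with a destination path equivalent to
//
//   Path.read("/svc/1.1/GET/example.com")
//
// while the same request over HTTP/1.0 would yield "/svc/1.0/GET" (no host segment).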
| hhtpcd/linkerd | router/http/src/main/scala/io/buoyant/router/http/MethodAndHostIdentifier.scala | Scala | apache-2.0 | 1,580 |
package lara.epfl.scalasca.rules
import lara.epfl.scalasca.core._
import scala.tools.nsc._
import scala.collection.mutable.LinkedList
case class UnfreedResources(unfreedResources: Set[ControlFlowGraphNode]) extends RuleResult {
override def warning = Warning("MEM_MISSING_RESOURCE_CLOSING",
"Some resources seem not to be closed on all execution paths",
Console.GREEN + "No open resources found" + Console.RESET,
GeneralCategory())
override def toString: String =
if (unfreedResources.size > 0)
unfreedResources.foldLeft("")((acc, res) => acc + (res.node match {
case Some(n) => n.pos.showError(warning.formattedWarning)
case None => ""
}))
else
warning.formattedDefaultMessage
override def isSuccess: Boolean = unfreedResources.size == 0
}
/**
* MEM_MISSING_RESOURCE_CLOSING
*
* Flags any call to openMethodName for which there exists at least one execution path where closeMethodName is not called.
*
* TODO allow type specification for objects on which the methods are called
*
*/
class UnfreedResourcesControlFlowAnalysis[T <: Global](val global: T, _openMethodName: Global#TermName, _closeMethodName: Global#TermName, computedResults: List[RuleResult]) extends Rule {
import global._
type RR = UnfreedResources
override val ruleName: String = "MEM_MISSING_RESOURCE_CLOSING"
private val openMethodName = _openMethodName.asInstanceOf[global.TermName]
private val closeMethodName = _closeMethodName.asInstanceOf[global.TermName]
private val cfgResults: List[IntraProceduralControlFlowGraphMap] =
computedResults.partition(_.isInstanceOf[IntraProceduralControlFlowGraphMap])._1.asInstanceOf[List[IntraProceduralControlFlowGraphMap]]
private val cfgOption =
if (cfgResults.length > 0)
Some(cfgResults.head)
else
None
private def getEquation(cfg: ControlFlowGraph): DataflowEquation = {
// println(cfg)
val allNodes = cfg.getAllNodes()
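    // The fold below builds, for every CFG node, a transfer function over the set of nodes that
    // opened a still-unclosed resource: a call to the open method adds the current node to the
    // union of its predecessors' sets, a call to the close method filters out entries tied to the
    // closed target, and every other node simply forwards the union of its predecessors' sets.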
val equationMap = allNodes.zipWithIndex.foldLeft(Map[ControlFlowGraphNode, (Array[EquationVariable] => Set[ControlFlowGraphNode], Set[ControlFlowGraphNode])]())((acc, tuple) => {
val (cfgNode, index) = tuple
val previousNodes = cfg.prevNodesOf(cfgNode).toSet
acc + (cfgNode match {
//Resource opening
case MethodCall(_, targets, m) if m.asInstanceOf[global.TermName] == openMethodName && targets.size == 1 =>
val target = targets.toList.head
(cfgNode, ((args: Array[EquationVariable]) => {
val argsPreviousNodes = args.filter(arg => previousNodes.contains(arg.variable))
if (argsPreviousNodes.isEmpty)
Set[ControlFlowGraphNode]()
else
argsPreviousNodes.foldLeft(Set[ControlFlowGraphNode]())((acc, a) => acc ++ a.latticeElement.set) + cfgNode
}, previousNodes))
//Resource closing
case MethodCall(_, targets, m) if m.asInstanceOf[global.TermName] == closeMethodName && targets.size == 1 =>
val target = targets.toList.head
(cfgNode, ((args: Array[EquationVariable]) => {
val argsPreviousNodes = args.filter(arg => previousNodes.contains(arg.variable))
if (argsPreviousNodes.isEmpty)
Set[ControlFlowGraphNode]()
else
argsPreviousNodes.foldLeft(Set[ControlFlowGraphNode]())((acc, a) => acc ++ a.latticeElement.set).filter(_.node match {
case Some(n) => n != target
case None => true
})
}, previousNodes))
//All other cases
case _ =>
(cfgNode, ((args: Array[EquationVariable]) => {
val argsPreviousNodes = args.filter(arg => previousNodes.contains(arg.variable))
if (argsPreviousNodes.isEmpty)
Set[ControlFlowGraphNode]()
else
argsPreviousNodes.foldLeft(Set[ControlFlowGraphNode]())((acc, a) => acc ++ a.latticeElement.set)
}, previousNodes))
})
})
new DataflowEquation(equationMap)
}
def apply(syntaxTree: Tree): RR = {
val cfg =
if (cfgOption.isEmpty) {
val cfgGenerator = new IntraProceduralControlFlowGraphGenerator(global, computedResults)
cfgGenerator.apply(syntaxTree.asInstanceOf[cfgGenerator.global.Tree])
}
else
cfgOption.get
UnfreedResources(
cfg.methodCFGMap.foldLeft(Set[ControlFlowGraphNode]())((acc, c) => {
val unclosedResourcesMap = getEquation(c._2).solve(10000)
unclosedResourcesMap match {
case Some(map) => c._2.exitNodes.toList.flatMap(en => map.get(en) match {
case Some(n) => n.latticeElement.set.toList
case None => List()
}).toSet ++ acc
case None => acc
}
}))
}
} | jean-andre-gauthier/scalasca | src/main/scala/lara/epfl/scalasca/rules/UnfreedResourcesControlFlowAnalysis.scala | Scala | bsd-3-clause | 4,480 |
// Copyright: 2010 - 2018 https://github.com/ensime/ensime-server/graphs
// License: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.server
import java.net._
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.Try
import org.ensime.AkkaBackCompat
import org.ensime.fixture._
import org.ensime.util.EnsimeSpec
import org.ensime.util.ensimefile.Implicits.DefaultCharset
import org.ensime.util.path._
class ServerStartupSpec
extends EnsimeSpec
with IsolatedEnsimeConfigFixture
with IsolatedTestKitFixture
with AkkaBackCompat {
val original = EnsimeConfigFixture.EmptyTestProject
"Server" should "start up and bind to random ports" in {
withEnsimeConfig { implicit config =>
withTestKit { implicit tk =>
import tk._
val protocol = new SwankProtocol
system.actorOf(ServerActor.props(protocol), "ensime-main")
eventually(timeout(scaled(10 seconds)), interval(scaled(1 second))) {
PortUtil.port(config.cacheDir.file, "http").isDefined
PortUtil.port(config.cacheDir.file, "port").isDefined
}
}
}
}
it should "start up and bind to preferred ports" in {
withEnsimeConfig { implicit config =>
withTestKit { implicit tk =>
import tk._
// this can fail randomly. No general solution.
val preferredHttp = 10001
val preferredTcp = 10002
(config.cacheDir.file / "http").write(preferredHttp.toString)
(config.cacheDir.file / "port").write(preferredTcp.toString)
val protocol = new SwankProtocol
system.actorOf(ServerActor.props(protocol), "ensime-main")
eventually(timeout(scaled(10 seconds)), interval(scaled(1 second))) {
val http = new Socket
val tcp = new Socket
try {
http.connect(new InetSocketAddress("127.0.0.1", preferredHttp))
tcp.connect(new InetSocketAddress("127.0.0.1", preferredTcp))
http.isConnected() && tcp.isConnected()
} finally {
Try(http.close())
Try(tcp.close())
}
}
}
}
}
it should "shutdown if preferred TCP port is not available" in {
withEnsimeConfig { implicit config =>
withTestKit { implicit tk =>
import tk._
val preferredTcp = 10004
(config.cacheDir.file / "port").write(preferredTcp.toString)
val socket = new ServerSocket()
try {
val tcpHog =
socket.bind(new InetSocketAddress("127.0.0.1", preferredTcp))
eventually { assert(socket.isBound()) }
val protocol = new SwankProtocol
system.actorOf(ServerActor.props(protocol), "ensime-main")
Await.result(system.whenTerminated, akkaTimeout.duration)
} finally {
socket.close()
}
}
}
}
it should "shutdown if preferred HTTP port is not available" in {
withEnsimeConfig { implicit config =>
withTestKit { implicit tk =>
import tk._
val preferredHttp = 10003
(config.cacheDir.file / "http").write(preferredHttp.toString)
val socket = new ServerSocket()
try {
val httpHog =
socket.bind(new InetSocketAddress("127.0.0.1", preferredHttp))
eventually { assert(socket.isBound()) }
val protocol = new SwankProtocol
system.actorOf(ServerActor.props(protocol), "ensime-main")
Await.result(system.whenTerminated, akkaTimeout.duration)
} finally {
socket.close()
}
}
}
}
}
| yyadavalli/ensime-server | server/src/it/scala/org/ensime/server/ServerStartupSpec.scala | Scala | gpl-3.0 | 3,619 |
/*
* Copyright 2013 - 2017 Outworkers Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.outworkers.morpheus.sql
import com.outworkers.morpheus.engine.query._
import com.outworkers.morpheus.builder.SQLBuiltQuery
import com.outworkers.morpheus.column.{AbstractColumn, AbstractModifyColumn, Column, SelectColumn}
import com.outworkers.morpheus.{Row => MorpheusRow}
import shapeless.HNil
private[morpheus] trait DefaultSQLImplicits extends JoinImplicits {
implicit class SelectColumnRequired[Owner <: BaseTable[Owner, Record, TableRow], Record, TableRow <: MorpheusRow, T](
col: Column[Owner, Record, TableRow, T]
) extends SelectColumn[T](SQLBuiltQuery(col.name)) {
def apply(r: MorpheusRow): T = col.apply(r)
}
implicit class ModifyColumn[RR: DataType](col: AbstractColumn[RR]) extends AbstractModifyColumn[RR](col)
implicit class OrderingColumn[RR: DataType](col: AbstractColumn[RR]) extends AbstractOrderingColumn[RR](col)
implicit def selectOperatorClauseToSelectColumn[T](clause: SelectOperatorClause[T]): SelectColumn[T] = new SelectColumn[T](clause.qb) {
def apply(row: MorpheusRow): T = clause.fromRow(row)
}
/**
   * This defines an implicit conversion from a RootUpdateQuery to an UpdateQuery, making the UPDATE syntax block invisible to the end user.
   * Much like a decision block, an UpdateSyntaxBlock needs a decision branch to follow, be it nothing, LOW_PRIORITY or IGNORE.
   *
   * The one catch is that this form of "exit" from an un-executable RootUpdateQuery will directly translate the query to an "UPDATE tableName"
   * query, meaning no UPDATE operators will be used in the default serialisation.
   *
   * The simple assumption made here is that since the user didn't use any other provided method, such as "lowPriority" or "ignore", the desired
   * behaviour is a plain update.
   *
   * @param root The RootUpdateQuery to convert.
   * @tparam T The table owning the record.
   * @tparam R The record type.
   * @return An executable UpdateQuery.
*/
implicit def rootUpdateQueryToUpdateQuery[T <: BaseTable[T, R, DefaultRow], R](root: DefaultRootUpdateQuery[T, R]): UpdateQuery[T, R, DefaultRow, Ungroupped,
Unordered,
Unlimited,
Unchainned, AssignUnchainned, HNil
] = {
new UpdateQuery(
root.table,
root.st.all,
root.rowFunc
)
}
/**
   * This defines an implicit conversion from a RootDeleteQuery to a DeleteQuery, making the DELETE syntax block invisible to the end user.
   * Much like a decision block, the DELETE syntax block needs a decision branch to follow, be it nothing, LOW_PRIORITY or IGNORE.
   *
   * The one catch is that this form of "exit" from an un-executable RootDeleteQuery will directly translate the query to a plain DELETE
   * query, meaning no DELETE operators will be used in the default serialisation.
   *
   * The simple assumption made here is that since the user didn't use any other provided method, such as "lowPriority" or "ignore", the desired
   * behaviour is a plain delete.
   *
   * @param root The RootDeleteQuery to convert.
   * @tparam T The table owning the record.
   * @tparam R The record type.
   * @return An executable DeleteQuery.
*/
implicit def rootDeleteQueryToDeleteQuery[T <: BaseTable[T, R, TR], R, TR <: MorpheusRow](root: RootDeleteQuery[T, R, TR]): DeleteQuery[T,
R,
TR,
Ungroupped,
Unordered,
Unlimited,
Unchainned,
AssignUnchainned,
HNil
] = {
new DeleteQuery(
root.table,
root.st.all,
root.rowFunc
)
}
/**
* This defines an implicit conversion from a RootSelectQuery to a SelectQuery, making the SELECT syntax block invisible to the end user.
   * Much like a decision block, a SelectSyntaxBlock needs a decision branch to follow, be it DISTINCT, ALL or DISTINCTROW as per the SQL spec.
*
* The one catch is that this form of "exit" from an un-executable RootSelectQuery will directly translate the query to a "SELECT fields* FROM tableName"
* query, meaning no SELECT operators will be used in the serialisation.
*
* The simple assumption made here is that since the user didn't use any other provided method, such as "all", "distinct" or "distinctrow",
* the desired behaviour is a full select.
*
* @param root The RootSelectQuery to convert.
* @tparam T The table owning the record.
* @tparam R The record type.
* @return An executable SelectQuery.
*/
implicit def rootSelectQueryToSelectQuery[T <: BaseTable[T, _, DefaultRow], R](root: DefaultRootSelectQuery[T, R]): SelectQuery[T, R, DefaultRow, Ungroupped,
Unordered, Unlimited, Unchainned, AssignUnchainned, HNil] = root.all
/**
* This defines an implicit conversion from a RootInsertQuery to an InsertQuery, making the INSERT syntax block invisible to the end user.
* This is used to automatically "exit" the INSERT syntax block with the default "INSERT INTO" option, while picking no other SQL options such as IGNORE or
* LOW_PRIORITY.
*
   * This makes the following queries equivalent:
   * - Table.insert.into.queryString = "INSERT INTO table"
   * - Table.insert = "INSERT INTO table"
   * @param root The RootInsertQuery to convert.
   * @tparam T The table owning the record.
   * @tparam R The record type.
   * @return An executable InsertQuery.
*/
implicit def rootInsertQueryToQuery[T <: BaseTable[T, R, DefaultRow], R](root: DefaultRootInsertQuery[T, R]): InsertQuery[T, R, DefaultRow, Ungroupped,
Unordered,
Unlimited, Unchainned, AssignUnchainned, HNil] = {
new InsertQuery(
root.table,
root.st.into,
root.rowFunc
)
}
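  // Illustrative sketch (not part of the original file): given a hypothetical table object
  // `Recipes` extending BaseTable, the conversion above is what lets the short form behave like
  // the explicit one, both serialising to an "INSERT INTO <tableName>" query:
  //
  //   Recipes.insert.into.queryString
  //   Recipes.insert.queryString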
/**
   * This defines an implicit conversion from a RootCreateQuery to a CreateQuery, making the CREATE syntax block invisible to the end user.
   * This allows chaining query-building calls directly after "Table.create".
   *
   * @param root The RootCreateQuery to convert.
   * @tparam T The table owning the record.
   * @tparam R The record type.
   * @return An executable CreateQuery.
*/
implicit def rootCreateQueryToCreateQuery[T <: BaseTable[T, R, TR], R, TR <: MorpheusRow](root: RootCreateQuery[T, R, TR]): CreateQuery[T, R, TR, Ungroupped,
Unordered, Unlimited, Unchainned, AssignUnchainned, HNil] = {
new CreateQuery(
root.table,
root.st.default,
root.rowFunc
)
}
}
| websudos/morpheus | morpheus-dsl/src/main/scala/com/outworkers/morpheus/sql/DefaultSQLImplicits.scala | Scala | bsd-2-clause | 6,914 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.api.java
import java.lang.{Long => JLong, Iterable => JIterable}
import java.util.{List => JList}
import scala.collection.JavaConverters._
import scala.language.implicitConversions
import scala.reflect.ClassTag
import com.google.common.base.Optional
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.mapred.{JobConf, OutputFormat}
import org.apache.hadoop.mapreduce.{OutputFormat => NewOutputFormat}
import org.apache.spark.Partitioner
import org.apache.spark.annotation.Experimental
import org.apache.spark.api.java.{JavaPairRDD, JavaSparkContext, JavaUtils}
import org.apache.spark.api.java.JavaPairRDD._
import org.apache.spark.api.java.JavaSparkContext.fakeClassTag
import org.apache.spark.api.java.function.{Function => JFunction, Function2 => JFunction2}
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming._
import org.apache.spark.streaming.dstream.DStream
/**
* A Java-friendly interface to a DStream of key-value pairs, which provides extra methods
* like `reduceByKey` and `join`.
*/
class JavaPairDStream[K, V](val dstream: DStream[(K, V)])(
implicit val kManifest: ClassTag[K],
implicit val vManifest: ClassTag[V])
extends AbstractJavaDStreamLike[(K, V), JavaPairDStream[K, V], JavaPairRDD[K, V]] {
override def wrapRDD(rdd: RDD[(K, V)]): JavaPairRDD[K, V] = JavaPairRDD.fromRDD(rdd)
// =======================================================================
// Methods common to all DStream's
// =======================================================================
/** Return a new DStream containing only the elements that satisfy a predicate. */
def filter(f: JFunction[(K, V), java.lang.Boolean]): JavaPairDStream[K, V] =
dstream.filter((x => f.call(x).booleanValue()))
/** Persist RDDs of this DStream with the default storage level (MEMORY_ONLY_SER) */
def cache(): JavaPairDStream[K, V] = dstream.cache()
/** Persist RDDs of this DStream with the default storage level (MEMORY_ONLY_SER) */
def persist(): JavaPairDStream[K, V] = dstream.persist()
/** Persist the RDDs of this DStream with the given storage level */
def persist(storageLevel: StorageLevel): JavaPairDStream[K, V] = dstream.persist(storageLevel)
/**
* Return a new DStream with an increased or decreased level of parallelism. Each RDD in the
* returned DStream has exactly numPartitions partitions.
*/
def repartition(numPartitions: Int): JavaPairDStream[K, V] = dstream.repartition(numPartitions)
  /** Method that generates an RDD for the given time */
def compute(validTime: Time): JavaPairRDD[K, V] = {
dstream.compute(validTime) match {
case Some(rdd) => new JavaPairRDD(rdd)
case None => null
}
}
/**
* Return a new DStream which is computed based on windowed batches of this DStream.
* The new DStream generates RDDs with the same interval as this DStream.
* @param windowDuration width of the window; must be a multiple of this DStream's interval.
*/
def window(windowDuration: Duration): JavaPairDStream[K, V] =
dstream.window(windowDuration)
/**
* Return a new DStream which is computed based on windowed batches of this DStream.
* @param windowDuration duration (i.e., width) of the window;
* must be a multiple of this DStream's interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's interval
*/
def window(windowDuration: Duration, slideDuration: Duration): JavaPairDStream[K, V] =
dstream.window(windowDuration, slideDuration)
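  // Illustrative sketch (not part of the original source): assuming `pairs` is a JavaPairDStream
  // built on a 10-second batch interval, the call below yields a stream whose RDDs each cover the
  // last 30 seconds of data and are generated every 10 seconds:
  //
  //   pairs.window(Durations.seconds(30), Durations.seconds(10))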
/**
* Return a new DStream by unifying data of another DStream with this DStream.
* @param that Another DStream having the same interval (i.e., slideDuration) as this DStream.
*/
def union(that: JavaPairDStream[K, V]): JavaPairDStream[K, V] =
dstream.union(that.dstream)
// =======================================================================
// Methods only for PairDStream's
// =======================================================================
/**
* Return a new DStream by applying `groupByKey` to each RDD. Hash partitioning is used to
* generate the RDDs with Spark's default number of partitions.
*/
def groupByKey(): JavaPairDStream[K, JIterable[V]] =
dstream.groupByKey().mapValues(_.asJava)
/**
* Return a new DStream by applying `groupByKey` to each RDD. Hash partitioning is used to
* generate the RDDs with `numPartitions` partitions.
*/
def groupByKey(numPartitions: Int): JavaPairDStream[K, JIterable[V]] =
dstream.groupByKey(numPartitions).mapValues(_.asJava)
/**
* Return a new DStream by applying `groupByKey` on each RDD of `this` DStream.
* Therefore, the values for each key in `this` DStream's RDDs are grouped into a
* single sequence to generate the RDDs of the new DStream. org.apache.spark.Partitioner
* is used to control the partitioning of each RDD.
*/
def groupByKey(partitioner: Partitioner): JavaPairDStream[K, JIterable[V]] =
dstream.groupByKey(partitioner).mapValues(_.asJava)
/**
* Return a new DStream by applying `reduceByKey` to each RDD. The values for each key are
* merged using the associative reduce function. Hash partitioning is used to generate the RDDs
* with Spark's default number of partitions.
*/
def reduceByKey(func: JFunction2[V, V, V]): JavaPairDStream[K, V] =
dstream.reduceByKey(func)
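  // Illustrative sketch (not part of the original source): for a hypothetical stream `wordPairs`
  // of (word, 1) pairs, a per-batch word count reduces each key's values with the supplied
  // associative function, e.g. from the Java API this class targets:
  //
  //   wordPairs.reduceByKey((a, b) -> a + b)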
/**
* Return a new DStream by applying `reduceByKey` to each RDD. The values for each key are
* merged using the supplied reduce function. Hash partitioning is used to generate the RDDs
* with `numPartitions` partitions.
*/
def reduceByKey(func: JFunction2[V, V, V], numPartitions: Int): JavaPairDStream[K, V] =
dstream.reduceByKey(func, numPartitions)
/**
* Return a new DStream by applying `reduceByKey` to each RDD. The values for each key are
* merged using the supplied reduce function. org.apache.spark.Partitioner is used to control
   * the partitioning of each RDD.
*/
def reduceByKey(func: JFunction2[V, V, V], partitioner: Partitioner): JavaPairDStream[K, V] = {
dstream.reduceByKey(func, partitioner)
}
/**
* Combine elements of each key in DStream's RDDs using custom function. This is similar to the
* combineByKey for RDDs. Please refer to combineByKey in
* org.apache.spark.rdd.PairRDDFunctions for more information.
*/
def combineByKey[C](createCombiner: JFunction[V, C],
mergeValue: JFunction2[C, V, C],
mergeCombiners: JFunction2[C, C, C],
partitioner: Partitioner
): JavaPairDStream[K, C] = {
implicit val cm: ClassTag[C] = fakeClassTag
dstream.combineByKey(createCombiner, mergeValue, mergeCombiners, partitioner)
}
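  // Illustrative sketch: on the Scala side, combineByKey over an assumed
  // DStream[(String, Double)] named `prices` can keep a running (sum, count) per key,
  // from which a mean can later be derived; HashPartitioner(4) is an example choice.
  //
  //   import org.apache.spark.HashPartitioner
  //   val sumAndCount = prices.combineByKey[(Double, Long)](
  //     (v: Double) => (v, 1L),                                              // createCombiner
  //     (acc: (Double, Long), v: Double) => (acc._1 + v, acc._2 + 1L),       // mergeValue
  //     (a: (Double, Long), b: (Double, Long)) => (a._1 + b._1, a._2 + b._2), // mergeCombiners
  //     new HashPartitioner(4))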
/**
* Combine elements of each key in DStream's RDDs using custom function. This is similar to the
* combineByKey for RDDs. Please refer to combineByKey in
* org.apache.spark.rdd.PairRDDFunctions for more information.
*/
def combineByKey[C](createCombiner: JFunction[V, C],
mergeValue: JFunction2[C, V, C],
mergeCombiners: JFunction2[C, C, C],
partitioner: Partitioner,
mapSideCombine: Boolean
): JavaPairDStream[K, C] = {
implicit val cm: ClassTag[C] = fakeClassTag
dstream.combineByKey(createCombiner, mergeValue, mergeCombiners, partitioner, mapSideCombine)
}
/**
* Return a new DStream by applying `groupByKey` over a sliding window. This is similar to
* `DStream.groupByKey()` but applies it over a sliding window. The new DStream generates RDDs
* with the same interval as this DStream. Hash partitioning is used to generate the RDDs with
* Spark's default number of partitions.
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
*/
def groupByKeyAndWindow(windowDuration: Duration): JavaPairDStream[K, JIterable[V]] = {
dstream.groupByKeyAndWindow(windowDuration).mapValues(_.asJava)
}
/**
* Return a new DStream by applying `groupByKey` over a sliding window. Similar to
* `DStream.groupByKey()`, but applies it over a sliding window. Hash partitioning is used to
* generate the RDDs with Spark's default number of partitions.
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
*/
def groupByKeyAndWindow(windowDuration: Duration, slideDuration: Duration)
: JavaPairDStream[K, JIterable[V]] = {
dstream.groupByKeyAndWindow(windowDuration, slideDuration).mapValues(_.asJava)
}
/**
* Return a new DStream by applying `groupByKey` over a sliding window on `this` DStream.
* Similar to `DStream.groupByKey()`, but applies it over a sliding window.
* Hash partitioning is used to generate the RDDs with `numPartitions` partitions.
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
* @param numPartitions Number of partitions of each RDD in the new DStream.
*/
def groupByKeyAndWindow(windowDuration: Duration, slideDuration: Duration, numPartitions: Int)
: JavaPairDStream[K, JIterable[V]] = {
dstream.groupByKeyAndWindow(windowDuration, slideDuration, numPartitions).mapValues(_.asJava)
}
/**
* Return a new DStream by applying `groupByKey` over a sliding window on `this` DStream.
* Similar to `DStream.groupByKey()`, but applies it over a sliding window.
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
* @param partitioner Partitioner for controlling the partitioning of each RDD in the new
* DStream.
*/
def groupByKeyAndWindow(
windowDuration: Duration,
slideDuration: Duration,
partitioner: Partitioner
): JavaPairDStream[K, JIterable[V]] = {
dstream.groupByKeyAndWindow(windowDuration, slideDuration, partitioner).mapValues(_.asJava)
}
/**
* Create a new DStream by applying `reduceByKey` over a sliding window on `this` DStream.
* Similar to `DStream.reduceByKey()`, but applies it over a sliding window. The new DStream
* generates RDDs with the same interval as this DStream. Hash partitioning is used to generate
* the RDDs with Spark's default number of partitions.
* @param reduceFunc associative reduce function
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
*/
def reduceByKeyAndWindow(reduceFunc: JFunction2[V, V, V], windowDuration: Duration)
: JavaPairDStream[K, V] = {
dstream.reduceByKeyAndWindow(reduceFunc, windowDuration)
}
/**
* Return a new DStream by applying `reduceByKey` over a sliding window. This is similar to
* `DStream.reduceByKey()` but applies it over a sliding window. Hash partitioning is used to
* generate the RDDs with Spark's default number of partitions.
* @param reduceFunc associative reduce function
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
*/
def reduceByKeyAndWindow(
reduceFunc: JFunction2[V, V, V],
windowDuration: Duration,
slideDuration: Duration
): JavaPairDStream[K, V] = {
dstream.reduceByKeyAndWindow(reduceFunc, windowDuration, slideDuration)
}
/**
* Return a new DStream by applying `reduceByKey` over a sliding window. This is similar to
* `DStream.reduceByKey()` but applies it over a sliding window. Hash partitioning is used to
* generate the RDDs with `numPartitions` partitions.
* @param reduceFunc associative reduce function
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
* @param numPartitions Number of partitions of each RDD in the new DStream.
*/
def reduceByKeyAndWindow(
reduceFunc: JFunction2[V, V, V],
windowDuration: Duration,
slideDuration: Duration,
numPartitions: Int
): JavaPairDStream[K, V] = {
dstream.reduceByKeyAndWindow(reduceFunc, windowDuration, slideDuration, numPartitions)
}
/**
* Return a new DStream by applying `reduceByKey` over a sliding window. Similar to
* `DStream.reduceByKey()`, but applies it over a sliding window.
* @param reduceFunc associative reduce function
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
* @param partitioner Partitioner for controlling the partitioning of each RDD in the new
* DStream.
*/
def reduceByKeyAndWindow(
reduceFunc: JFunction2[V, V, V],
windowDuration: Duration,
slideDuration: Duration,
partitioner: Partitioner
): JavaPairDStream[K, V] = {
dstream.reduceByKeyAndWindow(reduceFunc, windowDuration, slideDuration, partitioner)
}
/**
   * Return a new DStream by applying incremental `reduceByKey` over a sliding window.
   * The reduced value of a new window is calculated using the old window's reduced value:
   *  1. reduce the new values that entered the window (e.g., adding new counts)
   *  2. "inverse reduce" the old values that left the window (e.g., subtracting old counts)
   * This is more efficient than reduceByKeyAndWindow without an "inverse reduce" function.
   * However, it is applicable only to "invertible reduce functions".
* Hash partitioning is used to generate the RDDs with Spark's default number of partitions.
* @param reduceFunc associative reduce function
* @param invReduceFunc inverse function
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
*/
def reduceByKeyAndWindow(
reduceFunc: JFunction2[V, V, V],
invReduceFunc: JFunction2[V, V, V],
windowDuration: Duration,
slideDuration: Duration
): JavaPairDStream[K, V] = {
dstream.reduceByKeyAndWindow(reduceFunc, invReduceFunc, windowDuration, slideDuration)
}
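  // Illustrative sketch: the incremental form only re-reduces the batches entering and
  // leaving the window, so the reduce function must be invertible (here + and -). Assumes
  // a Scala DStream[(String, Int)] named `counts` and a checkpoint directory configured.
  //
  //   val windowedCounts = counts.reduceByKeyAndWindow(
  //     _ + _,        // add counts entering the window
  //     _ - _,        // subtract counts leaving the window
  //     Seconds(60),  // window width
  //     Seconds(10))  // slide interval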
/**
* Return a new DStream by applying incremental `reduceByKey` over a sliding window.
   * The reduced value of a new window is calculated using the old window's reduced value:
   *  1. reduce the new values that entered the window (e.g., adding new counts)
   *  2. "inverse reduce" the old values that left the window (e.g., subtracting old counts)
   * This is more efficient than reduceByKeyAndWindow without an "inverse reduce" function.
   * However, it is applicable only to "invertible reduce functions".
* Hash partitioning is used to generate the RDDs with `numPartitions` partitions.
* @param reduceFunc associative reduce function
* @param invReduceFunc inverse function
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
* @param numPartitions number of partitions of each RDD in the new DStream.
* @param filterFunc function to filter expired key-value pairs;
* only pairs that satisfy the function are retained
* set this to null if you do not want to filter
*/
def reduceByKeyAndWindow(
reduceFunc: JFunction2[V, V, V],
invReduceFunc: JFunction2[V, V, V],
windowDuration: Duration,
slideDuration: Duration,
numPartitions: Int,
filterFunc: JFunction[(K, V), java.lang.Boolean]
): JavaPairDStream[K, V] = {
dstream.reduceByKeyAndWindow(
reduceFunc,
invReduceFunc,
windowDuration,
slideDuration,
numPartitions,
(p: (K, V)) => filterFunc(p).booleanValue()
)
}
/**
* Return a new DStream by applying incremental `reduceByKey` over a sliding window.
   * The reduced value of a new window is calculated using the old window's reduced value:
   *  1. reduce the new values that entered the window (e.g., adding new counts)
   *  2. "inverse reduce" the old values that left the window (e.g., subtracting old counts)
   * This is more efficient than reduceByKeyAndWindow without an "inverse reduce" function.
   * However, it is applicable only to "invertible reduce functions".
* @param reduceFunc associative reduce function
* @param invReduceFunc inverse function
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
* @param partitioner Partitioner for controlling the partitioning of each RDD in the new
* DStream.
* @param filterFunc function to filter expired key-value pairs;
* only pairs that satisfy the function are retained
* set this to null if you do not want to filter
*/
def reduceByKeyAndWindow(
reduceFunc: JFunction2[V, V, V],
invReduceFunc: JFunction2[V, V, V],
windowDuration: Duration,
slideDuration: Duration,
partitioner: Partitioner,
filterFunc: JFunction[(K, V), java.lang.Boolean]
): JavaPairDStream[K, V] = {
dstream.reduceByKeyAndWindow(
reduceFunc,
invReduceFunc,
windowDuration,
slideDuration,
partitioner,
(p: (K, V)) => filterFunc(p).booleanValue()
)
}
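  // Illustrative sketch: with the inverse-reduce form, keys whose count has dropped back
  // to zero would otherwise linger in the window state; a filter function keeps the state
  // bounded. Stream and parameter names are example values.
  //
  //   val nonZeroCounts = counts.reduceByKeyAndWindow(
  //     _ + _, _ - _, Seconds(60), Seconds(10),
  //     numPartitions = 4,
  //     filterFunc = { case (_, count) => count != 0 })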
/**
* :: Experimental ::
* Return a [[JavaMapWithStateDStream]] by applying a function to every key-value element of
* `this` stream, while maintaining some state data for each unique key. The mapping function
   * and other specifications (e.g. partitioners, timeouts, initial state data, etc.) of this
   * transformation can be specified using the [[StateSpec]] class. The state data is accessible
   * as a parameter of type [[State]] in the mapping function.
*
* Example of using `mapWithState`:
* {{{
   *    // A mapping function that maintains an integer state and returns a string
   *    Function3<String, Optional<Integer>, State<Integer>, String> mappingFunction =
   *        new Function3<String, Optional<Integer>, State<Integer>, String>() {
   *            @Override
   *            public String call(String key, Optional<Integer> value, State<Integer> state) {
   *                // Use state.exists(), state.get(), state.update() and state.remove()
   *                // to manage state, and return the necessary string
   *            }
   *        };
   *
   *    JavaMapWithStateDStream<String, Integer, Integer, String> mapWithStateDStream =
   *        keyValueDStream.mapWithState(StateSpec.function(mappingFunction));
   * }}}
*
* @param spec Specification of this transformation
* @tparam StateType Class type of the state data
* @tparam MappedType Class type of the mapped data
*/
@Experimental
def mapWithState[StateType, MappedType](spec: StateSpec[K, V, StateType, MappedType]):
JavaMapWithStateDStream[K, V, StateType, MappedType] = {
new JavaMapWithStateDStream(dstream.mapWithState(spec)(
JavaSparkContext.fakeClassTag,
JavaSparkContext.fakeClassTag))
}
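  // Illustrative sketch: the Scala-side equivalent builds the StateSpec from a
  // (key, value, state) function; the key/value types and the `counts` stream are
  // example assumptions.
  //
  //   val spec = StateSpec.function { (key: String, value: Option[Int], state: State[Int]) =>
  //     val sum = state.getOption.getOrElse(0) + value.getOrElse(0)
  //     state.update(sum)
  //     (key, sum)   // the mapped element emitted downstream
  //   }
  //   val runningSums = counts.mapWithState(spec)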
private def convertUpdateStateFunction[S](in: JFunction2[JList[V], Optional[S], Optional[S]]):
(Seq[V], Option[S]) => Option[S] = {
val scalaFunc: (Seq[V], Option[S]) => Option[S] = (values, state) => {
val list: JList[V] = values.asJava
val scalaState: Optional[S] = JavaUtils.optionToOptional(state)
val result: Optional[S] = in.apply(list, scalaState)
      if (result.isPresent) Some(result.get()) else None
}
scalaFunc
}
/**
* Return a new "state" DStream where the state for each key is updated by applying
* the given function on the previous state of the key and the new values of each key.
* Hash partitioning is used to generate the RDDs with Spark's default number of partitions.
* @param updateFunc State update function. If `this` function returns None, then
* corresponding state key-value pair will be eliminated.
* @tparam S State type
*/
def updateStateByKey[S](updateFunc: JFunction2[JList[V], Optional[S], Optional[S]])
: JavaPairDStream[K, S] = {
implicit val cm: ClassTag[S] = fakeClassTag
dstream.updateStateByKey(convertUpdateStateFunction(updateFunc))
}
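  // Illustrative sketch: the Scala-side update function receives the new values for a key
  // and its previous state, and returns the new state (None drops the key). Assumes a
  // DStream[(String, Int)] named `counts` and checkpointing enabled.
  //
  //   def updateCount(newValues: Seq[Int], state: Option[Int]): Option[Int] =
  //     Some(state.getOrElse(0) + newValues.sum)
  //   val runningCounts = counts.updateStateByKey(updateCount)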
/**
* Return a new "state" DStream where the state for each key is updated by applying
* the given function on the previous state of the key and the new values of each key.
* Hash partitioning is used to generate the RDDs with `numPartitions` partitions.
* @param updateFunc State update function. If `this` function returns None, then
* corresponding state key-value pair will be eliminated.
* @param numPartitions Number of partitions of each RDD in the new DStream.
* @tparam S State type
*/
def updateStateByKey[S](
updateFunc: JFunction2[JList[V], Optional[S], Optional[S]],
numPartitions: Int)
: JavaPairDStream[K, S] = {
implicit val cm: ClassTag[S] = fakeClassTag
dstream.updateStateByKey(convertUpdateStateFunction(updateFunc), numPartitions)
}
/**
* Return a new "state" DStream where the state for each key is updated by applying
* the given function on the previous state of the key and the new values of the key.
* org.apache.spark.Partitioner is used to control the partitioning of each RDD.
* @param updateFunc State update function. If `this` function returns None, then
* corresponding state key-value pair will be eliminated.
* @param partitioner Partitioner for controlling the partitioning of each RDD in the new
* DStream.
* @tparam S State type
*/
def updateStateByKey[S](
updateFunc: JFunction2[JList[V], Optional[S], Optional[S]],
partitioner: Partitioner
): JavaPairDStream[K, S] = {
implicit val cm: ClassTag[S] = fakeClassTag
dstream.updateStateByKey(convertUpdateStateFunction(updateFunc), partitioner)
}
/**
* Return a new "state" DStream where the state for each key is updated by applying
* the given function on the previous state of the key and the new values of the key.
* org.apache.spark.Partitioner is used to control the partitioning of each RDD.
* @param updateFunc State update function. If `this` function returns None, then
* corresponding state key-value pair will be eliminated.
* @param partitioner Partitioner for controlling the partitioning of each RDD in the new
* DStream.
* @param initialRDD initial state value of each key.
* @tparam S State type
*/
def updateStateByKey[S](
updateFunc: JFunction2[JList[V], Optional[S], Optional[S]],
partitioner: Partitioner,
initialRDD: JavaPairRDD[K, S]
): JavaPairDStream[K, S] = {
implicit val cm: ClassTag[S] = fakeClassTag
dstream.updateStateByKey(convertUpdateStateFunction(updateFunc), partitioner, initialRDD)
}
/**
   * Return a new DStream by applying a map function to the value of each key-value pair in
* 'this' DStream without changing the key.
*/
def mapValues[U](f: JFunction[V, U]): JavaPairDStream[K, U] = {
implicit val cm: ClassTag[U] = fakeClassTag
dstream.mapValues(f)
}
/**
   * Return a new DStream by applying a flatMap function to the value of each key-value pair in
* 'this' DStream without changing the key.
*/
def flatMapValues[U](f: JFunction[V, java.lang.Iterable[U]]): JavaPairDStream[K, U] = {
import scala.collection.JavaConverters._
def fn: (V) => Iterable[U] = (x: V) => f.apply(x).asScala
implicit val cm: ClassTag[U] =
implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[U]]
dstream.flatMapValues(fn)
}
/**
* Return a new DStream by applying 'cogroup' between RDDs of `this` DStream and `other` DStream.
* Hash partitioning is used to generate the RDDs with Spark's default number
* of partitions.
*/
def cogroup[W](other: JavaPairDStream[K, W]): JavaPairDStream[K, (JIterable[V], JIterable[W])] = {
implicit val cm: ClassTag[W] = fakeClassTag
dstream.cogroup(other.dstream).mapValues(t => (t._1.asJava, t._2.asJava))
}
/**
* Return a new DStream by applying 'cogroup' between RDDs of `this` DStream and `other` DStream.
* Hash partitioning is used to generate the RDDs with `numPartitions` partitions.
*/
def cogroup[W](
other: JavaPairDStream[K, W],
numPartitions: Int
): JavaPairDStream[K, (JIterable[V], JIterable[W])] = {
implicit val cm: ClassTag[W] = fakeClassTag
dstream.cogroup(other.dstream, numPartitions).mapValues(t => (t._1.asJava, t._2.asJava))
}
/**
* Return a new DStream by applying 'cogroup' between RDDs of `this` DStream and `other` DStream.
   * The supplied org.apache.spark.Partitioner is used to control the partitioning of each RDD.
*/
def cogroup[W](
other: JavaPairDStream[K, W],
partitioner: Partitioner
): JavaPairDStream[K, (JIterable[V], JIterable[W])] = {
implicit val cm: ClassTag[W] = fakeClassTag
dstream.cogroup(other.dstream, partitioner).mapValues(t => (t._1.asJava, t._2.asJava))
}
/**
* Return a new DStream by applying 'join' between RDDs of `this` DStream and `other` DStream.
* Hash partitioning is used to generate the RDDs with Spark's default number of partitions.
*/
def join[W](other: JavaPairDStream[K, W]): JavaPairDStream[K, (V, W)] = {
implicit val cm: ClassTag[W] = fakeClassTag
dstream.join(other.dstream)
}
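  // Illustrative sketch: joining two pair streams pairs up the values of keys present in
  // both RDDs of a batch; the `clicks` and `impressions` streams are example assumptions.
  //
  //   val clicks: DStream[(String, Int)] = ...
  //   val impressions: DStream[(String, Int)] = ...
  //   val joined: DStream[(String, (Int, Int))] = clicks.join(impressions)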
/**
* Return a new DStream by applying 'join' between RDDs of `this` DStream and `other` DStream.
* Hash partitioning is used to generate the RDDs with `numPartitions` partitions.
*/
def join[W](other: JavaPairDStream[K, W], numPartitions: Int): JavaPairDStream[K, (V, W)] = {
implicit val cm: ClassTag[W] = fakeClassTag
dstream.join(other.dstream, numPartitions)
}
/**
* Return a new DStream by applying 'join' between RDDs of `this` DStream and `other` DStream.
* The supplied org.apache.spark.Partitioner is used to control the partitioning of each RDD.
*/
def join[W](
other: JavaPairDStream[K, W],
partitioner: Partitioner
): JavaPairDStream[K, (V, W)] = {
implicit val cm: ClassTag[W] = fakeClassTag
dstream.join(other.dstream, partitioner)
}
/**
* Return a new DStream by applying 'left outer join' between RDDs of `this` DStream and
* `other` DStream. Hash partitioning is used to generate the RDDs with Spark's default
* number of partitions.
*/
def leftOuterJoin[W](other: JavaPairDStream[K, W]): JavaPairDStream[K, (V, Optional[W])] = {
implicit val cm: ClassTag[W] = fakeClassTag
val joinResult = dstream.leftOuterJoin(other.dstream)
joinResult.mapValues{case (v, w) => (v, JavaUtils.optionToOptional(w))}
}
/**
* Return a new DStream by applying 'left outer join' between RDDs of `this` DStream and
* `other` DStream. Hash partitioning is used to generate the RDDs with `numPartitions`
* partitions.
*/
def leftOuterJoin[W](
other: JavaPairDStream[K, W],
numPartitions: Int
): JavaPairDStream[K, (V, Optional[W])] = {
implicit val cm: ClassTag[W] = fakeClassTag
val joinResult = dstream.leftOuterJoin(other.dstream, numPartitions)
joinResult.mapValues{case (v, w) => (v, JavaUtils.optionToOptional(w))}
}
/**
* Return a new DStream by applying 'left outer join' between RDDs of `this` DStream and
* `other` DStream. The supplied org.apache.spark.Partitioner is used to control
* the partitioning of each RDD.
*/
def leftOuterJoin[W](
other: JavaPairDStream[K, W],
partitioner: Partitioner
): JavaPairDStream[K, (V, Optional[W])] = {
implicit val cm: ClassTag[W] = fakeClassTag
val joinResult = dstream.leftOuterJoin(other.dstream, partitioner)
joinResult.mapValues{case (v, w) => (v, JavaUtils.optionToOptional(w))}
}
/**
* Return a new DStream by applying 'right outer join' between RDDs of `this` DStream and
* `other` DStream. Hash partitioning is used to generate the RDDs with Spark's default
* number of partitions.
*/
def rightOuterJoin[W](other: JavaPairDStream[K, W]): JavaPairDStream[K, (Optional[V], W)] = {
implicit val cm: ClassTag[W] = fakeClassTag
val joinResult = dstream.rightOuterJoin(other.dstream)
joinResult.mapValues{case (v, w) => (JavaUtils.optionToOptional(v), w)}
}
/**
* Return a new DStream by applying 'right outer join' between RDDs of `this` DStream and
* `other` DStream. Hash partitioning is used to generate the RDDs with `numPartitions`
* partitions.
*/
def rightOuterJoin[W](
other: JavaPairDStream[K, W],
numPartitions: Int
): JavaPairDStream[K, (Optional[V], W)] = {
implicit val cm: ClassTag[W] = fakeClassTag
val joinResult = dstream.rightOuterJoin(other.dstream, numPartitions)
joinResult.mapValues{case (v, w) => (JavaUtils.optionToOptional(v), w)}
}
/**
* Return a new DStream by applying 'right outer join' between RDDs of `this` DStream and
* `other` DStream. The supplied org.apache.spark.Partitioner is used to control
* the partitioning of each RDD.
*/
def rightOuterJoin[W](
other: JavaPairDStream[K, W],
partitioner: Partitioner
): JavaPairDStream[K, (Optional[V], W)] = {
implicit val cm: ClassTag[W] = fakeClassTag
val joinResult = dstream.rightOuterJoin(other.dstream, partitioner)
joinResult.mapValues{case (v, w) => (JavaUtils.optionToOptional(v), w)}
}
/**
* Return a new DStream by applying 'full outer join' between RDDs of `this` DStream and
* `other` DStream. Hash partitioning is used to generate the RDDs with Spark's default
* number of partitions.
*/
def fullOuterJoin[W](other: JavaPairDStream[K, W])
: JavaPairDStream[K, (Optional[V], Optional[W])] = {
implicit val cm: ClassTag[W] = fakeClassTag
val joinResult = dstream.fullOuterJoin(other.dstream)
joinResult.mapValues{ case (v, w) =>
(JavaUtils.optionToOptional(v), JavaUtils.optionToOptional(w))
}
}
/**
* Return a new DStream by applying 'full outer join' between RDDs of `this` DStream and
* `other` DStream. Hash partitioning is used to generate the RDDs with `numPartitions`
* partitions.
*/
def fullOuterJoin[W](
other: JavaPairDStream[K, W],
numPartitions: Int
): JavaPairDStream[K, (Optional[V], Optional[W])] = {
implicit val cm: ClassTag[W] = fakeClassTag
val joinResult = dstream.fullOuterJoin(other.dstream, numPartitions)
joinResult.mapValues{ case (v, w) =>
(JavaUtils.optionToOptional(v), JavaUtils.optionToOptional(w))
}
}
/**
* Return a new DStream by applying 'full outer join' between RDDs of `this` DStream and
* `other` DStream. The supplied org.apache.spark.Partitioner is used to control
* the partitioning of each RDD.
*/
def fullOuterJoin[W](
other: JavaPairDStream[K, W],
partitioner: Partitioner
): JavaPairDStream[K, (Optional[V], Optional[W])] = {
implicit val cm: ClassTag[W] = fakeClassTag
val joinResult = dstream.fullOuterJoin(other.dstream, partitioner)
joinResult.mapValues{ case (v, w) =>
(JavaUtils.optionToOptional(v), JavaUtils.optionToOptional(w))
}
}
/**
* Save each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is
* generated based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix".
*/
def saveAsHadoopFiles(prefix: String, suffix: String) {
dstream.saveAsHadoopFiles(prefix, suffix)
}
/**
* Save each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is
* generated based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix".
*/
def saveAsHadoopFiles[F <: OutputFormat[_, _]](
prefix: String,
suffix: String,
keyClass: Class[_],
valueClass: Class[_],
outputFormatClass: Class[F]) {
dstream.saveAsHadoopFiles(prefix, suffix, keyClass, valueClass, outputFormatClass)
}
/**
* Save each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is
* generated based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix".
*/
def saveAsHadoopFiles[F <: OutputFormat[_, _]](
prefix: String,
suffix: String,
keyClass: Class[_],
valueClass: Class[_],
outputFormatClass: Class[F],
conf: JobConf) {
dstream.saveAsHadoopFiles(prefix, suffix, keyClass, valueClass, outputFormatClass, conf)
}
/**
* Save each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is
* generated based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix".
*/
def saveAsNewAPIHadoopFiles(prefix: String, suffix: String) {
dstream.saveAsNewAPIHadoopFiles(prefix, suffix)
}
/**
* Save each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is
* generated based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix".
*/
def saveAsNewAPIHadoopFiles[F <: NewOutputFormat[_, _]](
prefix: String,
suffix: String,
keyClass: Class[_],
valueClass: Class[_],
outputFormatClass: Class[F]) {
dstream.saveAsNewAPIHadoopFiles(prefix, suffix, keyClass, valueClass, outputFormatClass)
}
/**
* Save each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is
* generated based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix".
*/
def saveAsNewAPIHadoopFiles[F <: NewOutputFormat[_, _]](
prefix: String,
suffix: String,
keyClass: Class[_],
valueClass: Class[_],
outputFormatClass: Class[F],
conf: Configuration = dstream.context.sparkContext.hadoopConfiguration) {
dstream.saveAsNewAPIHadoopFiles(prefix, suffix, keyClass, valueClass, outputFormatClass, conf)
}
/** Convert to a JavaDStream */
def toJavaDStream(): JavaDStream[(K, V)] = {
new JavaDStream[(K, V)](dstream)
}
override val classTag: ClassTag[(K, V)] = fakeClassTag
}
object JavaPairDStream {
implicit def fromPairDStream[K: ClassTag, V: ClassTag](dstream: DStream[(K, V)])
: JavaPairDStream[K, V] = {
new JavaPairDStream[K, V](dstream)
}
def fromJavaDStream[K, V](dstream: JavaDStream[(K, V)]): JavaPairDStream[K, V] = {
implicit val cmk: ClassTag[K] = fakeClassTag
implicit val cmv: ClassTag[V] = fakeClassTag
new JavaPairDStream[K, V](dstream.dstream)
}
def scalaToJavaLong[K: ClassTag](dstream: JavaPairDStream[K, Long])
: JavaPairDStream[K, JLong] = {
DStream.toPairDStreamFunctions(dstream.dstream).mapValues(new JLong(_))
}
}
| chenc10/Spark-PAF | streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaPairDStream.scala | Scala | apache-2.0 | 37,846 |
package com.twitter.finagle.service
import org.junit.runner.RunWith
import org.mockito.Mockito.{verify, when, times}
import org.mockito.Matchers._
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import org.scalatest.mock.MockitoSugar
import com.twitter.finagle.{WriteException, Service, Status}
import com.twitter.util.{Await, Promise, Future}
@RunWith(classOf[JUnitRunner])
class CloseOnReleaseServiceTest extends FunSuite with MockitoSugar {
class Helper {
val service = mock[Service[Any, Any]]
when(service.close(any)) thenReturn Future.Done
val promise = new Promise[Any]
when(service(any)) thenReturn promise
when(service.status) thenReturn Status.Open
val wrapper = new CloseOnReleaseService(service)
}
test("only call release on the underlying service once") {
val h = new Helper
import h._
assert(wrapper.isAvailable)
verify(service, times(1)).status
wrapper.close()
verify(service, times(1)).close(any)
wrapper.close()
verify(service, times(1)).close(any)
assert(!wrapper.isAvailable)
verify(service, times(1)).status
}
test("throw a write exception if we attempt to use a released service") {
val h = new Helper
import h._
wrapper.close()
intercept[WriteException] {
Await.result(wrapper(132))
}
}
}
| mkhq/finagle | finagle-core/src/test/scala/com/twitter/finagle/service/CloseOnReleaseServiceTest.scala | Scala | apache-2.0 | 1,342 |
package net.atos.kjc.fruitshop.checkoutsystem
import org.scalatest.{FreeSpec, Matchers}
class CheckoutSpec extends FreeSpec with Matchers {
  val checkoutName = "Eco Fruit shop checkout system"
  "Checkout system should" - {
    "present the right name" in {
      Checkout.name shouldBe checkoutName
    }
}
}
| kjcaputa/hmrc-test | src/test/scala/net/atos/kjc/fruitshop/checkoutsystem/Checkout.scala | Scala | apache-2.0 | 311 |
package org.refptr.iscala
trait Compatibility
trait InterpreterCompatibility extends Compatibility { self: Interpreter => }
| nkhuyu/IScala | src/main/scala_2.11/Compatibility.scala | Scala | mit | 125 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.frontend.v2_3.ast
import org.neo4j.cypher.internal.frontend.v2_3.DummyPosition
import org.neo4j.cypher.internal.frontend.v2_3.symbols._
class MultiplyTest extends InfixExpressionTestBase(Multiply(_, _)(DummyPosition(0))) {
// Infix specializations:
// 1 * 1 => 1
// 1 * 1.1 => 1.1
// 1.1 * 1 => 1.1
// 1.1 * 1.1 => 1.21
test("shouldHandleAllSpecializations") {
testValidTypes(CTInteger, CTInteger)(CTInteger)
testValidTypes(CTInteger, CTFloat)(CTFloat)
testValidTypes(CTFloat, CTInteger)(CTFloat)
testValidTypes(CTFloat, CTFloat)(CTFloat)
}
test("shouldHandleCombinedSpecializations") {
testValidTypes(CTFloat | CTInteger, CTFloat | CTInteger)(CTFloat | CTInteger)
}
test("shouldFailTypeCheckWhenAddingIncompatible") {
testInvalidApplication(CTInteger, CTBoolean)(
"Type mismatch: expected Float or Integer but was Boolean"
)
testInvalidApplication(CTBoolean, CTInteger)(
"Type mismatch: expected Float or Integer but was Boolean"
)
}
}
| HuangLS/neo4j | community/cypher/frontend-2.3/src/test/scala/org/neo4j/cypher/internal/frontend/v2_3/ast/MultiplyTest.scala | Scala | apache-2.0 | 1,843 |
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.crossdata.server.actors
import java.util.concurrent.{CancellationException, Executor}
import akka.actor.{Actor, ActorRef, Props}
import com.stratio.common.utils.concurrent.Cancellable
import com.stratio.crossdata.common.result.{ErrorSQLResult, SuccessfulSQLResult}
import com.stratio.crossdata.common.{QueryCancelledReply, SQLCommand, SQLReply}
import com.stratio.crossdata.server.actors.JobActor.Commands.{CancelJob, GetJobStatus, StartJob}
import com.stratio.crossdata.server.actors.JobActor.Events.{JobCompleted, JobFailed}
import com.stratio.crossdata.server.actors.JobActor.{ProlificExecutor, Task}
import org.apache.log4j.Logger
import org.apache.spark.sql.crossdata.{XDContext, XDDataFrame, XDSession}
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, ExecutionException}
import scala.util.{Failure, Success}
object JobActor {
trait JobStatus
object JobStatus {
case object Idle extends JobStatus
case object Running extends JobStatus
case object Completed extends JobStatus
case object Cancelled extends JobStatus
case class Failed(reason: Throwable) extends JobStatus
}
trait JobEvent
object Events {
case object JobCompleted extends JobEvent
case class JobFailed(err: Throwable) extends JobEvent
}
object Commands {
trait JobCommand
case object GetJobStatus
case object CancelJob
case object StartJob
}
case class Task(command: SQLCommand, requester: ActorRef, timeout: Option[FiniteDuration])
/**
   * The [[JobActor]] state is directly given by the running task, which can be: None (the Idle state) or a Running,
   * Completed, Cancelled or Failed task.
* @param runningTask [[Cancellable]] wrapping a [[scala.concurrent.Future]] which acts as a Spark driver.
*/
case class State(runningTask: Option[Cancellable[SQLReply]]) {
import JobStatus._
def getStatus: JobStatus = runningTask map { task =>
task.future.value map {
case Success(_) => Completed
case Failure(_: CancellationException) => Cancelled
case Failure(err) => Failed(err)
} getOrElse Running
} getOrElse Idle
}
def props(xdSession: XDSession, command: SQLCommand, requester: ActorRef, timeout: Option[FiniteDuration]): Props =
Props(new JobActor(xdSession, Task(command, requester, timeout)))
/**
* Executor class which runs each command in a brand new thread each time
*/
class ProlificExecutor extends Executor { override def execute(command: Runnable): Unit = new Thread(command) start }
}
class JobActor(
val xdContext: XDContext,
val task: Task
) extends Actor {
import JobActor.JobStatus._
import JobActor.State
import task._
lazy val logger = Logger.getLogger(classOf[ServerActor])
override def receive: Receive = receive(State(None))
private def receive(st: State): Receive = {
// Commands
case StartJob if st.getStatus == Idle =>
logger.debug(s"Starting Job under ${context.parent.path}")
import context.dispatcher
val runningTask = launchTask
runningTask.future onComplete {
case Success(queryRes) =>
requester ! queryRes
self ! JobCompleted
case Failure(_: CancellationException) => // Job cancellation
requester ! QueryCancelledReply(command.requestId)
self ! JobCompleted
case Failure(e: ExecutionException) => self ! JobFailed(e.getCause) // Spark exception
case Failure(reason) => self ! JobFailed(reason) // Job failure
}
val isRunning = runningTask.future.value.isEmpty
timeout.filter(_ => isRunning).foreach {
context.system.scheduler.scheduleOnce(_, self, CancelJob)
}
context.become(receive(st.copy(runningTask = Some(runningTask))))
case CancelJob =>
st.runningTask.foreach{ tsk =>
logger.debug(s"Cancelling ${self.path}'s task ")
tsk.cancel()
}
case GetJobStatus =>
sender ! st.getStatus
// Events
case event @ JobFailed(e) if sender == self =>
logger.debug(s"Task failed at ${self.path}")
context.parent ! event
requester ! SQLReply(command.requestId, ErrorSQLResult(e.getMessage, Some(new Exception(e.getMessage))))
throw e //Let It Crash: It'll be managed by its supervisor
case JobCompleted if sender == self =>
logger.debug(s"Completed or cancelled ${self.path} task")
context.parent ! JobCompleted
}
private def launchTask: Cancellable[SQLReply] = {
implicit val _: ExecutionContext = ExecutionContext.fromExecutor(new ProlificExecutor)
Cancellable {
val df = xdContext.sql(command.sql)
val rows = if (command.flattenResults)
df.asInstanceOf[XDDataFrame].flattenedCollect() //TODO: Replace this cast by an implicit conversion
else df.collect()
SQLReply(command.requestId, SuccessfulSQLResult(rows, df.schema))
}
}
}
| pmadrigal/Crossdata | server/src/main/scala/com/stratio/crossdata/server/actors/JobActor.scala | Scala | apache-2.0 | 5,627 |
package com.github.mijicd.waes.domain
import com.github.mijicd.waes.TestSpec
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class DecoderSpec extends TestSpec with Decoder {
"Decoder" should "decode base64 content" in {
val content ="dGVzdA=="
decode(content) should equal("test")
}
it should "throw an exception given malformed data" in {
intercept[IllegalArgumentException] {
val content = "dGVzdA="
decode(content) should equal("test")
}
}
}
| mijicd/spray-json-diff | src/test/scala/com/github/mijicd/waes/domain/DecoderSpec.scala | Scala | mit | 539 |
package org.jetbrains.plugins.scala
package lang
package psi
package api
package expr
import org.jetbrains.plugins.scala.lang.psi.api.statements.params._
import com.intellij.psi.PsiElement
import types.ScType
import lexer.ScalaTokenTypes
import types.nonvalue.Parameter
/**
* @author Alexander Podkhalyuzin
* Date: 07.03.2008
*/
/**
* This class can be used in two ways:
* 1. foo(a, b, c)
* 2. foo {expr}
* In second way there is no parentheses, just one block expression.
*/
trait ScArgumentExprList extends ScArguments {
/**
* Expressions applied to appropriate method call (@see ScMethodCall).
*/
def exprs: Seq[ScExpression] = collection.immutable.Seq(findChildrenByClassScala(classOf[ScExpression]).toSeq: _*)
//TODO java helper (should be removed later)
def exprsArray = exprs.toArray
/**
* Number of clause.
* For example: foo()()'()'()
   * then this method returns 3.
*/
def invocationCount: Int
/**
* Reference from which started to invoke method calls.
*/
def callReference: Option[ScReferenceExpression]
/**
* Expression from which we try to invoke call, or apply method.
*/
def callExpression: ScExpression
/**
* Generic call for this argument list if exist
*/
def callGeneric: Option[ScGenericCall]
/**
* Mapping from argument expressions to corresponding parameters, as found during
* applicability checking.
*/
def matchedParameters: Option[Seq[(ScExpression, Parameter)]]
def parameterOf(argExpr: ScExpression): Option[Parameter] = matchedParameters.flatMap {
case params =>
argExpr match {
case a: ScAssignStmt =>
params.find(_._1 == argExpr).map(_._2).orElse(parameterOf(a.getRExpression.getOrElse(return None)))
case _ => params.find(_._1 == argExpr).map(_._2)
}
}
/**
* Return possible applications without using resolve of reference to this call (to avoid SOE)
*/
def possibleApplications: Array[Array[(String, ScType)]]
def missedLastExpr: Boolean = {
var child = getLastChild
while (child != null && child.getNode.getElementType != ScalaTokenTypes.tCOMMA) {
if (child.isInstanceOf[ScExpression]) return false
child = child.getPrevSibling
}
child != null && child.getNode.getElementType == ScalaTokenTypes.tCOMMA
}
def addExpr(expr: ScExpression): ScArgumentExprList
def addExprAfter(expr: ScExpression, anchor: PsiElement): ScArgumentExprList
  def isBraceArgs: Boolean = findChild(classOf[ScBlock]).isDefined
} | consulo/consulo-scala | src/org/jetbrains/plugins/scala/lang/psi/api/expr/ScArgumentExprList.scala | Scala | apache-2.0 | 2,515 |
/*
* The MIT License
*
* Copyright 2015 misakura.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package jp.gr.java_conf.kgd.library.water.scala.core.value
import scala.math.Numeric
import jp.gr.java_conf.kgd.library.water.scala.core.util.DefaultValue._
case class SimplePoint4[@specialized T: Numeric](override var x: T, override var y: T, override var z: T, override var w: T) extends MutablePoint4[T] {
def this(point: Point4[_ <: T]) = this(point.x, point.y, point.z, point.w)
def this() = this(default[T], default[T], default[T], default[T])
override def getXAsInt(): Int = implicitly[Numeric[T]].toInt(x)
override def getYAsInt(): Int = implicitly[Numeric[T]].toInt(y)
override def getZAsInt(): Int = implicitly[Numeric[T]].toInt(z)
override def getWAsInt(): Int = implicitly[Numeric[T]].toInt(w)
override def getXAsLong(): Long = implicitly[Numeric[T]].toLong(x)
override def getYAsLong(): Long = implicitly[Numeric[T]].toLong(y)
override def getZAsLong(): Long = implicitly[Numeric[T]].toLong(z)
override def getWAsLong(): Long = implicitly[Numeric[T]].toLong(w)
override def getXAsDouble(): Double = implicitly[Numeric[T]].toDouble(x)
override def getYAsDouble(): Double = implicitly[Numeric[T]].toDouble(y)
override def getZAsDouble(): Double = implicitly[Numeric[T]].toDouble(z)
override def getWAsDouble(): Double = implicitly[Numeric[T]].toDouble(w)
}
| t-kgd/library-water | water-scala-core/src/main/scala/jp/gr/java_conf/kgd/library/water/scala/core/value/SimplePoint4.scala | Scala | mit | 2,446 |
/*
* Copyright © 2015 Cask Data, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package co.cask.cdap.examples.loganalysis
import java.util.HashMap
import java.util.concurrent.TimeUnit
import co.cask.cdap.api.common.Bytes
import co.cask.cdap.api.dataset.lib.TimePartitionedFileSetArguments
import co.cask.cdap.api.spark.{SparkExecutionContext, SparkMain}
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.slf4j.{Logger, LoggerFactory}
import scala.collection.JavaConversions._
/**
* A spark program which counts the total number of responses for every unique response code
*/
class ResponseCounterProgram extends SparkMain {
override def run(implicit sec: SparkExecutionContext): Unit = {
val sc = new SparkContext
val endTime = sec.getLogicalStartTime
val startTime = endTime - TimeUnit.MINUTES.toMillis(60)
val logsData: RDD[String] = sc.fromStream(LogAnalysisApp.LOG_STREAM, startTime, endTime)
val parsedLogs: RDD[ApacheAccessLog] = logsData.map(x => ApacheAccessLog.parseFromLogLine(x))
parsedLogs
.map(x => (x.getResponseCode, 1L))
.reduceByKey(_ + _)
.map(x => (Bytes.toBytes(x._1), Bytes.toBytes(x._2)))
.saveAsDataset(LogAnalysisApp.RESPONSE_COUNT_STORE)
val outputArgs = new HashMap[String, String]()
TimePartitionedFileSetArguments.setOutputPartitionTime(outputArgs, endTime)
parsedLogs
.map(x => (x.getIpAddress, 1L))
.reduceByKey(_ + _)
.saveAsDataset(LogAnalysisApp.REQ_COUNT_STORE, outputArgs.toMap)
}
}
/**
* Companion object for holding static fields
*/
object ResponseCounterProgram {
private val LOG: Logger = LoggerFactory.getLogger(classOf[ResponseCounterProgram])
} | caskdata/cdap | cdap-examples/LogAnalysis/src/main/scala/co/cask/cdap/examples/loganalysis/ResponseCounterProgram.scala | Scala | apache-2.0 | 2,235 |
package se.ramn.bottfarmen.runner.screen
import com.badlogic.gdx.Gdx
import com.badlogic.gdx.Game
import com.badlogic.gdx.Screen
import com.badlogic.gdx.graphics.GL20
import com.badlogic.gdx.graphics.OrthographicCamera
import se.ramn.bottfarmen.runner.BottfarmenGuiRunner
class MainMenuScreen(val game: BottfarmenGuiRunner) extends ScreenWithVoidImpl {
val camera = new OrthographicCamera
camera.setToOrtho(false, game.width, game.height)
def render(delta: Float): Unit = {
Gdx.gl.glClearColor(0, 0, 0.2f, 1)
Gdx.gl.glClear(GL20.GL_COLOR_BUFFER_BIT)
camera.update()
game.batch.setProjectionMatrix(camera.combined)
game.batch.begin()
game.font.draw(game.batch, "Welcome to Bottfarmen! ", 100, 150)
game.font.draw(game.batch, "Click anywhere to begin", 100, 100)
game.batch.end()
if (Gdx.input.isTouched) {
game.setScreen(new GameScreen(game))
dispose()
}
}
}
| ramn/bottfarmen | common/src/main/scala/runner/screen/MainMenuScreen.scala | Scala | gpl-3.0 | 927 |
package services.analysis
import models.analysis.events.AnalysisResults.{DatingResult, GenericResult}
import models.analysis.events.{Analysis, AnalysisCollection}
import models.analysis.events.EventCategories.Genetic
import no.uio.musit.models.{ActorId, EventId}
import no.uio.musit.models.MuseumCollections.Archeology
import no.uio.musit.security.{AuthenticatedUser, SessionUUID, UserInfo, UserSession}
import no.uio.musit.time.dateTimeNow
import no.uio.musit.test.MusitSpecWithAppPerSuite
import no.uio.musit.test.matchers.{DateTimeMatchers, MusitResultValues}
import org.scalatest.Inspectors.forAll
import org.scalatest.OptionValues
import utils.{AnalysisGenerators, AnalysisValidators}
class AnalysisServiceSpec
extends MusitSpecWithAppPerSuite
with DateTimeMatchers
with MusitResultValues
with OptionValues
with AnalysisGenerators
with AnalysisValidators {
private val defaultUserId = ActorId.generate()
implicit val dummyUser = AuthenticatedUser(
session = UserSession(uuid = SessionUUID.generate()),
userInfo = UserInfo(
id = defaultUserId,
secondaryIds = Some(Seq("[email protected]")),
name = Some("Darth Vader"),
email = None,
picture = None
),
groups = Seq.empty
)
val service = fromInstanceCache[AnalysisService]
"The AnalysisService" should {
"return all known event types" in {
val res = service.getAllTypes.futureValue.successValue
res.size mustBe 107
}
"return all known event types for a given event category" in {
val res = service.getTypesFor(Genetic).futureValue.successValue
res.size mustBe 7
}
"return all known event types for a museum collection" in {
val res = service.getTypesFor(Archeology.uuid).futureValue.successValue
res.size mustBe 83
}
"successfully add a new Analysis" in {
val cmd = dummySaveAnalysisCmd()
service.add(cmd).futureValue.successValue mustBe EventId(1L)
}
"successfully add a new AnalysisCollection" in {
val cmd = dummySaveAnalysisCollectionCmd(oids = Seq(oid1, oid2, oid3))
service.add(cmd).futureValue.successValue mustBe EventId(2L)
}
"return an analysis by its EventId" in {
val res = service.findById(EventId(1L)).futureValue.successValue.value
res.analysisTypeId mustBe dummyAnalysisTypeId
res.doneBy mustBe Some(dummyActorId)
res.doneDate mustApproximate Some(dateTimeNow)
res.note mustBe Some("This is from a SaveAnalysis command")
res.objectId must not be empty
res.administrator mustBe Some(dummyActorId)
res.responsible mustBe Some(dummyActorId)
res.completedBy mustBe empty
res.completedDate mustBe empty
}
"return all child Analysis events for an AnalyisCollection" in {
val res = service.childrenFor(EventId(2L)).futureValue.successValue
res.size mustBe 3
forAll(res) { r =>
r.analysisTypeId mustBe dummyAnalysisTypeId
r.doneBy mustBe Some(dummyActorId)
r.doneDate mustApproximate Some(dateTimeNow)
r.note mustBe Some("This is from a SaveAnalysisCollection command")
r.objectId must not be empty
r.administrator mustBe Some(dummyActorId)
r.responsible mustBe Some(dummyActorId)
r.completedBy mustBe empty
r.completedDate mustBe empty
}
}
"return all analysis events associated with the given ObjectUUID" in {
val res = service.findByObject(oid1).futureValue.successValue
res.size mustBe 2
forAll(res) { r =>
r.analysisTypeId mustBe dummyAnalysisTypeId
r.doneBy mustBe Some(dummyActorId)
r.doneDate mustApproximate Some(dateTimeNow)
r.note must not be empty
r.note.value must startWith("This is from a SaveAnalysis")
r.objectId must not be empty
r.administrator mustBe Some(dummyActorId)
r.responsible mustBe Some(dummyActorId)
r.completedBy mustBe empty
r.completedDate mustBe empty
}
}
"successfully add a result to an Analysis" in {
val gr = dummyGenericResult(
extRef = Some(Seq("foobar", "fizzbuzz")),
comment = Some("This is a generic result")
)
service.addResult(EventId(1L), gr).futureValue.successValue mustBe EventId(1L)
val ares = service.findById(EventId(1L)).futureValue.successValue.value
ares match {
case a: Analysis =>
a.result must not be empty
validateResult(a.result.value, gr, Some(defaultUserId), Some(dateTimeNow))
case other =>
fail(s"Expected an ${classOf[Analysis]} but got ${other.getClass}")
}
}
"successfully add a result to an AnalysisCollection and its children" in {
val dr = dummyDatingResult(
extRef = Some(Seq("foobar", "fizzbuzz")),
comment = Some("This is a generic result"),
age = Some("really old")
)
service.addResult(EventId(2L), dr).futureValue.successValue mustBe EventId(2L)
val ares = service.findById(EventId(2L)).futureValue.successValue.value
ares match {
case a: AnalysisCollection =>
a.result must not be empty
a.result.value match {
case r: DatingResult =>
validateResult(r, dr, Some(defaultUserId), Some(dateTimeNow))
case boo =>
fail(s"Expected a ${classOf[DatingResult]} but got ${boo.getClass}")
}
forAll(a.events)(_.result mustBe empty)
case other =>
fail(s"Expected an ${classOf[AnalysisCollection]} but got ${other.getClass}")
}
}
"successfully update the result for an Analysis" in {
val eid = EventId(1L)
val orig = service.findById(eid).futureValue.successValue.value
orig mustBe an[Analysis]
val origRes = orig.asInstanceOf[Analysis].result.value
origRes mustBe a[GenericResult]
val upd = origRes.asInstanceOf[GenericResult].copy(comment = Some("updated"))
service.updateResult(eid, upd).futureValue.isSuccess mustBe true
val updRes = service.findById(eid).futureValue.successValue.value
updRes mustBe an[Analysis]
updRes.asInstanceOf[Analysis].result.value match {
case gr: GenericResult =>
gr mustBe upd
case err =>
fail(s"Expected ${classOf[GenericResult]}, got ${err.getClass}")
}
}
"successfully update the result for an AnalysisCollection" in {
val eid = EventId(2L)
val orig = service.findById(eid).futureValue.successValue.value
orig mustBe an[AnalysisCollection]
val origRes = orig.asInstanceOf[AnalysisCollection].result.value
origRes mustBe a[DatingResult]
val upd = origRes.asInstanceOf[DatingResult].copy(comment = Some("updated"))
service.updateResult(eid, upd).futureValue.isSuccess mustBe true
val updRes = service.findById(eid).futureValue.successValue.value
updRes mustBe an[AnalysisCollection]
updRes.asInstanceOf[AnalysisCollection].result.value match {
case gr: DatingResult =>
gr mustBe upd
case err =>
fail(s"Expected ${classOf[GenericResult]}, got ${err.getClass}")
}
}
"successfully update an Analysis" in {
val expectedId = EventId(6L)
val cmd = dummySaveAnalysisCmd()
service.add(cmd).futureValue.successValue mustBe expectedId
val updCmd = cmd.copy(note = Some("This is an updated note"))
val res = service.update(defaultMid, expectedId, updCmd).futureValue.successValue
res must not be empty
res.value match {
case a: Analysis =>
a.note mustBe updCmd.note
a.updatedBy mustBe Some(defaultUserId)
a.updatedDate mustApproximate Some(dateTimeNow)
case other =>
fail(s"Expected an ${classOf[Analysis]} but got ${other.getClass}")
}
}
"successfully update an AnalysisCollection" in {
val expectedId = EventId(7L)
val cmd = dummySaveAnalysisCollectionCmd()
service.add(cmd).futureValue.successValue mustBe expectedId
val updCmd = cmd.copy(note = Some("This is an updated note"))
val res = service.update(defaultMid, expectedId, updCmd).futureValue.successValue
res must not be empty
res.value match {
case a: AnalysisCollection =>
a.note mustBe updCmd.note
a.updatedBy mustBe Some(defaultUserId)
a.updatedDate mustApproximate Some(dateTimeNow)
case other =>
fail(s"Expected an ${classOf[Analysis]} but got ${other.getClass}")
}
}
}
}
| kpmeen/musit | service_management/test/services/analysis/AnalysisServiceSpec.scala | Scala | gpl-2.0 | 8,648 |
package extracells.integration.opencomputers
import appeng.api.AEApi
import appeng.api.config.Actionable
import appeng.api.implementations.tiles.IWirelessAccessPoint
import appeng.api.networking.security.MachineSource
import appeng.api.networking.storage.IStorageGrid
import appeng.api.networking.{IGrid, IGridHost, IGridNode}
import appeng.api.storage.IMEMonitor
import appeng.api.storage.data.{IAEFluidStack, IAEItemStack}
import appeng.api.util.WorldCoord
import appeng.tile.misc.TileSecurity
import li.cil.oc.api.Network
import li.cil.oc.api.driver.EnvironmentHost
import li.cil.oc.api.internal.{Agent, Database, Drone, Robot}
import li.cil.oc.api.machine.{Arguments, Callback, Context}
import li.cil.oc.api.network._
import li.cil.oc.api.prefab.ManagedEnvironment
import li.cil.oc.integration.{appeng, ec}
import li.cil.oc.server.network.Component
import net.minecraft.item.ItemStack
import net.minecraftforge.common.util.ForgeDirection
import net.minecraftforge.fluids.FluidContainerRegistry
import scala.collection.JavaConversions._
class UpgradeAE(host: EnvironmentHost) extends ManagedEnvironment with appeng.NetworkControl[TileSecurity] with ec.NetworkControl[TileSecurity]{
val robot: Robot =
if (host.isInstanceOf[Robot])
host.asInstanceOf[Robot]
else
null
val drone: Drone =
if (host.isInstanceOf[Drone])
host.asInstanceOf[Drone]
else
null
var isActive = false
val agent: Agent = host.asInstanceOf[Agent]
  setNode(Network.newNode(this, Visibility.Network).withConnector().withComponent("upgrade_me", Visibility.Neighbors).create())
def getComponent: ItemStack = {
if (robot != null)
return robot.getStackInSlot(robot.componentSlot(node.address))
else if(drone != null){
val i = drone.internalComponents.iterator
while (i.hasNext){
val item = i.next
if(item != null && item.getItem == ItemUpgradeAE)
return item
}
}
null
}
def getSecurity: IGridHost = {
if (host.world.isRemote) return null
val component = getComponent
val sec = AEApi.instance.registries.locatable.getLocatableBy(getAEKey(component)).asInstanceOf[IGridHost]
if(checkRange(component, sec))
sec
else
null
}
def checkRange(stack: ItemStack, sec: IGridHost): Boolean = {
if (sec == null) return false
val gridNode: IGridNode = sec.getGridNode(ForgeDirection.UNKNOWN)
if (gridNode == null) return false
val grid = gridNode.getGrid
if(grid == null) return false
stack.getItemDamage match{
case 0 =>
grid.getMachines(AEApi.instance.definitions.blocks.wireless.maybeEntity.get.asInstanceOf[Class[_ <: IGridHost]]).iterator.hasNext
case 1 =>
val gridBlock = gridNode.getGridBlock
if (gridBlock == null) return false
val loc = gridBlock.getLocation
if (loc == null) return false
for (node <- grid.getMachines(AEApi.instance.definitions.blocks.wireless.maybeEntity.get.asInstanceOf[Class[_ <: IGridHost]])) {
val accessPoint: IWirelessAccessPoint = node.getMachine.asInstanceOf[IWirelessAccessPoint]
val distance: WorldCoord = accessPoint.getLocation.subtract(agent.xPosition.toInt, agent.yPosition.toInt, agent.zPosition.toInt)
val squaredDistance: Int = distance.x * distance.x + distance.y * distance.y + distance.z * distance.z
val range = accessPoint.getRange
if (squaredDistance <= range * range) return true
}
false
case _ =>
val gridBlock = gridNode.getGridBlock
if (gridBlock == null) return false
val loc = gridBlock.getLocation
if (loc == null) return false
for (node <- grid.getMachines(AEApi.instance.definitions.blocks.wireless.maybeEntity.get.asInstanceOf[Class[_ <: IGridHost]])) {
val accessPoint: IWirelessAccessPoint = node.getMachine.asInstanceOf[IWirelessAccessPoint]
val distance: WorldCoord = accessPoint.getLocation.subtract(agent.xPosition.toInt, agent.yPosition.toInt, agent.zPosition.toInt)
val squaredDistance: Int = distance.x * distance.x + distance.y * distance.y + distance.z * distance.z
val range = accessPoint.getRange / 2
if (squaredDistance <= range * range) return true
}
false
}
}
def getGrid: IGrid = {
if (host.world.isRemote) return null
val securityTerminal = getSecurity
if (securityTerminal == null) return null
val gridNode: IGridNode = securityTerminal.getGridNode(ForgeDirection.UNKNOWN)
if (gridNode == null) return null
gridNode.getGrid
}
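  /** Reads the wireless encryption key stored on the upgrade item; returns 0L if the key is missing or not a valid number. */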
def getAEKey(stack: ItemStack): Long = {
try {
return WirelessHandlerUpgradeAE.getEncryptionKey(stack).toLong
}
catch {
case ignored: Throwable => {
}
}
0L
}
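  /** Resolves the linked security station's tile entity, throwing a SecurityException whenever any step of the lookup fails. */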
override def tile: TileSecurity = {
val sec = getSecurity
if (sec == null)
throw new SecurityException("No Security Station")
val node = sec.getGridNode(ForgeDirection.UNKNOWN)
if (node == null) throw new SecurityException("No Security Station")
val gridBlock = node.getGridBlock
if (gridBlock == null) throw new SecurityException("No Security Station")
val coord = gridBlock.getLocation
if (coord == null) throw new SecurityException("No Security Station")
val tileSecurity = coord.getWorld.getTileEntity(coord.x, coord.y, coord.z).asInstanceOf[TileSecurity]
if (tileSecurity == null) throw new SecurityException("No Security Station")
tileSecurity
}
def getFluidInventory: IMEMonitor[IAEFluidStack] = {
val grid = getGrid
if (grid == null) return null
val storage: IStorageGrid = grid.getCache(classOf[IStorageGrid])
if (storage == null) return null
storage.getFluidInventory
}
def getItemInventory: IMEMonitor[IAEItemStack] = {
val grid = getGrid
if (grid == null) return null
val storage: IStorageGrid = grid.getCache(classOf[IStorageGrid])
if (storage == null) return null
storage.getItemInventory
}
@Callback(doc = "function([number:amount]):number -- Transfer selected items to your ae system.")
def sendItems(context: Context, args: Arguments): Array[AnyRef] = {
val selected = agent.selectedSlot
val invRobot = agent.mainInventory
if (invRobot.getSizeInventory <= 0) return Array(0.underlying.asInstanceOf[AnyRef])
val stack = invRobot.getStackInSlot(selected)
val inv = getItemInventory
if (stack == null || inv == null) return Array(0.underlying.asInstanceOf[AnyRef])
val amount = Math.min(args.optInteger(0, 64), stack.stackSize)
val stack2 = stack.copy
stack2.stackSize = amount
    val notInjected = inv.injectItems(AEApi.instance.storage.createItemStack(stack2), Actionable.MODULATE, new MachineSource(tile))
    if (notInjected == null){
      stack.stackSize -= amount
      if (stack.stackSize <= 0)
        invRobot.setInventorySlotContents(selected, null)
      else
        invRobot.setInventorySlotContents(selected, stack)
      return Array(amount.underlying.asInstanceOf[AnyRef])
    }else{
      stack.stackSize = stack.stackSize - amount + notInjected.getStackSize.toInt
      if (stack.stackSize <= 0)
        invRobot.setInventorySlotContents(selected, null)
      else
        invRobot.setInventorySlotContents(selected, stack)
      return Array((stack2.stackSize - notInjected.getStackSize).underlying.asInstanceOf[AnyRef])
}
}
@Callback(doc = "function(database:address, entry:number[, number:amount]):number -- Get items from your ae system.")
def requestItems(context: Context, args: Arguments): Array[AnyRef] = {
val address = args.checkString(0)
val entry = args.checkInteger(1)
val amount = args.optInteger(2, 64)
val selected = agent.selectedSlot
val invRobot = agent.mainInventory
if (invRobot.getSizeInventory <= 0) return Array(0.underlying.asInstanceOf[AnyRef])
val inv = getItemInventory
println(inv)
if (inv == null) return Array(0.underlying.asInstanceOf[AnyRef])
val n: Node = node.network.node(address)
if (n == null) throw new IllegalArgumentException("no such component")
if (!(n.isInstanceOf[Component])) throw new IllegalArgumentException("no such component")
val component: Component = n.asInstanceOf[Component]
val env: Environment = n.host
if (!(env.isInstanceOf[Database])) throw new IllegalArgumentException("not a database")
val database: Database = env.asInstanceOf[Database]
val sel = invRobot.getStackInSlot(selected)
val inSlot =
if (sel == null)
0
else
sel.stackSize
val maxSize =
if (sel == null)
64
else
sel.getMaxStackSize
val stack = database.getStackInSlot(entry - 1)
if(stack == null) return Array(0.underlying.asInstanceOf[AnyRef])
stack.stackSize = Math.min(amount, maxSize - inSlot)
val stack2 = stack.copy
stack2.stackSize = 1
val sel2 =
if (sel != null) {
val sel3 = sel.copy
sel3.stackSize = 1
sel3
}else
null
if(sel != null && !ItemStack.areItemStacksEqual(sel2, stack2)) return Array(0.underlying.asInstanceOf[AnyRef])
val extracted = inv.extractItems(AEApi.instance.storage.createItemStack(stack), Actionable.MODULATE, new MachineSource(tile))
if(extracted == null) return Array(0.underlying.asInstanceOf[AnyRef])
val ext = extracted.getStackSize.toInt
stack.stackSize = inSlot + ext
invRobot.setInventorySlotContents(selected, stack)
Array(ext.underlying.asInstanceOf[AnyRef])
}
  @Callback(doc = "function([number:amount]):number -- Transfer selected fluid to your ae system.")
def sendFluids(context: Context, args: Arguments): Array[AnyRef] = {
val selected = agent.selectedTank
val tanks = agent.tank
if (tanks.tankCount <= 0) return Array(0.underlying.asInstanceOf[AnyRef])
val tank = tanks.getFluidTank(selected)
val inv = getFluidInventory
if (tank == null || inv == null || tank.getFluid == null) return Array(0.underlying.asInstanceOf[AnyRef])
val amount = Math.min(args.optInteger(0, tank.getCapacity), tank.getFluidAmount)
val fluid = tank.getFluid
val fluid2 = fluid.copy
fluid2.amount = amount
    val notInjected = inv.injectItems(AEApi.instance.storage.createFluidStack(fluid2), Actionable.MODULATE, new MachineSource(tile))
    if (notInjected == null){
      tank.drain(amount, true)
      Array(amount.underlying.asInstanceOf[AnyRef])
    }else{
      tank.drain(amount - notInjected.getStackSize.toInt, true)
      Array((amount - notInjected.getStackSize).underlying.asInstanceOf[AnyRef])
}
}
@Callback(doc = "function(database:address, entry:number[, number:amount]):number -- Get fluid from your ae system.")
def requestFluids(context: Context, args: Arguments): Array[AnyRef] = {
val address = args.checkString(0)
val entry = args.checkInteger(1)
val amount = args.optInteger(2, FluidContainerRegistry.BUCKET_VOLUME)
val tanks = agent.tank
val selected = agent.selectedTank
if (tanks.tankCount <= 0) return Array(0.underlying.asInstanceOf[AnyRef])
val tank = tanks.getFluidTank(selected)
val inv = getFluidInventory
if (tank == null || inv == null) return Array(0.underlying.asInstanceOf[AnyRef])
val n: Node = node.network.node(address)
if (n == null) throw new IllegalArgumentException("no such component")
if (!(n.isInstanceOf[Component])) throw new IllegalArgumentException("no such component")
val component: Component = n.asInstanceOf[Component]
val env: Environment = n.host
if (!(env.isInstanceOf[Database])) throw new IllegalArgumentException("not a database")
val database: Database = env.asInstanceOf[Database]
    val fluid = FluidContainerRegistry.getFluidForFilledItem(database.getStackInSlot(entry - 1))
    if (fluid == null) return Array(0.underlying.asInstanceOf[AnyRef])
    fluid.amount = amount
val fluid2 = fluid.copy()
fluid2.amount = tank.fill(fluid, false)
if (fluid2.amount == 0) return Array(0.underlying.asInstanceOf[AnyRef])
val extracted = inv.extractItems(AEApi.instance.storage.createFluidStack(fluid2), Actionable.MODULATE, new MachineSource(tile))
    if (extracted == null) return Array(0.underlying.asInstanceOf[AnyRef])
Array(tank.fill(extracted.getFluidStack, true).underlying.asInstanceOf[AnyRef])
}
  @Callback(doc = "function():boolean -- Return true if the card is linked to your ae network.")
def isLinked(context: Context, args: Arguments): Array[AnyRef] = {
val isLinked = getGrid != null
Array(boolean2Boolean(isLinked))
}
override def update() {
super.update()
if (host.world.getTotalWorldTime % 10 == 0 && isActive) {
if (!node.asInstanceOf[Connector].tryChangeBuffer(-getEnergy)) {
isActive = false
}
}
}
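  /** Energy drained from the machine's buffer on each periodic check while a computer is running; stronger upgrade tiers cost more. */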
def getEnergy = {
val c = getComponent
if (c == null)
      0.0
    else
      c.getItemDamage match{
        case 0 => 0.6
        case 1 => 0.3
        case _ => 0.05
}
}
override def onMessage(message: Message) {
super.onMessage(message)
if (message.name == "computer.stopped") {
isActive = false
}
else if (message.name == "computer.started") {
isActive = true
}
}
}
| ieatbedrock/Bedrocks-AE2-addons | src/main/scala/extracells/integration/opencomputers/UpgradeAE.scala | Scala | mit | 13,192 |
package spotlight.analysis
import scala.concurrent.{ ExecutionContext, Future }
import scala.util.Try
import akka.actor.{ Actor, ActorRef, Props }
import akka.agent.Agent
import akka.event.LoggingReceive
import cats.data.Kleisli
import cats.instances.future._
import cats.instances.list._
import cats.syntax.traverse._
import com.persist.logging._
import com.typesafe.config.Config
import net.ceedubs.ficus.Ficus._
import demesne.{ AggregateRootType, BoundedContext, DomainModel, StartTask }
import omnibus.akka.envelope._
import omnibus.akka.metrics.InstrumentedActor
import omnibus.commons.{ AllIssuesOr, ErrorOr }
import omnibus.commons.concurrent._
import spotlight.Settings
import spotlight.analysis.algorithm._
import spotlight.analysis.algorithm.statistical._
import spotlight.model.outlier.AnalysisPlan
/** Created by rolfsd on 9/29/15.
*/
object DetectionAlgorithmRouter extends ClassLogging {
sealed trait RouterProtocol
case class RegisterAlgorithmReference( algorithm: String, handler: ActorRef ) extends RouterProtocol
case class RegisterAlgorithmRootType(
algorithm: String,
algorithmRootType: AggregateRootType,
model: DomainModel,
sharded: Boolean = true
) extends RouterProtocol
case class AlgorithmRegistered( algorithm: String ) extends RouterProtocol
val ContextKey = 'DetectionAlgorithmRouter
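  /** Start task that loads the user-configured algorithm classes, registers their aggregate root types with the router registry, and hands those root types to the bounded context. */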
def startTask( configuration: Config )( implicit ec: ExecutionContext ): StartTask = {
StartTask.withBoundTask( "load user algorithms" ) { bc: BoundedContext ⇒
val t: Future[StartTask.Result] = {
for {
loaded ← Registry.loadAlgorithms( bc, configuration.resolve() )
registered ← Registry.registerWithRouter( loaded )
_ = log.info( Map( "@msg" → "starting with new algorithm routes", "loaded" → loaded.mkString( "[", ", ", "]" ) ) )
} yield {
log.info( Map( "@msg" → "DetectionAlgorithmRouter routing table", "routing-table" → registered.mkString( "[", ", ", "]" ) ) )
StartTask.Result( rootTypes = registered.values.toSet )
}
}
t.toTask
}
}
def props( plan: AnalysisPlan.Summary, routingTable: Map[String, AlgorithmRoute] ): Props = {
Props( new Default( plan, routingTable ) )
}
val DispatcherPath: String = OutlierDetection.DispatcherPath
  def name( suffix: String ): String = "router" //"router:" + suffix //todo suffix is redundant given its inclusion in foreman naming
trait Provider {
def initialRoutingTable: Map[String, AlgorithmRoute]
def plan: AnalysisPlan.Summary
}
private class Default(
override val plan: AnalysisPlan.Summary,
override val initialRoutingTable: Map[String, AlgorithmRoute]
) extends DetectionAlgorithmRouter with Provider
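  /** Process-wide registry of algorithm name to aggregate root type, kept behind an Akka Agent so lookups and registrations stay consistent across threads. */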
object Registry {
private lazy val algorithmRootTypes: Agent[Map[String, AggregateRootType]] = {
import scala.concurrent.ExecutionContext.Implicits.global
Agent( Map.empty[String, AggregateRootType] )
}
def registerWithRouter( algorithmRoots: List[( String, AggregateRootType )] ): Future[Map[String, AggregateRootType]] = {
//TODO: DMR: WORK HERE TO LOG
algorithmRootTypes alter { _ ++ algorithmRoots }
}
def rootTypeFor( algorithm: String )( implicit ec: ExecutionContext ): Option[AggregateRootType] = {
import scala.concurrent.duration._
unsafeRootTypeFor( algorithm ) orElse { scala.concurrent.Await.result( futureRootTypeFor( algorithm ), 30.seconds ) }
}
def unsafeRootTypeFor( algorithm: String ): Option[AggregateRootType] = {
log.debug(
Map(
"@msg" → "unsafe pull of algorithm root types",
"root-types" → algorithmRootTypes.get.keySet.mkString( "[", ", ", "]" )
)
)
algorithmRootTypes.get() get algorithm
}
def futureRootTypeFor( algorithm: String )( implicit ec: ExecutionContext ): Future[Option[AggregateRootType]] = {
algorithmRootTypes.future() map { rootTable ⇒
log.debug(
Map(
"@msg" → "safe pull of algorithm root types",
"root-types" → rootTable.keySet.mkString( "[", ", ", "]" )
)
)
rootTable get algorithm
}
}
type AlgorithmRootType = ( String, AggregateRootType )
type EC[_] = ExecutionContext
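    /** Resolves user algorithm classes from the configuration, reflects each companion module to obtain its aggregate root type, and loads those root types into the bounded context. */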
def loadAlgorithms[_: EC]( boundedContext: BoundedContext, configuration: Config ): Future[List[AlgorithmRootType]] = {
type AlgorithmClass = Class[_ <: Algorithm[_]]
type AlgorithmClasses = Map[String, AlgorithmClass]
def algorithmRootTypeFor( clazz: AlgorithmClass ): Future[AggregateRootType] = {
Future fromTry[AggregateRootType] {
Try {
import scala.reflect.runtime.{ universe ⇒ ru }
val loader = getClass.getClassLoader
val mirror = ru runtimeMirror loader
val algorithmSymbol = mirror moduleSymbol clazz
val algorithmMirror = mirror reflectModule algorithmSymbol
val algorithm = algorithmMirror.instance.asInstanceOf[Algorithm[_]]
algorithm.module.rootType
}
}
}
val userAlgorithms = Kleisli[Future, ( DomainModel, Config ), ( DomainModel, AlgorithmClasses )] {
case ( m, c ) ⇒ {
Settings
.userAlgorithmClassesFrom( c )
.map { as ⇒ Future successful ( m, as ) }
.valueOr { exs ⇒
exs map { ex ⇒ log.error( "loading user algorithm failed", ex ) }
Future failed exs.head
}
}
}
val collectRootTypes = Kleisli[Future, ( DomainModel, AlgorithmClasses ), List[AlgorithmRootType]] {
case ( m, acs ) ⇒
acs.toList traverse {
case ( a, c ) ⇒ {
algorithmRootTypeFor( c ) map { rt ⇒
log.debug(
Map(
"@msg" → "collecting unknown algorithms",
"algorithm" → a,
"fqcn" → c.getName,
"identified-root-type" → rt.toString,
"is-known" → m.rootTypes.contains( rt )
)
)
( a, rt )
}
}
}
}
val loadRootTypesIntoModel = Kleisli[Future, List[AlgorithmRootType], List[AlgorithmRootType]] { arts ⇒
boundedContext
.addAggregateTypes( arts.map( _._2 ).toSet )
.map { _ ⇒
log.debug(
Map(
"@msg" → "loaded algorithm root types into bounded context",
"algorithm root types" → arts.mkString( "[", ", ", "]" )
)
)
arts
}
}
// val registerWithRouter = kleisli[Future, List[AlgorithmRootType], List[AlgorithmRootType]] { arts ⇒
// for {
// registered ← Registry.registerWithRouter( arts )
// _ = log.debug( Map( "@msg" → "registered with router", "algorithm root types" → registered.mkString( "[", ", ", "]" ) ) )
// } yield registered.toList
// }
val addModel = Kleisli[Future, ( BoundedContext, Config ), ( DomainModel, Config )] {
case ( bc, c ) ⇒ bc.futureModel map { ( _, c ) }
}
val load = addModel andThen userAlgorithms andThen collectRootTypes andThen loadRootTypesIntoModel // andThen registerWithRouter
load.run( boundedContext, configuration )
}
}
}
class DetectionAlgorithmRouter extends Actor with EnvelopingActor with InstrumentedActor with ActorLogging {
provider: DetectionAlgorithmRouter.Provider ⇒
import DetectionAlgorithmRouter._
var routingTable: Map[String, AlgorithmRoute] = provider.initialRoutingTable
def addRoute( algorithm: String, resolver: AlgorithmRoute ): Unit = { routingTable += ( algorithm → resolver ) }
log.debug(
Map(
"@msg" → "created routing-table",
"self" → self.path.name,
"routing-table" → routingTable.mkString( "[", ", ", "]" )
)
)
def contains( algorithm: String ): Boolean = {
val found = routingTable contains algorithm
log.debug(
Map(
"@msg" → "looking for",
"self" → self.path.name,
"algorithm" → algorithm,
"routing-keys" → routingTable.keys.mkString( "[", ", ", "]" ),
"found" → found
)
)
found
}
val registration: Receive = {
case RegisterAlgorithmReference( algorithm, handler ) ⇒ {
addRoute( algorithm, AlgorithmRoute.DirectRoute( handler ) )
sender() !+ AlgorithmRegistered( algorithm )
}
case RegisterAlgorithmRootType( algorithm, algorithmRootType, model, true ) ⇒ {
log.debug(
Map(
"@msg" → "received RegisterAlgorithmRootType with sharding",
"algorithm" → algorithm,
"algorithm-root-type" → algorithmRootType.name,
"model" → model.toString
)
)
addRoute( algorithm, AlgorithmRoute.routeFor( plan, algorithmRootType )( model ) )
sender() !+ AlgorithmRegistered( algorithm )
}
case RegisterAlgorithmRootType( algorithm, algorithmRootType, model, _ ) ⇒ {
addRoute( algorithm, AlgorithmRoute.RootTypeRoute( plan, algorithmRootType, model ) )
sender() !+ AlgorithmRegistered( algorithm )
}
}
val routing: Receive = {
case m: DetectUsing if contains( m.algorithm ) ⇒ routingTable( m.algorithm ) forward m
}
override val receive: Receive = LoggingReceive { around( routing orElse registration ) }
override def unhandled( message: Any ): Unit = {
message match {
case m: DetectUsing ⇒ {
log.error(
Map(
"@msg" → "cannot route unregistered algorithm",
"algorithm" → m.algorithm,
"routing-keys" → routingTable.keySet.mkString( "[", ", ", "]" ),
"found" → contains( m.algorithm )
)
)
}
case m ⇒ {
log.error(
Map(
"@msg" → "router ignoring unrecognized message",
"message" → m.toString,
"routing-keys" → routingTable.keySet.mkString( "[", ", ", "]" )
)
)
}
}
}
}
| dmrolfs/lineup | core/src/main/scala/spotlight/analysis/DetectionAlgorithmRouter.scala | Scala | mit | 10,264 |
package org.powlab.jeye.decode.pattern.stream
import scala.collection.mutable.ArrayBuffer
import org.powlab.jeye.decode.graph.OpcodeDetails._
import org.powlab.jeye.decode.graph.GroupOpcodeNode
import org.powlab.jeye.decode.graph.OpcodeNode
import org.powlab.jeye.decode.graph.OpcodeTree
import org.powlab.jeye.decode.processor.load.LoadInformator._
import org.powlab.jeye.decode.processor.math.MathInformator.isIncrementNode
/**
* Inc + Load -> ++int
 * The group consists of Inc + Load
*/
class IncLoadPreStreamPattern extends StreamPattern {
def details(resolvedNode: OpcodeNode, tree: OpcodeTree): OpcodeDetail = {
new IncDetail(DETAIL_INC_LOAD_PRE, resolvedNode.runtimeOpcode.values(1), false)
}
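  /** Groups an increment opcode with the immediately following load of the same local variable into a single pre-increment node; returns null when the pattern does not match. */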
def resolve(loadNode: OpcodeNode, tree: OpcodeTree): GroupOpcodeNode = {
if (!isBaseLoadNode(loadNode) || tree.previewCount(loadNode) != 1) {
return null
}
val incNode = tree.preview(loadNode)
if (! isIncrementNode(incNode)) {
return null
}
if (getBaseLoadOpcodeIndex(loadNode.runtimeOpcode) != incNode.runtimeOpcode.values(0)) {
return null
}
val buffer = ArrayBuffer(incNode, loadNode)
val position = tree.nextPosition(incNode)
new GroupOpcodeNode(buffer, false, position, 0)
}
}
| powlab/jeye | src/main/scala/org/powlab/jeye/decode/pattern/stream/IncLoadPreStreamPattern.scala | Scala | apache-2.0 | 1,277 |
package com.signalcollect
import com.signalcollect.interfaces.AggregationOperation
import scala.collection.immutable.SortedMap
/** Container for the deliver and collect duration measurements */
case class ActivityTime(signal: Int, deliver: Int, collect: Int) extends Ordered[ActivityTime] {
override def toString: String =
f"signal: ${signal}ns, deliver: ${deliver}ns, collect: ${collect}ns"
def compare(that: ActivityTime) = {
(that.signal + that.deliver + that.collect) - (this.signal + this.deliver + this.collect)
}
}
/**
* Finds the vertices in the graph which were active for the longest duration
*
* @param n the number of top vertices to find
*/
class TopActivityAggregator[Id](n: Int)
extends AggregationOperation[SortedMap[ActivityTime, Id]] {
type ActivityMap = SortedMap[ActivityTime, Id]
def extract(v: Vertex[_, _, _, _]): ActivityMap = v match {
case t: Timeable[Id, _, _, _] =>
SortedMap((ActivityTime(t.signalTime, t.deliverTime, t.collectTime) -> t.id))
case _ =>
SortedMap[ActivityTime, Id]()
}
def reduce(activities: Stream[ActivityMap]): ActivityMap = {
activities.foldLeft(SortedMap[ActivityTime, Id]()) { (acc, m) => acc ++ m }.take(n)
}
}
/** Allows measuring how long a vertex spends in signal, deliverSignal and collect operations. */
trait Timeable[Id, State, GraphIdUpperBound, GraphSignalUpperBound] extends Vertex[Id, State, GraphIdUpperBound, GraphSignalUpperBound] {
var signalTime: Int = 0
var deliverTime: Int = 0
var collectTime: Int = 0
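  /** Runs the given block and returns its result together with the elapsed time in nanoseconds. */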
def time[R](block: => R): (R, Int) = {
val t0 = System.nanoTime()
val result = block
val t1 = System.nanoTime()
(result, (t1 - t0).toInt)
}
abstract override def executeSignalOperation(graphEditor: GraphEditor[GraphIdUpperBound, GraphSignalUpperBound]): Unit = {
val (_, t) = time(super.executeSignalOperation(graphEditor))
signalTime += t
}
abstract override def deliverSignalWithSourceId(signal: GraphSignalUpperBound, sourceId: GraphIdUpperBound,
graphEditor: GraphEditor[GraphIdUpperBound, GraphSignalUpperBound]): Boolean = {
val (result, t) = time(super.deliverSignalWithSourceId(signal, sourceId, graphEditor))
deliverTime += t
result
}
abstract override def deliverSignalWithoutSourceId(signal: GraphSignalUpperBound,
graphEditor: GraphEditor[GraphIdUpperBound, GraphSignalUpperBound]): Boolean = {
val (result, t) = time(super.deliverSignalWithoutSourceId(signal, graphEditor))
deliverTime += t
result
}
abstract override def executeCollectOperation(graphEditor: GraphEditor[GraphIdUpperBound, GraphSignalUpperBound]): Unit = {
val (_, t) = time(super.executeCollectOperation(graphEditor))
collectTime += t
}
}
| danihegglin/DynDCO | src/main/scala/com/signalcollect/Timeable.scala | Scala | apache-2.0 | 2,720 |
/* Copyright (C) 2008-2014 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.tutorial
import scala.collection.mutable.ArrayBuffer
import java.io.File
import cc.factorie.app.nlp.lexicon.StopWords
import cc.factorie.app.strings.alphaSegmenter
import cc.factorie.directed._
import cc.factorie.variable._
/**
 * LDA example using collapsed Gibbs sampling; very flexible.
*/
object SimpleLDA {
val numTopics = 10
implicit val model = DirectedModel()
object ZDomain extends DiscreteDomain(numTopics)
object ZSeqDomain extends DiscreteSeqDomain { def elementDomain = ZDomain }
class Zs(len:Int) extends DiscreteSeqVariable(len) { def domain = ZSeqDomain }
object WordSeqDomain extends CategoricalSeqDomain[String]
val WordDomain = WordSeqDomain.elementDomain
class Words(strings:Seq[String]) extends CategoricalSeqVariable(strings) {
def domain = WordSeqDomain
def zs = model.parentFactor(this).asInstanceOf[PlatedCategoricalMixture.Factor]._3
}
class Document(val file:String, val theta:ProportionsVar, strings:Seq[String]) extends Words(strings)
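  // Symmetric Dirichlet hyperparameters: beta is the prior over each topic's word proportions, alphas the prior over each document's topic proportions.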
val beta = MassesVariable.growableUniform(WordDomain, 0.1)
val alphas = MassesVariable.dense(numTopics, 0.1)
def main(args: Array[String]): Unit = {
implicit val random = new scala.util.Random(0)
val directories = if (args.length > 0) args.toList else List("12", "11", "10", "09", "08").take(1).map("/Users/mccallum/research/data/text/nipstxt/nips"+_)
val phis = Mixture(numTopics)(ProportionsVariable.growableDense(WordDomain) ~ Dirichlet(beta))
val documents = new ArrayBuffer[Document]
for (directory <- directories) {
for (file <- new File(directory).listFiles; if file.isFile) {
val theta = ProportionsVariable.dense(numTopics) ~ Dirichlet(alphas)
val tokens = alphaSegmenter(file).map(_.toLowerCase).filter(!StopWords.contains(_)).toSeq
val zs = new Zs(tokens.length) :~ PlatedDiscrete(theta)
documents += new Document(file.toString, theta, tokens) ~ PlatedCategoricalMixture(phis, zs)
}
}
val collapse = new ArrayBuffer[Var]
collapse += phis
collapse ++= documents.map(_.theta)
val sampler = new CollapsedGibbsSampler(collapse, model)
for (i <- 1 to 20) {
for (doc <- documents) sampler.process(doc.zs)
}
}
}
| digitalreasoning/factorie-scala-210 | src/main/scala/cc/factorie/tutorial/SimpleLDA.scala | Scala | apache-2.0 | 2,991 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.log
import java.io.File
import java.nio.file.Files
import java.util.Properties
import kafka.server.{BrokerTopicStats, LogDirFailureChannel}
import kafka.utils._
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.record._
import org.apache.kafka.common.utils.Utils
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.{AfterEach, Test}
import scala.collection.mutable
/**
* Unit tests for the log cleaning logic
*/
class LogCleanerManagerTest extends Logging {
val tmpDir = TestUtils.tempDir()
val tmpDir2 = TestUtils.tempDir()
val logDir = TestUtils.randomPartitionLogDir(tmpDir)
val logDir2 = TestUtils.randomPartitionLogDir(tmpDir)
val topicPartition = new TopicPartition("log", 0)
val topicPartition2 = new TopicPartition("log2", 0)
val logProps = new Properties()
logProps.put(LogConfig.SegmentBytesProp, 1024: java.lang.Integer)
logProps.put(LogConfig.SegmentIndexBytesProp, 1024: java.lang.Integer)
logProps.put(LogConfig.CleanupPolicyProp, LogConfig.Compact)
val logConfig = LogConfig(logProps)
val time = new MockTime(1400000000000L, 1000L) // Tue May 13 16:53:20 UTC 2014 for `currentTimeMs`
val offset = 999
val cleanerCheckpoints: mutable.Map[TopicPartition, Long] = mutable.Map[TopicPartition, Long]()
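  // Test double that records cleaner checkpoints in the in-memory cleanerCheckpoints map instead of writing checkpoint files to disk.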
class LogCleanerManagerMock(logDirs: Seq[File],
logs: Pool[TopicPartition, UnifiedLog],
logDirFailureChannel: LogDirFailureChannel) extends LogCleanerManager(logDirs, logs, logDirFailureChannel) {
override def allCleanerCheckpoints: Map[TopicPartition, Long] = {
cleanerCheckpoints.toMap
}
override def updateCheckpoints(dataDir: File, partitionToUpdateOrAdd: Option[(TopicPartition,Long)] = None,
partitionToRemove: Option[TopicPartition] = None): Unit = {
assert(partitionToRemove.isEmpty, "partitionToRemove argument with value not yet handled")
val (tp, offset) = partitionToUpdateOrAdd.getOrElse(
throw new IllegalArgumentException("partitionToUpdateOrAdd==None argument not yet handled"))
cleanerCheckpoints.put(tp, offset)
}
}
@AfterEach
def tearDown(): Unit = {
Utils.delete(tmpDir)
}
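  // Builds one compacted log per partition; each successive partition gets batchIncrement more single-record batches, so later partitions have a larger cleanable range.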
private def setupIncreasinglyFilthyLogs(partitions: Seq[TopicPartition],
startNumBatches: Int,
batchIncrement: Int): Pool[TopicPartition, UnifiedLog] = {
val logs = new Pool[TopicPartition, UnifiedLog]()
var numBatches = startNumBatches
for (tp <- partitions) {
val log = createLog(2048, LogConfig.Compact, topicPartition = tp)
logs.put(tp, log)
writeRecords(log, numBatches = numBatches, recordsPerBatch = 1, batchesPerSegment = 5)
numBatches += batchIncrement
}
logs
}
@Test
def testGrabFilthiestCompactedLogThrowsException(): Unit = {
val tp = new TopicPartition("A", 1)
val logSegmentSize = TestUtils.singletonRecords("test".getBytes).sizeInBytes * 10
val logSegmentsCount = 2
val tpDir = new File(logDir, "A-1")
Files.createDirectories(tpDir.toPath)
val logDirFailureChannel = new LogDirFailureChannel(10)
val config = createLowRetentionLogConfig(logSegmentSize, LogConfig.Compact)
val maxProducerIdExpirationMs = 60 * 60 * 1000
val segments = new LogSegments(tp)
val leaderEpochCache = UnifiedLog.maybeCreateLeaderEpochCache(tpDir, topicPartition, logDirFailureChannel, config.recordVersion, "")
val producerStateManager = new ProducerStateManager(topicPartition, tpDir, maxProducerIdExpirationMs, time)
val offsets = LogLoader.load(LoadLogParams(
tpDir,
tp,
config,
time.scheduler,
time,
logDirFailureChannel,
hadCleanShutdown = true,
segments,
0L,
0L,
maxProducerIdExpirationMs,
leaderEpochCache,
producerStateManager))
val localLog = new LocalLog(tpDir, config, segments, offsets.recoveryPoint,
offsets.nextOffsetMetadata, time.scheduler, time, tp, logDirFailureChannel)
// the exception should be caught and the partition that caused it marked as uncleanable
class LogMock extends UnifiedLog(offsets.logStartOffset, localLog, new BrokerTopicStats,
LogManager.ProducerIdExpirationCheckIntervalMs, leaderEpochCache,
producerStateManager, _topicId = None, keepPartitionMetadataFile = true) {
// Throw an error in getFirstBatchTimestampForSegments since it is called in grabFilthiestLog()
override def getFirstBatchTimestampForSegments(segments: Iterable[LogSegment]): Iterable[Long] =
throw new IllegalStateException("Error!")
}
val log: UnifiedLog = new LogMock()
writeRecords(log = log,
numBatches = logSegmentsCount * 2,
recordsPerBatch = 10,
batchesPerSegment = 2
)
val logsPool = new Pool[TopicPartition, UnifiedLog]()
logsPool.put(tp, log)
val cleanerManager = createCleanerManagerMock(logsPool)
cleanerCheckpoints.put(tp, 1)
val thrownException = assertThrows(classOf[LogCleaningException], () => cleanerManager.grabFilthiestCompactedLog(time).get)
assertEquals(log, thrownException.log)
assertTrue(thrownException.getCause.isInstanceOf[IllegalStateException])
}
@Test
def testGrabFilthiestCompactedLogReturnsLogWithDirtiestRatio(): Unit = {
val tp0 = new TopicPartition("wishing-well", 0)
val tp1 = new TopicPartition("wishing-well", 1)
val tp2 = new TopicPartition("wishing-well", 2)
val partitions = Seq(tp0, tp1, tp2)
// setup logs with cleanable range: [20, 20], [20, 25], [20, 30]
val logs = setupIncreasinglyFilthyLogs(partitions, startNumBatches = 20, batchIncrement = 5)
val cleanerManager = createCleanerManagerMock(logs)
partitions.foreach(partition => cleanerCheckpoints.put(partition, 20))
val filthiestLog: LogToClean = cleanerManager.grabFilthiestCompactedLog(time).get
assertEquals(tp2, filthiestLog.topicPartition)
assertEquals(tp2, filthiestLog.log.topicPartition)
}
@Test
def testGrabFilthiestCompactedLogIgnoresUncleanablePartitions(): Unit = {
val tp0 = new TopicPartition("wishing-well", 0)
val tp1 = new TopicPartition("wishing-well", 1)
val tp2 = new TopicPartition("wishing-well", 2)
val partitions = Seq(tp0, tp1, tp2)
// setup logs with cleanable range: [20, 20], [20, 25], [20, 30]
val logs = setupIncreasinglyFilthyLogs(partitions, startNumBatches = 20, batchIncrement = 5)
val cleanerManager = createCleanerManagerMock(logs)
partitions.foreach(partition => cleanerCheckpoints.put(partition, 20))
cleanerManager.markPartitionUncleanable(logs.get(tp2).dir.getParent, tp2)
val filthiestLog: LogToClean = cleanerManager.grabFilthiestCompactedLog(time).get
assertEquals(tp1, filthiestLog.topicPartition)
assertEquals(tp1, filthiestLog.log.topicPartition)
}
@Test
def testGrabFilthiestCompactedLogIgnoresInProgressPartitions(): Unit = {
val tp0 = new TopicPartition("wishing-well", 0)
val tp1 = new TopicPartition("wishing-well", 1)
val tp2 = new TopicPartition("wishing-well", 2)
val partitions = Seq(tp0, tp1, tp2)
// setup logs with cleanable range: [20, 20], [20, 25], [20, 30]
val logs = setupIncreasinglyFilthyLogs(partitions, startNumBatches = 20, batchIncrement = 5)
val cleanerManager = createCleanerManagerMock(logs)
partitions.foreach(partition => cleanerCheckpoints.put(partition, 20))
cleanerManager.setCleaningState(tp2, LogCleaningInProgress)
val filthiestLog: LogToClean = cleanerManager.grabFilthiestCompactedLog(time).get
assertEquals(tp1, filthiestLog.topicPartition)
assertEquals(tp1, filthiestLog.log.topicPartition)
}
@Test
def testGrabFilthiestCompactedLogIgnoresBothInProgressPartitionsAndUncleanablePartitions(): Unit = {
val tp0 = new TopicPartition("wishing-well", 0)
val tp1 = new TopicPartition("wishing-well", 1)
val tp2 = new TopicPartition("wishing-well", 2)
val partitions = Seq(tp0, tp1, tp2)
// setup logs with cleanable range: [20, 20], [20, 25], [20, 30]
val logs = setupIncreasinglyFilthyLogs(partitions, startNumBatches = 20, batchIncrement = 5)
val cleanerManager = createCleanerManagerMock(logs)
partitions.foreach(partition => cleanerCheckpoints.put(partition, 20))
cleanerManager.setCleaningState(tp2, LogCleaningInProgress)
cleanerManager.markPartitionUncleanable(logs.get(tp1).dir.getParent, tp1)
val filthiestLog: Option[LogToClean] = cleanerManager.grabFilthiestCompactedLog(time)
assertEquals(None, filthiestLog)
}
@Test
def testDirtyOffsetResetIfLargerThanEndOffset(): Unit = {
val tp = new TopicPartition("foo", 0)
val logs = setupIncreasinglyFilthyLogs(Seq(tp), startNumBatches = 20, batchIncrement = 5)
val cleanerManager = createCleanerManagerMock(logs)
cleanerCheckpoints.put(tp, 200)
val filthiestLog = cleanerManager.grabFilthiestCompactedLog(time).get
assertEquals(0L, filthiestLog.firstDirtyOffset)
}
@Test
def testDirtyOffsetResetIfSmallerThanStartOffset(): Unit = {
val tp = new TopicPartition("foo", 0)
val logs = setupIncreasinglyFilthyLogs(Seq(tp), startNumBatches = 20, batchIncrement = 5)
logs.get(tp).maybeIncrementLogStartOffset(10L, ClientRecordDeletion)
val cleanerManager = createCleanerManagerMock(logs)
cleanerCheckpoints.put(tp, 0L)
val filthiestLog = cleanerManager.grabFilthiestCompactedLog(time).get
assertEquals(10L, filthiestLog.firstDirtyOffset)
}
@Test
def testLogStartOffsetLargerThanActiveSegmentBaseOffset(): Unit = {
val tp = new TopicPartition("foo", 0)
val log = createLog(segmentSize = 2048, LogConfig.Compact, tp)
val logs = new Pool[TopicPartition, UnifiedLog]()
logs.put(tp, log)
appendRecords(log, numRecords = 3)
appendRecords(log, numRecords = 3)
appendRecords(log, numRecords = 3)
assertEquals(1, log.logSegments.size)
log.maybeIncrementLogStartOffset(2L, ClientRecordDeletion)
val cleanerManager = createCleanerManagerMock(logs)
cleanerCheckpoints.put(tp, 0L)
// The active segment is uncleanable and hence not filthy from the POV of the CleanerManager.
val filthiestLog = cleanerManager.grabFilthiestCompactedLog(time)
assertEquals(None, filthiestLog)
}
@Test
def testDirtyOffsetLargerThanActiveSegmentBaseOffset(): Unit = {
// It is possible in the case of an unclean leader election for the checkpoint
// dirty offset to get ahead of the active segment base offset, but still be
// within the range of the log.
val tp = new TopicPartition("foo", 0)
val logs = new Pool[TopicPartition, UnifiedLog]()
val log = createLog(2048, LogConfig.Compact, topicPartition = tp)
logs.put(tp, log)
appendRecords(log, numRecords = 3)
appendRecords(log, numRecords = 3)
assertEquals(1, log.logSegments.size)
assertEquals(0L, log.activeSegment.baseOffset)
val cleanerManager = createCleanerManagerMock(logs)
cleanerCheckpoints.put(tp, 3L)
// These segments are uncleanable and hence not filthy
val filthiestLog = cleanerManager.grabFilthiestCompactedLog(time)
assertEquals(None, filthiestLog)
}
/**
* When checking for logs with segments ready for deletion
* we shouldn't consider logs where cleanup.policy=delete
* as they are handled by the LogManager
*/
@Test
def testLogsWithSegmentsToDeleteShouldNotConsiderCleanupPolicyDeleteLogs(): Unit = {
val records = TestUtils.singletonRecords("test".getBytes)
val log: UnifiedLog = createLog(records.sizeInBytes * 5, LogConfig.Delete)
val cleanerManager: LogCleanerManager = createCleanerManager(log)
val readyToDelete = cleanerManager.deletableLogs().size
assertEquals(0, readyToDelete, "should have 0 logs ready to be deleted")
}
/**
* We should find logs with segments ready to be deleted when cleanup.policy=compact,delete
*/
@Test
def testLogsWithSegmentsToDeleteShouldConsiderCleanupPolicyCompactDeleteLogs(): Unit = {
val records = TestUtils.singletonRecords("test".getBytes, key="test".getBytes)
val log: UnifiedLog = createLog(records.sizeInBytes * 5, LogConfig.Compact + "," + LogConfig.Delete)
val cleanerManager: LogCleanerManager = createCleanerManager(log)
val readyToDelete = cleanerManager.deletableLogs().size
assertEquals(1, readyToDelete, "should have 1 logs ready to be deleted")
}
/**
* When looking for logs with segments ready to be deleted we should consider
* logs with cleanup.policy=compact because they may have segments from before the log start offset
*/
@Test
def testLogsWithSegmentsToDeleteShouldConsiderCleanupPolicyCompactLogs(): Unit = {
val records = TestUtils.singletonRecords("test".getBytes, key="test".getBytes)
val log: UnifiedLog = createLog(records.sizeInBytes * 5, LogConfig.Compact)
val cleanerManager: LogCleanerManager = createCleanerManager(log)
val readyToDelete = cleanerManager.deletableLogs().size
assertEquals(1, readyToDelete, "should have 1 logs ready to be deleted")
}
/**
* log under cleanup should be ineligible for compaction
*/
@Test
def testLogsUnderCleanupIneligibleForCompaction(): Unit = {
val records = TestUtils.singletonRecords("test".getBytes, key="test".getBytes)
val log: UnifiedLog = createLog(records.sizeInBytes * 5, LogConfig.Delete)
val cleanerManager: LogCleanerManager = createCleanerManager(log)
log.appendAsLeader(records, leaderEpoch = 0)
log.roll()
log.appendAsLeader(records, leaderEpoch = 0)
log.updateHighWatermark(2L)
// simulate cleanup thread working on the log partition
val deletableLog = cleanerManager.pauseCleaningForNonCompactedPartitions()
assertEquals(1, deletableLog.size, "should have 1 logs ready to be deleted")
// change cleanup policy from delete to compact
val logProps = new Properties()
logProps.put(LogConfig.SegmentBytesProp, log.config.segmentSize)
logProps.put(LogConfig.RetentionMsProp, log.config.retentionMs)
logProps.put(LogConfig.CleanupPolicyProp, LogConfig.Compact)
logProps.put(LogConfig.MinCleanableDirtyRatioProp, 0: Integer)
val config = LogConfig(logProps)
log.updateConfig(config)
    // log cleanup in progress, the log is not available for compaction
val cleanable = cleanerManager.grabFilthiestCompactedLog(time)
assertEquals(0, cleanable.size, "should have 0 logs ready to be compacted")
// log cleanup finished, and log can be picked up for compaction
cleanerManager.resumeCleaning(deletableLog.map(_._1))
val cleanable2 = cleanerManager.grabFilthiestCompactedLog(time)
assertEquals(1, cleanable2.size, "should have 1 logs ready to be compacted")
// update cleanup policy to delete
logProps.put(LogConfig.CleanupPolicyProp, LogConfig.Delete)
val config2 = LogConfig(logProps)
log.updateConfig(config2)
// compaction in progress, should have 0 log eligible for log cleanup
val deletableLog2 = cleanerManager.pauseCleaningForNonCompactedPartitions()
assertEquals(0, deletableLog2.size, "should have 0 logs ready to be deleted")
// compaction done, should have 1 log eligible for log cleanup
cleanerManager.doneDeleting(Seq(cleanable2.get.topicPartition))
val deletableLog3 = cleanerManager.pauseCleaningForNonCompactedPartitions()
assertEquals(1, deletableLog3.size, "should have 1 logs ready to be deleted")
}
@Test
def testUpdateCheckpointsShouldAddOffsetToPartition(): Unit = {
val records = TestUtils.singletonRecords("test".getBytes, key="test".getBytes)
val log: UnifiedLog = createLog(records.sizeInBytes * 5, LogConfig.Compact)
val cleanerManager: LogCleanerManager = createCleanerManager(log)
// expect the checkpoint offset is not the expectedOffset before doing updateCheckpoints
assertNotEquals(offset, cleanerManager.allCleanerCheckpoints.get(topicPartition).getOrElse(0))
cleanerManager.updateCheckpoints(logDir, partitionToUpdateOrAdd = Option(topicPartition, offset))
// expect the checkpoint offset is now updated to the expected offset after doing updateCheckpoints
assertEquals(offset, cleanerManager.allCleanerCheckpoints(topicPartition))
}
@Test
def testUpdateCheckpointsShouldRemovePartitionData(): Unit = {
val records = TestUtils.singletonRecords("test".getBytes, key="test".getBytes)
val log: UnifiedLog = createLog(records.sizeInBytes * 5, LogConfig.Compact)
val cleanerManager: LogCleanerManager = createCleanerManager(log)
// write some data into the cleaner-offset-checkpoint file
cleanerManager.updateCheckpoints(logDir, partitionToUpdateOrAdd = Option(topicPartition, offset))
assertEquals(offset, cleanerManager.allCleanerCheckpoints(topicPartition))
// updateCheckpoints should remove the topicPartition data in the logDir
cleanerManager.updateCheckpoints(logDir, partitionToRemove = Option(topicPartition))
assertTrue(cleanerManager.allCleanerCheckpoints.get(topicPartition).isEmpty)
}
@Test
def testHandleLogDirFailureShouldRemoveDirAndData(): Unit = {
val records = TestUtils.singletonRecords("test".getBytes, key="test".getBytes)
val log: UnifiedLog = createLog(records.sizeInBytes * 5, LogConfig.Compact)
val cleanerManager: LogCleanerManager = createCleanerManager(log)
// write some data into the cleaner-offset-checkpoint file in logDir and logDir2
cleanerManager.updateCheckpoints(logDir, partitionToUpdateOrAdd = Option(topicPartition, offset))
cleanerManager.updateCheckpoints(logDir2, partitionToUpdateOrAdd = Option(topicPartition2, offset))
assertEquals(offset, cleanerManager.allCleanerCheckpoints(topicPartition))
assertEquals(offset, cleanerManager.allCleanerCheckpoints(topicPartition2))
cleanerManager.handleLogDirFailure(logDir.getAbsolutePath)
// verify the partition data in logDir is gone, and data in logDir2 is still there
assertEquals(offset, cleanerManager.allCleanerCheckpoints(topicPartition2))
assertTrue(cleanerManager.allCleanerCheckpoints.get(topicPartition).isEmpty)
}
@Test
def testMaybeTruncateCheckpointShouldTruncateData(): Unit = {
val records = TestUtils.singletonRecords("test".getBytes, key="test".getBytes)
val log: UnifiedLog = createLog(records.sizeInBytes * 5, LogConfig.Compact)
val cleanerManager: LogCleanerManager = createCleanerManager(log)
val lowerOffset = 1L
val higherOffset = 1000L
// write some data into the cleaner-offset-checkpoint file in logDir
cleanerManager.updateCheckpoints(logDir, partitionToUpdateOrAdd = Option(topicPartition, offset))
assertEquals(offset, cleanerManager.allCleanerCheckpoints(topicPartition))
// we should not truncate the checkpoint data for checkpointed offset <= the given offset (higherOffset)
cleanerManager.maybeTruncateCheckpoint(logDir, topicPartition, higherOffset)
assertEquals(offset, cleanerManager.allCleanerCheckpoints(topicPartition))
// we should truncate the checkpoint data for checkpointed offset > the given offset (lowerOffset)
cleanerManager.maybeTruncateCheckpoint(logDir, topicPartition, lowerOffset)
assertEquals(lowerOffset, cleanerManager.allCleanerCheckpoints(topicPartition))
}
@Test
def testAlterCheckpointDirShouldRemoveDataInSrcDirAndAddInNewDir(): Unit = {
val records = TestUtils.singletonRecords("test".getBytes, key="test".getBytes)
val log: UnifiedLog = createLog(records.sizeInBytes * 5, LogConfig.Compact)
val cleanerManager: LogCleanerManager = createCleanerManager(log)
// write some data into the cleaner-offset-checkpoint file in logDir
cleanerManager.updateCheckpoints(logDir, partitionToUpdateOrAdd = Option(topicPartition, offset))
assertEquals(offset, cleanerManager.allCleanerCheckpoints(topicPartition))
cleanerManager.alterCheckpointDir(topicPartition, logDir, logDir2)
// verify we still can get the partition offset after alterCheckpointDir
// This data should locate in logDir2, not logDir
assertEquals(offset, cleanerManager.allCleanerCheckpoints(topicPartition))
// force delete the logDir2 from checkpoints, so that the partition data should also be deleted
cleanerManager.handleLogDirFailure(logDir2.getAbsolutePath)
assertTrue(cleanerManager.allCleanerCheckpoints.get(topicPartition).isEmpty)
}
/**
* log under cleanup should still be eligible for log truncation
*/
@Test
def testConcurrentLogCleanupAndLogTruncation(): Unit = {
val records = TestUtils.singletonRecords("test".getBytes, key="test".getBytes)
val log: UnifiedLog = createLog(records.sizeInBytes * 5, LogConfig.Delete)
val cleanerManager: LogCleanerManager = createCleanerManager(log)
// log cleanup starts
val pausedPartitions = cleanerManager.pauseCleaningForNonCompactedPartitions()
// Log truncation happens due to unclean leader election
cleanerManager.abortAndPauseCleaning(log.topicPartition)
cleanerManager.resumeCleaning(Seq(log.topicPartition))
// log cleanup finishes and pausedPartitions are resumed
cleanerManager.resumeCleaning(pausedPartitions.map(_._1))
assertEquals(None, cleanerManager.cleaningState(log.topicPartition))
}
/**
* log under cleanup should still be eligible for topic deletion
*/
@Test
def testConcurrentLogCleanupAndTopicDeletion(): Unit = {
val records = TestUtils.singletonRecords("test".getBytes, key = "test".getBytes)
val log: UnifiedLog = createLog(records.sizeInBytes * 5, LogConfig.Delete)
val cleanerManager: LogCleanerManager = createCleanerManager(log)
// log cleanup starts
val pausedPartitions = cleanerManager.pauseCleaningForNonCompactedPartitions()
// Broker processes StopReplicaRequest with delete=true
cleanerManager.abortCleaning(log.topicPartition)
// log cleanup finishes and pausedPartitions are resumed
cleanerManager.resumeCleaning(pausedPartitions.map(_._1))
assertEquals(None, cleanerManager.cleaningState(log.topicPartition))
}
/**
* When looking for logs with segments ready to be deleted we shouldn't consider
* logs that have had their partition marked as uncleanable.
*/
@Test
def testLogsWithSegmentsToDeleteShouldNotConsiderUncleanablePartitions(): Unit = {
val records = TestUtils.singletonRecords("test".getBytes, key="test".getBytes)
val log: UnifiedLog = createLog(records.sizeInBytes * 5, LogConfig.Compact)
val cleanerManager: LogCleanerManager = createCleanerManager(log)
cleanerManager.markPartitionUncleanable(log.dir.getParent, topicPartition)
val readyToDelete = cleanerManager.deletableLogs().size
assertEquals(0, readyToDelete, "should have 0 logs ready to be deleted")
}
/**
* Test computation of cleanable range with no minimum compaction lag settings active where bounded by LSO
*/
@Test
def testCleanableOffsetsForNone(): Unit = {
val logProps = new Properties()
logProps.put(LogConfig.SegmentBytesProp, 1024: java.lang.Integer)
val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps))
while(log.numberOfSegments < 8)
log.appendAsLeader(records(log.logEndOffset.toInt, log.logEndOffset.toInt, time.milliseconds()), leaderEpoch = 0)
log.updateHighWatermark(50)
val lastCleanOffset = Some(0L)
val cleanableOffsets = LogCleanerManager.cleanableOffsets(log, lastCleanOffset, time.milliseconds)
assertEquals(0L, cleanableOffsets.firstDirtyOffset, "The first cleanable offset starts at the beginning of the log.")
assertEquals(log.highWatermark, log.lastStableOffset, "The high watermark equals the last stable offset as no transactions are in progress")
assertEquals(log.lastStableOffset, cleanableOffsets.firstUncleanableDirtyOffset, "The first uncleanable offset is bounded by the last stable offset.")
}
/**
* Test computation of cleanable range with no minimum compaction lag settings active where bounded by active segment
*/
@Test
def testCleanableOffsetsActiveSegment(): Unit = {
val logProps = new Properties()
logProps.put(LogConfig.SegmentBytesProp, 1024: java.lang.Integer)
val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps))
while(log.numberOfSegments < 8)
log.appendAsLeader(records(log.logEndOffset.toInt, log.logEndOffset.toInt, time.milliseconds()), leaderEpoch = 0)
log.updateHighWatermark(log.logEndOffset)
val lastCleanOffset = Some(0L)
val cleanableOffsets = LogCleanerManager.cleanableOffsets(log, lastCleanOffset, time.milliseconds)
assertEquals(0L, cleanableOffsets.firstDirtyOffset, "The first cleanable offset starts at the beginning of the log.")
assertEquals(log.activeSegment.baseOffset, cleanableOffsets.firstUncleanableDirtyOffset, "The first uncleanable offset begins with the active segment.")
}
/**
* Test computation of cleanable range with a minimum compaction lag time
*/
@Test
def testCleanableOffsetsForTime(): Unit = {
val compactionLag = 60 * 60 * 1000
val logProps = new Properties()
logProps.put(LogConfig.SegmentBytesProp, 1024: java.lang.Integer)
logProps.put(LogConfig.MinCompactionLagMsProp, compactionLag: java.lang.Integer)
val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps))
val t0 = time.milliseconds
while(log.numberOfSegments < 4)
log.appendAsLeader(records(log.logEndOffset.toInt, log.logEndOffset.toInt, t0), leaderEpoch = 0)
val activeSegAtT0 = log.activeSegment
time.sleep(compactionLag + 1)
val t1 = time.milliseconds
while (log.numberOfSegments < 8)
log.appendAsLeader(records(log.logEndOffset.toInt, log.logEndOffset.toInt, t1), leaderEpoch = 0)
log.updateHighWatermark(log.logEndOffset)
val lastCleanOffset = Some(0L)
val cleanableOffsets = LogCleanerManager.cleanableOffsets(log, lastCleanOffset, time.milliseconds)
assertEquals(0L, cleanableOffsets.firstDirtyOffset, "The first cleanable offset starts at the beginning of the log.")
assertEquals(activeSegAtT0.baseOffset, cleanableOffsets.firstUncleanableDirtyOffset, "The first uncleanable offset begins with the second block of log entries.")
}
/**
* Test computation of cleanable range with a minimum compaction lag time that is small enough that
* the active segment contains it.
*/
@Test
def testCleanableOffsetsForShortTime(): Unit = {
val compactionLag = 60 * 60 * 1000
val logProps = new Properties()
logProps.put(LogConfig.SegmentBytesProp, 1024: java.lang.Integer)
logProps.put(LogConfig.MinCompactionLagMsProp, compactionLag: java.lang.Integer)
val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps))
val t0 = time.milliseconds
while (log.numberOfSegments < 8)
log.appendAsLeader(records(log.logEndOffset.toInt, log.logEndOffset.toInt, t0), leaderEpoch = 0)
log.updateHighWatermark(log.logEndOffset)
time.sleep(compactionLag + 1)
val lastCleanOffset = Some(0L)
val cleanableOffsets = LogCleanerManager.cleanableOffsets(log, lastCleanOffset, time.milliseconds)
assertEquals(0L, cleanableOffsets.firstDirtyOffset, "The first cleanable offset starts at the beginning of the log.")
assertEquals(log.activeSegment.baseOffset, cleanableOffsets.firstUncleanableDirtyOffset, "The first uncleanable offset begins with active segment.")
}
@Test
def testCleanableOffsetsNeedsCheckpointReset(): Unit = {
val tp = new TopicPartition("foo", 0)
val logs = setupIncreasinglyFilthyLogs(Seq(tp), startNumBatches = 20, batchIncrement = 5)
logs.get(tp).maybeIncrementLogStartOffset(10L, ClientRecordDeletion)
var lastCleanOffset = Some(15L)
var cleanableOffsets = LogCleanerManager.cleanableOffsets(logs.get(tp), lastCleanOffset, time.milliseconds)
assertFalse(cleanableOffsets.forceUpdateCheckpoint, "Checkpoint offset should not be reset if valid")
logs.get(tp).maybeIncrementLogStartOffset(20L, ClientRecordDeletion)
cleanableOffsets = LogCleanerManager.cleanableOffsets(logs.get(tp), lastCleanOffset, time.milliseconds)
assertTrue(cleanableOffsets.forceUpdateCheckpoint, "Checkpoint offset needs to be reset if less than log start offset")
lastCleanOffset = Some(25L)
cleanableOffsets = LogCleanerManager.cleanableOffsets(logs.get(tp), lastCleanOffset, time.milliseconds)
assertTrue(cleanableOffsets.forceUpdateCheckpoint, "Checkpoint offset needs to be reset if greater than log end offset")
}
@Test
def testUndecidedTransactionalDataNotCleanable(): Unit = {
val compactionLag = 60 * 60 * 1000
val logProps = new Properties()
logProps.put(LogConfig.SegmentBytesProp, 1024: java.lang.Integer)
logProps.put(LogConfig.MinCompactionLagMsProp, compactionLag: java.lang.Integer)
val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps))
val producerId = 15L
val producerEpoch = 0.toShort
val sequence = 0
log.appendAsLeader(MemoryRecords.withTransactionalRecords(CompressionType.NONE, producerId, producerEpoch, sequence,
new SimpleRecord(time.milliseconds(), "1".getBytes, "a".getBytes),
new SimpleRecord(time.milliseconds(), "2".getBytes, "b".getBytes)), leaderEpoch = 0)
log.appendAsLeader(MemoryRecords.withTransactionalRecords(CompressionType.NONE, producerId, producerEpoch, sequence + 2,
new SimpleRecord(time.milliseconds(), "3".getBytes, "c".getBytes)), leaderEpoch = 0)
log.roll()
log.updateHighWatermark(3L)
time.sleep(compactionLag + 1)
// although the compaction lag has been exceeded, the undecided data should not be cleaned
var cleanableOffsets = LogCleanerManager.cleanableOffsets(log, Some(0L), time.milliseconds())
assertEquals(0L, cleanableOffsets.firstDirtyOffset)
assertEquals(0L, cleanableOffsets.firstUncleanableDirtyOffset)
log.appendAsLeader(MemoryRecords.withEndTransactionMarker(time.milliseconds(), producerId, producerEpoch,
new EndTransactionMarker(ControlRecordType.ABORT, 15)), leaderEpoch = 0,
origin = AppendOrigin.Coordinator)
log.roll()
log.updateHighWatermark(4L)
// the first segment should now become cleanable immediately
cleanableOffsets = LogCleanerManager.cleanableOffsets(log, Some(0L), time.milliseconds())
assertEquals(0L, cleanableOffsets.firstDirtyOffset)
assertEquals(3L, cleanableOffsets.firstUncleanableDirtyOffset)
time.sleep(compactionLag + 1)
// the second segment becomes cleanable after the compaction lag
cleanableOffsets = LogCleanerManager.cleanableOffsets(log, Some(0L), time.milliseconds())
assertEquals(0L, cleanableOffsets.firstDirtyOffset)
assertEquals(4L, cleanableOffsets.firstUncleanableDirtyOffset)
}
@Test
def testDoneCleaning(): Unit = {
val logProps = new Properties()
logProps.put(LogConfig.SegmentBytesProp, 1024: java.lang.Integer)
val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps))
while(log.numberOfSegments < 8)
log.appendAsLeader(records(log.logEndOffset.toInt, log.logEndOffset.toInt, time.milliseconds()), leaderEpoch = 0)
val cleanerManager: LogCleanerManager = createCleanerManager(log)
assertThrows(classOf[IllegalStateException], () => cleanerManager.doneCleaning(topicPartition, log.dir, 1))
cleanerManager.setCleaningState(topicPartition, LogCleaningPaused(1))
assertThrows(classOf[IllegalStateException], () => cleanerManager.doneCleaning(topicPartition, log.dir, 1))
cleanerManager.setCleaningState(topicPartition, LogCleaningInProgress)
cleanerManager.doneCleaning(topicPartition, log.dir, 1)
assertTrue(cleanerManager.cleaningState(topicPartition).isEmpty)
assertTrue(cleanerManager.allCleanerCheckpoints.get(topicPartition).nonEmpty)
cleanerManager.setCleaningState(topicPartition, LogCleaningAborted)
cleanerManager.doneCleaning(topicPartition, log.dir, 1)
assertEquals(LogCleaningPaused(1), cleanerManager.cleaningState(topicPartition).get)
assertTrue(cleanerManager.allCleanerCheckpoints.get(topicPartition).nonEmpty)
}
@Test
def testDoneDeleting(): Unit = {
val records = TestUtils.singletonRecords("test".getBytes, key="test".getBytes)
val log: UnifiedLog = createLog(records.sizeInBytes * 5, LogConfig.Compact + "," + LogConfig.Delete)
val cleanerManager: LogCleanerManager = createCleanerManager(log)
val tp = new TopicPartition("log", 0)
assertThrows(classOf[IllegalStateException], () => cleanerManager.doneDeleting(Seq(tp)))
cleanerManager.setCleaningState(tp, LogCleaningPaused(1))
assertThrows(classOf[IllegalStateException], () => cleanerManager.doneDeleting(Seq(tp)))
cleanerManager.setCleaningState(tp, LogCleaningInProgress)
cleanerManager.doneDeleting(Seq(tp))
assertTrue(cleanerManager.cleaningState(tp).isEmpty)
cleanerManager.setCleaningState(tp, LogCleaningAborted)
cleanerManager.doneDeleting(Seq(tp))
assertEquals(LogCleaningPaused(1), cleanerManager.cleaningState(tp).get)
}
/**
* Logs with invalid checkpoint offsets should update their checkpoint offset even if the log doesn't need cleaning
*/
@Test
def testCheckpointUpdatedForInvalidOffsetNoCleaning(): Unit = {
val tp = new TopicPartition("foo", 0)
val logs = setupIncreasinglyFilthyLogs(Seq(tp), startNumBatches = 20, batchIncrement = 5)
logs.get(tp).maybeIncrementLogStartOffset(20L, ClientRecordDeletion)
val cleanerManager = createCleanerManagerMock(logs)
cleanerCheckpoints.put(tp, 15L)
val filthiestLog = cleanerManager.grabFilthiestCompactedLog(time)
assertEquals(None, filthiestLog, "Log should not be selected for cleaning")
assertEquals(20L, cleanerCheckpoints.get(tp).get, "Unselected log should have checkpoint offset updated")
}
/**
* Logs with invalid checkpoint offsets should update their checkpoint offset even if they aren't selected
* for immediate cleaning
*/
@Test
def testCheckpointUpdatedForInvalidOffsetNotSelected(): Unit = {
val tp0 = new TopicPartition("foo", 0)
val tp1 = new TopicPartition("foo", 1)
val partitions = Seq(tp0, tp1)
// create two logs, one with an invalid offset, and one that is dirtier than the log with an invalid offset
val logs = setupIncreasinglyFilthyLogs(partitions, startNumBatches = 20, batchIncrement = 5)
logs.get(tp0).maybeIncrementLogStartOffset(15L, ClientRecordDeletion)
val cleanerManager = createCleanerManagerMock(logs)
cleanerCheckpoints.put(tp0, 10L)
cleanerCheckpoints.put(tp1, 5L)
val filthiestLog = cleanerManager.grabFilthiestCompactedLog(time).get
assertEquals(tp1, filthiestLog.topicPartition, "Dirtier log should be selected")
assertEquals(15L, cleanerCheckpoints.get(tp0).get, "Unselected log should have checkpoint offset updated")
}
private def createCleanerManager(log: UnifiedLog): LogCleanerManager = {
val logs = new Pool[TopicPartition, UnifiedLog]()
logs.put(topicPartition, log)
new LogCleanerManager(Seq(logDir, logDir2), logs, null)
}
private def createCleanerManagerMock(pool: Pool[TopicPartition, UnifiedLog]): LogCleanerManagerMock = {
new LogCleanerManagerMock(Seq(logDir), pool, null)
}
private def createLog(segmentSize: Int,
cleanupPolicy: String,
topicPartition: TopicPartition = new TopicPartition("log", 0)): UnifiedLog = {
val config = createLowRetentionLogConfig(segmentSize, cleanupPolicy)
val partitionDir = new File(logDir, UnifiedLog.logDirName(topicPartition))
UnifiedLog(partitionDir,
config,
logStartOffset = 0L,
recoveryPoint = 0L,
scheduler = time.scheduler,
time = time,
brokerTopicStats = new BrokerTopicStats,
maxProducerIdExpirationMs = 60 * 60 * 1000,
producerIdExpirationCheckIntervalMs = LogManager.ProducerIdExpirationCheckIntervalMs,
logDirFailureChannel = new LogDirFailureChannel(10),
topicId = None,
keepPartitionMetadataFile = true)
}
private def createLowRetentionLogConfig(segmentSize: Int, cleanupPolicy: String): LogConfig = {
val logProps = new Properties()
logProps.put(LogConfig.SegmentBytesProp, segmentSize: Integer)
logProps.put(LogConfig.RetentionMsProp, 1: Integer)
logProps.put(LogConfig.CleanupPolicyProp, cleanupPolicy)
logProps.put(LogConfig.MinCleanableDirtyRatioProp, 0.05: java.lang.Double) // small for easier and clearer tests
LogConfig(logProps)
}
private def writeRecords(log: UnifiedLog,
numBatches: Int,
recordsPerBatch: Int,
batchesPerSegment: Int): Unit = {
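    // roll the log every batchesPerSegment batches (and once more at the end) so records are spread across multiple segments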
for (i <- 0 until numBatches) {
appendRecords(log, recordsPerBatch)
if (i % batchesPerSegment == 0)
log.roll()
}
log.roll()
}
private def appendRecords(log: UnifiedLog, numRecords: Int): Unit = {
val startOffset = log.logEndOffset
val endOffset = startOffset + numRecords
var lastTimestamp = 0L
val records = (startOffset until endOffset).map { offset =>
val currentTimestamp = time.milliseconds()
if (offset == endOffset - 1)
lastTimestamp = currentTimestamp
new SimpleRecord(currentTimestamp, s"key-$offset".getBytes, s"value-$offset".getBytes)
}
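    // append the whole range as a single leader batch, then advance the high watermark so the records count as committed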
log.appendAsLeader(MemoryRecords.withRecords(CompressionType.NONE, records:_*), leaderEpoch = 1)
log.maybeIncrementHighWatermark(log.logEndOffsetMetadata)
}
private def makeLog(dir: File = logDir, config: LogConfig) =
UnifiedLog(dir = dir, config = config, logStartOffset = 0L, recoveryPoint = 0L, scheduler = time.scheduler,
time = time, brokerTopicStats = new BrokerTopicStats, maxProducerIdExpirationMs = 60 * 60 * 1000,
producerIdExpirationCheckIntervalMs = LogManager.ProducerIdExpirationCheckIntervalMs,
logDirFailureChannel = new LogDirFailureChannel(10), topicId = None, keepPartitionMetadataFile = true)
private def records(key: Int, value: Int, timestamp: Long) =
MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord(timestamp, key.toString.getBytes, value.toString.getBytes))
}
| guozhangwang/kafka | core/src/test/scala/unit/kafka/log/LogCleanerManagerTest.scala | Scala | apache-2.0 | 39,217 |
/*
* Copyright (c) 2015-2022 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Attribution Notice under the terms of the Apache License 2.0
*
* This work was created by the collective efforts of the openCypher community.
* Without limiting the terms of Section 6, any Derivative Work that is not
* approved by the public consensus process of the openCypher Implementers Group
* should not be described as “Cypher” (and Cypher® is a registered trademark of
* Neo4j Inc.) or as "openCypher". Extensions by implementers or prototypes or
* proposals for change that have been documented or implemented should only be
* described as "implementation extensions to Cypher" or as "proposed changes to
* Cypher that are not yet approved by the openCypher community".
*/
package org.opencypher.tools.tck.api.events
import org.opencypher.tools.tck.api._
import org.opencypher.tools.tck.values.CypherValue
import org.scalatest.Assertions
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
import scala.collection.mutable.ListBuffer
class TCKEventsTest extends AnyFunSuite with Assertions with Matchers {
test("TCK events should be captured correctly") {
val events = ListBuffer[String]()
TCKEvents.feature.subscribe(
f => { if (f.name == "List6 - List size") { events += s"Feature '${f.name}' read" } })
TCKEvents.scenario.subscribe(s => events += s"Scenario '${s.name}' started")
TCKEvents.stepStarted.subscribe(s =>
events += s"Step '${s.step.getClass.getSimpleName} -> ${s.step.source.getText}' started")
TCKEvents.stepFinished.subscribe(s =>
events += s"Step '${s.step.getClass.getSimpleName}' finished. Result: ${s.result match {
case Right(e) => e match {
case Right(cypherValueRecords) => cypherValueRecords
case Left(failed) => failed.toString
}
case Left(ex) => ex.toString
}}")
val scenarios = CypherTCK.allTckScenarios.filter(s => s.name == "Return list size").toList
scenarios.size should equal(1)
scenarios.head(FakeGraph).run()
TCKEvents.reset()
events.toList should equal(List[String](
"Feature 'List6 - List size' read",
"Scenario 'Return list size' started",
"Step 'Execute -> any graph' started",
"Step 'Execute' finished. Result: <empty result>",
"Step 'Measure -> executing query:' started",
"Step 'Measure' finished. Result: <empty result>",
"Step 'Execute -> executing query:' started",
"Step 'Execute' finished. Result: | n |" + System.lineSeparator + "| 3 |",
"Step 'ExpectResult -> the result should be, in any order:' started",
"Step 'ExpectResult' finished. Result: | n |" + System.lineSeparator + "| 3 |",
"Step 'SideEffects -> no side effects' started",
"Step 'SideEffects' finished. Result: | n |" + System.lineSeparator + "| 3 |"
))
}
private object FakeGraph extends Graph with ProcedureSupport {
override def cypher(query: String, params: Map[String, CypherValue], queryType: QueryType): Result = {
queryType match {
case InitQuery =>
CypherValueRecords.empty
case SideEffectQuery =>
CypherValueRecords.empty
case ControlQuery =>
CypherValueRecords.empty
case ExecQuery =>
StringRecords(List("n"), List(Map("n" -> "3")))
}
}
override def registerProcedure(signature: String, values: CypherValueRecords): Unit =
()
}
}
| opencypher/openCypher | tools/tck-api/src/test/scala/org/opencypher/tools/tck/api/events/TCKEventsTest.scala | Scala | apache-2.0 | 4,095 |