code (stringlengths 5-1M) | repo_name (stringlengths 5-109) | path (stringlengths 6-208) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 5-1M) |
---|---|---|---|---|---|
package slamdata.engine.analysis
import slamdata.Predef._
import fixplate._
import slamdata.engine.fp._
import slamdata.engine.{RenderTree, Terminal, NonTerminal}
import scalaz._
import Scalaz._
import scalaz.scalacheck.ScalazProperties._
import org.specs2.ScalaCheck
import org.specs2.mutable._
import org.specs2.scalaz._
import org.scalacheck._
sealed trait Exp[+A]
object Exp {
case class Num(value: Int) extends Exp[Nothing]
case class Mul[A](left: A, right: A) extends Exp[A]
case class Var(value: Symbol) extends Exp[Nothing]
case class Lambda[A](param: Symbol, body: A) extends Exp[A]
case class Apply[A](func: A, arg: A) extends Exp[A]
case class Let[A](name: Symbol, value: A, inBody: A) extends Exp[A]
implicit val arbSymbol = Arbitrary(Arbitrary.arbitrary[String].map(Symbol(_)))
implicit val arbExp: Arbitrary ~> λ[α => Arbitrary[Exp[α]]] =
new (Arbitrary ~> λ[α => Arbitrary[Exp[α]]]) {
def apply[α](arb: Arbitrary[α]): Arbitrary[Exp[α]] =
Arbitrary(Gen.oneOf(
Arbitrary.arbitrary[Int].map(Num(_)),
for {
a <- arb.arbitrary
b <- arb.arbitrary
} yield Mul(a, b),
Arbitrary.arbitrary[Symbol].map(Var(_)),
for {
a <- Arbitrary.arbitrary[Symbol]
b <- arb.arbitrary
} yield Lambda(a, b),
for {
a <- arb.arbitrary
b <- arb.arbitrary
} yield Apply(a, b),
for {
a <- Arbitrary.arbitrary[Symbol]
b <- arb.arbitrary
c <- arb.arbitrary
} yield Let(a, b, c)))
}
def num(v: Int) = Term[Exp](Num(v))
def mul(left: Term[Exp], right: Term[Exp]) = Term[Exp](Mul(left, right))
def vari(v: Symbol) = Term[Exp](Var(v))
def lam(param: Symbol, body: Term[Exp]) = Term[Exp](Lambda(param, body))
def ap(func: Term[Exp], arg: Term[Exp]) = Term[Exp](Apply(func, arg))
def let(name: Symbol, v: Term[Exp], inBody: Term[Exp]) = Term[Exp](Let(name, v, inBody))
implicit val ExpTraverse: Traverse[Exp] = new Traverse[Exp] {
def traverseImpl[G[_], A, B](fa: Exp[A])(f: A => G[B])(implicit G: Applicative[G]): G[Exp[B]] = fa match {
case Num(v) => G.point(Num(v))
case Mul(left, right) => G.apply2(f(left), f(right))(Mul(_, _))
case Var(v) => G.point(Var(v))
case Lambda(p, b) => G.map(f(b))(Lambda(p, _))
case Apply(func, arg) => G.apply2(f(func), f(arg))(Apply(_, _))
case Let(n, v, i) => G.apply2(f(v), f(i))(Let(n, _, _))
}
}
implicit val ExpRenderTree: RenderTree[Exp[_]] =
new RenderTree[Exp[_]] {
def render(v: Exp[_]) = v match {
case Num(value) => Terminal(List("Num"), Some(value.toString))
case Mul(_, _) => Terminal(List("Mul"), None)
case Var(sym) => Terminal(List("Var"), Some(sym.toString))
case Lambda(param, _) => Terminal(List("Lambda"), Some(param.toString))
case Apply(_, _) => Terminal(List("Apply"), None)
case Let(name, _, _) => Terminal(List("Let"), Some(name.toString))
}
}
implicit val IntRenderTree = RenderTree.fromToString[Int]("Int")
// NB: an unusual definition of equality, in that only the first 3 characters
// of variable names are significant. This is to distinguish it from `==`
// as well as from a derivable Equal.
implicit val EqualExp: EqualF[Exp] = new EqualF[Exp] {
def equal[A](e1: Exp[A], e2: Exp[A])(implicit eq: Equal[A]) = (e1, e2) match {
case (Num(v1), Num(v2)) => v1 == v2
case (Mul(a1, b1), Mul(a2, b2)) => a1 ≟ a2 && b1 ≟ b2
case (Var(s1), Var(s2)) =>
s1.name.substring(0, 3 min s1.name.length) == s2.name.substring(0, 3 min s2.name.length)
case (Lambda(p1, a1), Lambda(p2, a2)) => p1 == p2 && a1 ≟ a2
case (Apply(f1, a1), Apply(f2, a2)) => f1 ≟ f2 && a1 ≟ a2
case (Let(n1, v1, i1), Let(n2, v2, i2)) => n1 == n2 && v1 ≟ v2 && i1 ≟ i2
case _ => false
}
}
implicit val ExpUnzip = new Unzip[Exp] {
def unzip[A, B](f: Exp[(A, B)]) = (f.map(_._1), f.map(_._2))
}
implicit val ExpBinder: Binder[Exp] = new Binder[Exp] {
type G[A] = Map[Symbol, A]
def initial[A] = Map[Symbol, A]()
def bindings[A](t: Exp[Term[Exp]], b: G[A])(f: Exp[Term[Exp]] => A) =
t match {
case Let(name, value, _) => b + (name -> f(value.unFix))
case _ => b
}
def subst[A](t: Exp[Term[Exp]], b: G[A]) = t match {
case Var(symbol) => b.get(symbol)
case _ => None
}
}
}
class ExpSpec extends Spec {
import Exp._
implicit val arbExpInt: Arbitrary[Exp[Int]] = arbExp(Arbitrary.arbInt)
checkAll(traverse.laws[Exp])
}
class FixplateSpecs extends Specification with ScalaCheck with ScalazMatchers {
import Exp._
implicit def arbTerm[F[_]]:
(Arbitrary ~> λ[α => Arbitrary[F[α]]]) => Arbitrary[Term[F]] =
new ((Arbitrary ~> λ[α => Arbitrary[F[α]]]) => Arbitrary[Term[F]]) {
def apply(FA: Arbitrary ~> λ[α => Arbitrary[F[α]]]):
Arbitrary[Term[F]] =
Arbitrary(Gen.sized(size =>
FA(
if (size <= 0)
Arbitrary(Gen.fail[Term[F]])
else
Arbitrary(Gen.resize(size - 1, arbTerm(FA).arbitrary))).arbitrary.map(Term(_))))
}
val example1ƒ: Exp[Option[Int]] => Option[Int] = {
case Num(v) => Some(v)
case Mul(left, right) => (left |@| right)(_ * _)
case Var(v) => None
case Lambda(_, b) => b
case Apply(func, arg) => None
case Let(_, _, i) => i
}
val addOne: Term[Exp] => Term[Exp] = _.unFix match {
case Num(n) => num(n+1)
case t => Term[Exp](t)
}
val simplify: Term[Exp] => Term[Exp] = _.unFix match {
case Mul(Term(Num(0)), Term(Num(_))) => num(0)
case Mul(Term(Num(1)), Term(Num(n))) => num(n)
case Mul(Term(Num(_)), Term(Num(0))) => num(0)
case Mul(Term(Num(n)), Term(Num(1))) => num(n)
case t => Term[Exp](t)
}
val addOneOrSimplify: Term[Exp] => Term[Exp] = t => t.unFix match {
case Num(_) => addOne(t)
case Mul(_, _) => simplify(t)
case _ => t
}
"Term" should {
"isLeaf" should {
"be true for simple literal" in {
num(1).isLeaf must beTrue
}
"be false for expression" in {
mul(num(1), num(2)).isLeaf must beFalse
}
}
"children" should {
"be empty for simple literal" in {
num(1).children must_== Nil
}
"contain sub-expressions" in {
mul(num(1), num(2)).children must_== List(num(1), num(2))
}
}
"universe" should {
"be one for simple literal" in {
num(1).universe must_== List(num(1))
}
"contain root and sub-expressions" in {
mul(num(1), num(2)).universe must_== List(mul(num(1), num(2)), num(1), num(2))
}
}
"transform" should {
"change simple literal" in {
num(1).transform(addOne) must_== num(2)
}
"change sub-expressions" in {
mul(num(1), num(2)).transform(addOne) must_== mul(num(2), num(3))
}
"be bottom-up" in {
mul(num(0), num(1)).transform(addOneOrSimplify) must_== num(2)
mul(num(1), num(2)).transform(addOneOrSimplify) must_== mul(num(2), num(3))
}
}
"topDownTransform" should {
"change simple literal" in {
num(1).topDownTransform(addOne) must_== num(2)
}
"change sub-expressions" in {
mul(num(1), num(2)).topDownTransform(addOne) must_== mul(num(2), num(3))
}
"be top-down" in {
mul(num(0), num(1)).topDownTransform(addOneOrSimplify) must_== num(0)
mul(num(1), num(2)).topDownTransform(addOneOrSimplify) must_== num(2)
}
}
"foldMap" should {
"fold stuff" in {
mul(num(0), num(1)).foldMap(_ :: Nil) must_== mul(num(0), num(1)) :: num(0) :: num(1) :: Nil
}
}
"descend" should {
"not apply at the root" in {
num(0).descend(addOne) must_== num(0)
}
"apply at children" in {
mul(num(0), num(1)).descend(addOne) must_== mul(num(1), num(2))
}
"not apply below children" in {
mul(num(0), mul(num(1), num(2))).descend(addOne) must_== mul(num(1), mul(num(1), num(2)))
}
}
// NB: unlike most of the operators `descend` is not implemented with `descendM`
"descendM" should {
val addOneOpt: Term[Exp] => Option[Term[Exp]] = t => Some(addOne(t))
"not apply at the root" in {
num(0).descendM(addOneOpt) must_== Some(num(0))
}
"apply at children" in {
mul(num(0), num(1)).descendM(addOneOpt) must_== Some(mul(num(1), num(2)))
}
"not apply below children" in {
mul(num(0), mul(num(1), num(2))).descendM(addOneOpt) must_== Some(mul(num(1), mul(num(1), num(2))))
}
}
"rewrite" should {
"apply more than once" in {
val f: PartialFunction[Term[Exp], Term[Exp]] = {
case Term(Num(2)) => num(1)
case Term(Num(1)) => num(0)
}
mul(num(2), num(3)).rewrite(f.lift) must_== mul(num(0), num(3))
}
}
"restructure" should {
type E[A] = (Exp[A], Int)
def eval(t: Exp[Term[E]]): E[Term[E]] = t match {
case Num(x) => (t, x)
case Mul(Term((_, c1)), Term((_, c2))) => (t, c1 * c2)
case _ => ???
}
"evaluate simple expr" in {
val v = mul(num(1), mul(num(2), num(3)))
v.restructure(eval).unFix._2 must_== 6
}
}
def eval(t: Exp[Int]): Int = t match {
case Num(x) => x
case Mul(x, y) => x*y
case _ => ???
}
def findConstants(t: Exp[List[Int]]): List[Int] = t match {
case Num(x) => x :: Nil
case _ => t.fold
}
"cata" should {
"evaluate simple expr" in {
val v = mul(num(1), mul(num(2), num(3)))
v.cata(eval) must_== 6
}
"find all constants" in {
mul(num(0), num(1)).cata(findConstants) must_== List(0, 1)
}
"produce correct annotations for 5 * 2" in {
mul(num(5), num(2)).cata(example1ƒ) must beSome(10)
}
}
"zipCata" should {
"both eval and find all constants" in {
mul(num(5), num(2)).cata(zipCata(eval, findConstants)) must_==
((10, List(5, 2)))
}
}
"liftPara" should {
"behave like cata" in {
val v = mul(num(1), mul(num(2), num(3)))
v.para(liftPara(eval)) must_== v.cata(eval)
}
}
"liftHisto" should {
"behave like cata" in {
val v = mul(num(1), mul(num(2), num(3)))
v.histo(liftHisto(eval)) must_== v.cata(eval)
}
}
"liftApo" should {
"behave like ana" ! prop { (i: Int) =>
apo(i)(liftApo(extractFactors)) must_== ana(i)(extractFactors)
}
}
"liftFutu" should {
"behave like ana" ! prop { (i: Int) =>
futu(i)(liftFutu(extractFactors)) must_== ana(i)(extractFactors)
}
}
"topDownCata" should {
def subst(vars: Map[Symbol, Term[Exp]], t: Term[Exp]): (Map[Symbol, Term[Exp]], Term[Exp]) = t.unFix match {
case Let(sym, value, body) => (vars + (sym -> value), body)
case Var(sym) => (vars, vars.get(sym).getOrElse(t))
case _ => (vars, t)
}
"bind vars" in {
val v = let('x, num(1), mul(num(0), vari('x)))
v.topDownCata(Map.empty[Symbol, Term[Exp]])(subst) must_== mul(num(0), num(1))
}
}
"trans" should {
// TODO
}
// Evaluate as usual, but trap 0*0 as a special case
def peval(t: Exp[(Term[Exp], Int)]): Int = t match {
case Mul((Term(Num(0)), _), (Term(Num(0)), _)) => -1
case Num(x) => x
case Mul((_, x), (_, y)) => x * y
case _ => ???
}
"para" should {
"evaluate simple expr" in {
val v = mul(num(1), mul(num(2), num(3)))
v.para(peval) must_== 6
}
"evaluate special-case" in {
val v = mul(num(0), num(0))
v.para(peval) must_== -1
}
"evaluate equiv" in {
val v = mul(num(0), mul(num(0), num(1)))
v.para(peval) must_== 0
}
}
"gpara" should {
"behave like para" in {
val v = mul(num(0), mul(num(0), num(1)))
v.gpara[Id, Int](
new (λ[α => Exp[Id[α]]] ~> λ[α => Id[Exp[α]]]) {
def apply[A](ex: Exp[Id[A]]): Id[Exp[A]] =
ex.map(_.copoint).point[Id]
},
expr => { peval(expr.map(_.runEnvT)) }) must_== 0
}
}
"distCata" should {
"behave like cata" in {
val v = mul(num(0), mul(num(0), num(1)))
v.gcata[Id, Int](distCata, eval) must_== v.cata(eval)
}
}
"distPara" should {
"behave like para" in {
val v = mul(num(0), mul(num(0), num(1)))
v.gcata[(Term[Exp], ?), Int](distPara, peval) must_== v.para(peval)
}
}
"apo" should {
"pull out factors of two" in {
def f(x: Int): Exp[Term[Exp] \/ Int] =
if (x % 2 == 0) Mul(-\/(num(2)), \/-(x/2))
else Num(x)
apo(12)(f) must_== mul(num(2), mul(num(2), num(3)))
}
"construct factorial" in {
def fact(x: Int): Exp[Term[Exp] \/ Int] =
if (x > 1) Mul(-\/(num(x)), \/-(x-1))
else Num(x)
apo(4)(fact) must_== mul(num(4), mul(num(3), mul(num(2), num(1))))
}
}
def extractFactors(x: Int): Exp[Int] =
if (x > 2 && x % 2 == 0) Mul(2, x/2)
else Num(x)
"ana" should {
"pull out factors of two" in {
ana(12)(extractFactors) must_== mul(num(2), mul(num(2), num(3)))
}
}
"distAna" should {
"behave like ana" ! prop { (i: Int) =>
gana[Id, Exp, Int](i)(distAna, extractFactors) must_== ana(i)(extractFactors)
}
}
"hylo" should {
"factor and then evaluate" ! prop { (i: Int) =>
hylo(i)(eval, extractFactors) must_== i
}
}
def strings(t: Exp[(Int, String)]): String = t match {
case Num(x) => x.toString
case Mul((x, xs), (y, ys)) =>
xs + " (" + x + ")" + ", " + ys + " (" + y + ")"
case _ => ???
}
"zygo" should {
"eval and strings" in {
mul(mul(num(0), num(0)), mul(num(2), num(5))).zygo(eval, strings) must_==
"0 (0), 0 (0) (0), 2 (2), 5 (5) (10)"
}
}
"paraZygo" should {
"peval and strings" in {
mul(mul(num(0), num(0)), mul(num(2), num(5))).paraZygo(peval, strings) must_==
"0 (0), 0 (0) (-1), 2 (2), 5 (5) (10)"
}
}
// NB: This is better done with cata, but we fake it here
def partialEval(t: Exp[Cofree[Exp, Term[Exp]]]): Term[Exp] = t match {
case Mul(x, y) => (x.head.unFix, y.head.unFix) match {
case (Num(a), Num(b)) => num(a * b)
case _ => Term(t.map(_.head))
}
case _ => Term(t.map(_.head))
}
"histo" should {
"eval simple literal multiplication" in {
mul(num(5), num(10)).histo(partialEval) must_== num(50)
}
"partially evaluate mul in lambda" in {
lam('foo, mul(mul(num(4), num(7)), vari('foo))).histo(partialEval) must_==
lam('foo, mul(num(28), vari('foo)))
}
}
def extract2and3(x: Int): Exp[Free[Exp, Int]] =
// factors all the way down
if (x > 2 && x % 2 == 0) Mul(Free.point(2), Free.point(x/2))
// factors once and then stops
else if (x > 3 && x % 3 == 0)
Mul(Free.liftF(Num(3)), Free.liftF(Num(x/3)))
else Num(x)
"futu" should {
"factor multiples of two" in {
futu(8)(extract2and3) must_== mul(num(2), mul(num(2), num(2)))
}
"factor multiples of three" in {
futu(81)(extract2and3) must_== mul(num(3), num(27))
}
"factor 3 within 2" in {
futu(324)(extract2and3) must_== mul(num(2), mul(num(2), mul(num(3), num(27))))
}
}
"chrono" should {
"factor and partially eval" ! prop { (i: Int) =>
chrono(i)(partialEval, extract2and3) must_== num(i)
}
}
"RenderTree" should {
import slamdata.engine.{RenderTree}
"render nodes and leaves" in {
mul(num(0), num(1)).shows must_==
"""Mul
|├─ Num(0)
|╰─ Num(1)""".stripMargin
}
}
}
// NB: This really tests stuff in the fp package, but that exists for Term,
// and here we have a fixpoint data type using Term, so …
"EqualF" should {
"be true for same expr" in {
mul(num(0), num(1)) ≟ mul(num(0), num(1)) must beTrue
}
"be false for different types" in {
num(0) ≠ vari('x) must beTrue
}
"be false for different children" in {
mul(num(0), num(1)) ≠ mul(num(2), num(3)) must beTrue
}
"be true for variables with matching prefixes" in {
vari('abc1) ≟ vari('abc2) must beTrue
}
"be true for sub-exprs with variables with matching prefixes" in {
mul(num(1), vari('abc1)) ≟ mul(num(1), vari('abc2)) must beTrue
}
"be implemented for unfixed exprs" in {
Mul(num(1), vari('abc1)) ≟ Mul(num(1), vari('abc2)) must beTrue
// NB: need to cast both terms to a common type
def exp(x: Exp[Term[Exp]]) = x
exp(Mul(num(1), vari('abc1))) ≠ exp(Num(1)) must beTrue
}
}
"Holes" should {
"holes" should {
"find none" in {
holes(Num(0)) must_== Num(0)
}
"find and replace two children" in {
(holes(mul(num(0), num(1)).unFix) match {
case Mul((Term(Num(0)), f1), (Term(Num(1)), f2)) =>
f1(num(2)) must_== Mul(num(2), num(1))
f2(num(2)) must_== Mul(num(0), num(2))
case r => failure
}): org.specs2.execute.Result
}
}
"holesList" should {
"find none" in {
holesList(Num(0)) must_== Nil
}
"find and replace two children" in {
(holesList(mul(num(0), num(1)).unFix) match {
case (t1, f1) :: (t2, f2) :: Nil =>
t1 must_== num(0)
f1(num(2)) must_== Mul(num(2), num(1))
t2 must_== num(1)
f2(num(2)) must_== Mul(num(0), num(2))
case _ => failure
}): org.specs2.execute.Result
}
}
"project" should {
"not find child of leaf" in {
project(0, num(0).unFix) must beNone
}
"find first child of simple expr" in {
project(0, mul(num(0), num(1)).unFix) must beSome(num(0))
}
"not find child with bad index" in {
project(-1, mul(num(0), num(1)).unFix) must beNone
project(2, mul(num(0), num(1)).unFix) must beNone
}
}
"sizeF" should {
"be 0 for flat" in {
sizeF(Num(0)) must_== 0
}
"be 2 for simple expr" in {
sizeF(mul(num(0), num(1)).unFix) must_== 2
}
"be non-recursive" in {
sizeF(mul(num(0), mul(num(1), num(2))).unFix) must_== 2
}
}
}
"Attr" should {
"attrSelf" should {
"annotate all" ! Prop.forAll(expGen) { exp =>
universe(attrSelf(exp)) must equal(exp.universe.map(attrSelf(_)))
}
}
"forget" should {
"forget unit" ! Prop.forAll(expGen) { exp =>
forget(attrUnit(exp)) must_== exp
}
}
"foldMap" should {
"zeros" ! Prop.forAll(expGen) { exp =>
Foldable[Cofree[Exp, ?]].foldMap(attrK(exp, 0))(_ :: Nil) must_== exp.universe.map(κ(0))
}
"selves" ! Prop.forAll(expGen) { exp =>
Foldable[Cofree[Exp, ?]].foldMap(attrSelf(exp))(_ :: Nil) must_== exp.universe
}
}
"RenderTree" should {
"render simple nested expr" in {
implicit def RU = new RenderTree[Unit] { def render(v: Unit) = Terminal(List("()"), None) }
attrUnit(mul(num(0), num(1))).shows must_==
"""Mul
|├─ Annotation
|│ ╰─ ()
|├─ Num(0)
|│ ╰─ Annotation
|│ ╰─ ()
|╰─ Num(1)
| ╰─ Annotation
| ╰─ ()""".stripMargin
}
}
"zip" should {
"tuplify simple constants" ! Prop.forAll(expGen) { exp =>
unsafeZip2(attrK(exp, 0), attrK(exp, 1)) must
equal(attrK(exp, (0, 1)))
}
}
"bound combinator" should {
val Example2 = let('foo, num(5), mul(vari('foo), num(2)))
"produce incorrect annotations when not used in let expression" in {
Example2.cata(example1ƒ) must beNone
}
"produce correct annotations when used in let expression" in {
boundCata(Example2)(example1ƒ) must beSome(10)
}
}
}
def expGen = Gen.resize(100, arbTerm(arbExp).arbitrary)
}
}
| wemrysi/quasar | core/src/test/scala/slamdata/engine/analysis/fixplate.scala | Scala | apache-2.0 | 20,885
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.bwsw.tstreams.common
import org.scalatest.{FlatSpec, Matchers}
/**
* Created by Ivan A. Kudryavtsev on 11.06.17.
*/
class PartitionIterationPolicyTests extends FlatSpec with Matchers {
val PARTITION_COUNT = 3
class TestPartitionIterationPolicy(count: Int, set: Set[Int]) extends PartitionIterationPolicy(count, set) {
override def getNextPartition: Int = 0
}
it should "handle proper partition set correctly" in {
val partitions = Set(0,1,2)
new TestPartitionIterationPolicy(PARTITION_COUNT, partitions)
}
it should "handle improper partition set correctly" in {
val partitions = Set(0,1,4)
intercept[IllegalArgumentException] {
new TestPartitionIterationPolicy(PARTITION_COUNT, partitions)
}
}
it should "handle correctly empty set" in {
val partitions = Set.empty[Int]
intercept[IllegalArgumentException] {
new TestPartitionIterationPolicy(PARTITION_COUNT, partitions)
}
}
it should "handle getCurrentPartition correctly" in {
val partitions = Set(0,1,2)
val p = new TestPartitionIterationPolicy(PARTITION_COUNT, partitions)
p.getCurrentPartition shouldBe 0
}
it should "handle startNewRound correctly" in {
val partitions = Set(0,1,2)
val p = new TestPartitionIterationPolicy(PARTITION_COUNT, partitions)
p.startNewRound()
p.getCurrentPartition shouldBe 0
}
}
| bwsw/t-streams | src/test/scala/com/bwsw/tstreams/common/PartitionIterationPolicyTests.scala | Scala | apache-2.0 | 2,198 |
package services.migration.r2
import model.{MigrationBatch, SourceContent}
import play.Logger
import services.migration.{MigrationBatchParams, ThrottleControl}
import scala.concurrent.Future
abstract class R2CrosswordMigratorService(client : R2IntegrationAPIClient) extends R2MigrationService {
import ThrottleControl._
import play.api.libs.concurrent.Execution.Implicits._
private def loadContentWithThrottle(id : Integer) = {
r2ThrottlerFt[SourceContent]{
client.loadCrosswordById(id)
}
}
def loadContentById(id : Integer) = loadContentWithThrottle(id)
def getBatchOfContentIds(params : MigrationBatchParams) =
client.getBatchOfCrosswordIds(params)
def loadBatchOfContent(params: MigrationBatchParams) : Future[MigrationBatch] = {
def mapIdsToCrosswords(ids: Future[List[Int]]) = {
def idsToCrosswords(ids : List[Int]) = ids.map(loadContentWithThrottle(_))
ids.map{idsToCrosswords(_)}.flatMap(Future.sequence(_))
}
val ids = client.getBatchOfCrosswordIds(params)
val crosswords = mapIdsToCrosswords(ids)
crosswords.map(loadedCrosswords => {
Logger.info(s"Loaded the batch of ${params.batchSize} crosswords from R2")
new MigrationBatch(loadedCrosswords)
})
}
def loadIndividualContent(crosswordId : Int) : Future[SourceContent] = loadContentWithThrottle(crosswordId)
def migrateContentInR2(crosswordId : Int, composerId : String) : Future[(Boolean, String)] = {
r2ThrottlerFt[(Boolean, String)]{
client.migrateCrosswordInR2(crosswordId, composerId)
}
}
}
object R2CrosswordMigratorServiceImpl extends R2CrosswordMigratorService(new R2IntegrationAPIClient()){}
| guardian/flex-content-migrator | app/services/migration/r2/R2CrosswordMigratorService.scala | Scala | mit | 1,626
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.lucidworks.spark.analysis
import java.io.{PrintWriter, Reader, StringWriter}
import java.util.regex.Pattern
import com.lucidworks.spark.util.Utils
import org.apache.commons.io.IOUtils
import org.apache.lucene.analysis.custom.CustomAnalyzer
import org.apache.lucene.analysis.tokenattributes.{CharTermAttribute, OffsetAttribute, PositionIncrementAttribute}
import org.apache.lucene.analysis.{Analyzer, DelegatingAnalyzerWrapper, TokenStream}
import org.apache.lucene.util.{Version => LuceneVersion}
import org.apache.solr.schema.JsonPreAnalyzedParser
import org.json4s.jackson.JsonMethods._
import org.json4s.jackson.Serialization
import scala.collection.JavaConversions._
import scala.collection.immutable
import scala.collection.mutable
import scala.util.control.Breaks._
import scala.util.control.NonFatal
/**
* This class allows simple access to custom Lucene text processing pipelines, a.k.a. text analyzers,
* which are specified via a JSON schema that hosts named analyzer specifications and mappings from
* field name(s) to analyzer(s).
*
* Here's an example schema with descriptions inline as comments:
* {{{
* {
* "defaultLuceneMatchVersion": "7.0.0" // Optional. Supplied to analysis components
* // that don't explicitly specify "luceneMatchVersion".
* "analyzers": [ // Optional. If not included, all field mappings must be
* { // to fully qualified class names of Lucene Analyzer subclasses.
* "name": "html", // Required. Mappings in the "fields" array below refer to this name.
* "charFilters":[{ // Optional.
* "type": "htmlstrip" // Required. "htmlstrip" is the SPI name for HTMLStripCharFilter
* }],
* "tokenizer": { // Required. Only one allowed.
* "type": "standard" // Required. "standard" is the SPI name for StandardTokenizer
* },
* "filters": [{ // Optional.
* "type": "stop", // Required. "stop" is the SPI name for StopFilter
* "ignoreCase": "true", // Component-specific params
* "format": "snowball",
* "words": "org/apache/lucene/analysis/snowball/english_stop.txt"
* }, {
* "type": "lowercase" // Required. "lowercase" is the SPI name for LowerCaseFilter
* }]
* },
* { "name": "stdtok", "tokenizer": { "type": "standard" } }
* ],
* "fields": [{ // Required. To lookup an analyzer for a field, first the "name"
* // mappings are consulted, and then the "regex" mappings are
* // tested, in the order specified.
* "name": "keywords", // Either "name" or "regex" is required. "name" matches the field name exactly.
* "analyzer": "org.apache.lucene.analysis.core.KeywordAnalyzer" // FQCN of an Analyzer subclass
* }, {
* "regex": ".*html.*" // Either "name" or "regex" is required. "regex" must match the whole field name.
* "analyzer": "html" // Reference to the named analyzer specified in the "analyzers" section.
* }, {
* "regex": ".+", // Either "name" or "regex" is required. "regex" must match the whole field name.
* "analyzer": "stdtok" // Reference to the named analyzer specified in the "analyzers" section.
* }]
* }
* }}}
*/
class LuceneTextAnalyzer(analysisSchema: String) extends Serializable {
@transient private lazy val analyzerSchema = new AnalyzerSchema(analysisSchema)
@transient private lazy val analyzerCache = mutable.Map.empty[String, Analyzer]
def isValid: Boolean = analyzerSchema.isValid
def invalidMessages: String = analyzerSchema.invalidMessages.result()
/** Returns the analyzer mapped to the given field in the configured analysis schema, if any. */
def getFieldAnalyzer(field: String): Option[Analyzer] = analyzerSchema.getAnalyzer(field)
def analyze(field: String, o: Any): Seq[String] = {
o match {
case s: String => analyze(field, s)
case as: mutable.WrappedArray[String] @unchecked => analyzeMV(field, as)
case a: Any => analyze(field, a.toString)
case _ => Seq.empty[String]
}
}
def analyzeJava(field: String, o: Any): java.util.List[String] = {
seqAsJavaList(analyze(field, o))
}
/** Looks up the analyzer mapped to the given field from the configured analysis schema,
* uses it to perform analysis on the given string, returning the produced token sequence.
*/
def analyze(field: String, str: String): Seq[String] = {
if ( ! isValid) throw new IllegalArgumentException(invalidMessages)
if (str == null) return Seq.empty[String]
analyze(tokenStream(field, str))
}
/** Looks up the analyzer mapped to the given field from the configured analysis schema,
* uses it to perform analysis on the given reader, returning the produced token sequence.
*/
def analyze(field: String, reader: Reader): Seq[String] = {
if ( ! isValid) throw new IllegalArgumentException(invalidMessages)
analyze(tokenStream(field, reader))
}
/** For each of the field->value pairs in fieldValues, looks up the analyzer mapped
* to the field from the configured analysis schema, and uses it to perform analysis on the
* value. Returns a map from the fields to the produced token sequences.
*/
def analyze(fieldValues: immutable.Map[String,String]): immutable.Map[String,Seq[String]] = {
val builder = immutable.Map.newBuilder[String,Seq[String]]
for ((field, value) <- fieldValues) builder += field -> analyze(field, value)
builder.result()
}
/** Looks up the analyzer mapped to the given field from the configured analysis schema,
* uses it to perform analysis on each of the given values, and returns the flattened
* concatenation of the produced token sequence.
*/
def analyzeMV(field: String, values: Seq[String]): Seq[String] = {
if (values == null) return Seq.empty[String]
val seqBuilder = Seq.newBuilder[String]
values foreach { value => seqBuilder ++= analyze(field, value) }
seqBuilder.result()
}
/** For each of the field->multi-value pairs in fieldValues, looks up the analyzer mapped
* to the field from the configured analysis schema, and uses it to perform analysis on the
* each of the values. Returns a map from the fields to the flattened concatenation of the
* produced token sequences.
*/
def analyzeMV(fieldValues: immutable.Map[String,Seq[String]]): immutable.Map[String,Seq[String]] = {
val builder = immutable.Map.newBuilder[String,Seq[String]]
for ((field, values) <- fieldValues) { builder += field -> analyzeMV(field, values) }
builder.result()
}
/** Java-friendly version: looks up the analyzer mapped to the given field from the configured
* analysis schema, uses it to perform analysis on the given string, returning the produced
* token sequence. */
def analyzeJava(field: String, str: String): java.util.List[String] = {
seqAsJavaList(analyze(field, str))
}
/** Java-friendly version: looks up the analyzer mapped to the given field from the configured
* analysis schema, uses it to perform analysis on the given reader, returning the produced
* token sequence. */
def analyzeJava(field: String, reader: Reader): java.util.List[String] = {
seqAsJavaList(analyze(field, reader))
}
/** Java-friendly version: for each of the field->value pairs in fieldValues, looks up the
* analyzer mapped to the field from the configured analysis schema, and uses it to perform
* analysis on the value. Returns a map from the fields to the produced token sequences.
*/
def analyzeJava(fieldValues: java.util.Map[String,String]): java.util.Map[String,java.util.List[String]] = {
val output = new java.util.HashMap[String,java.util.List[String]]()
for ((field, value) <- fieldValues) output.put(field, analyzeJava(field, value))
java.util.Collections.unmodifiableMap(output)
}
/** Java-friendly version: looks up the analyzer mapped to the given field from the configured
* analysis schema, uses it to perform analysis on each of the given values, and returns the
* flattened concatenation of the produced token sequence.
*/
def analyzeMVJava(field: String, values: java.util.List[String]): java.util.List[String] = {
if (values == null) return java.util.Collections.emptyList[String]()
val output = new java.util.ArrayList[String]()
values foreach { value => output.addAll(analyzeJava(field, value)) }
output
}
/** Java-friendly version: for each of the field->multi-value pairs in fieldValues, looks up the
* analyzer mapped to the field from the configured analysis schema, and uses it to perform
* analysis on each of the values. Returns a map from the fields to the flattened concatenation
* of the produced token sequences.
*/
def analyzeMVJava(fieldValues: java.util.Map[String,java.util.List[String]])
: java.util.Map[String,java.util.List[String]] = {
val output = new java.util.HashMap[String,java.util.List[String]]()
for ((field, values) <- fieldValues) output.put(field, analyzeMVJava(field, values))
java.util.Collections.unmodifiableMap(output)
}
/** Looks up the analyzer mapped to `fieldName` and returns a [[org.apache.lucene.analysis.TokenStream]]
* for the analyzer to tokenize the contents of `text`. */
def tokenStream(fieldName: String, text: String) = analyzerWrapper.tokenStream(fieldName, text)
/** Looks up the analyzer mapped to `fieldName` and returns a [[org.apache.lucene.analysis.TokenStream]]
* for the analyzer to tokenize the contents of `reader`. */
def tokenStream(fieldName: String, reader: Reader) = analyzerWrapper.tokenStream(fieldName, reader)
/** Looks up the analyzer mapped to the given field from the configured analysis schema,
* uses it to perform analysis on the given string, and returns a PreAnalyzedField-compatible
* JSON string with the following serialized attributes:
*
* - CharTermAttribute (token text)
* - OffsetAttribute (start and end character offsets)
* - PositionIncrementAttribute (token position relative to the previous token)
*
* If stored = true, the original string input value will be included as a value to be stored.
* (Note that the Solr schema for the destination Solr field must be configured to store the
* value; if it is not, then the stored value included in the JSON will be ignored by Solr.)
*/
def toPreAnalyzedJson(field: String, str: String, stored: Boolean): String = {
toPreAnalyzedJson(tokenStream(field, str), if (stored) Some(str) else None)
}
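// For orientation only (hand-written, abridged, not captured from an actual run):
// assuming a lowercasing analyzer is mapped to "body" and Solr's default
// JsonPreAnalyzedParser key names, a call like
//   toPreAnalyzedJson("body", "Hi There", stored = true)
// yields JSON shaped roughly like
//   {"v":"1","str":"Hi There","tokens":[
//     {"t":"hi","s":0,"e":2,"i":1},{"t":"there","s":3,"e":8,"i":1}]}
// i.e. term text, start/end character offsets and position increment per token,
// plus the original string because stored = true.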
/** Looks up the analyzer mapped to the given field from the configured analysis schema,
* uses it to perform analysis on the given reader, and returns a PreAnalyzedField-compatible
* JSON string with the following serialized attributes:
*
* - CharTermAttribute (token text),
* - OffsetAttribute (start and end position)
* - PositionIncrementAttribute (token position relative to the previous token)
*
* If stored = true, the original reader input value, read into a string, will be included as
* a value to be stored. (Note that the Solr schema for the destination Solr field must be
* configured to store the value; if it is not, then the stored value included in the JSON
* will be ignored by Solr.)
*/
def toPreAnalyzedJson(field: String, reader: Reader, stored: Boolean): String = {
if (stored)
toPreAnalyzedJson(field, IOUtils.toString(reader), stored = true)
else
toPreAnalyzedJson(tokenStream(field, reader), None)
}
private def toPreAnalyzedJson(stream: TokenStream, str: Option[String]): String = {
// Implementation note: Solr's JsonPreAnalyzedParser.toFormattedString() produces JSON
// suitable for use with PreAnalyzedField, but there are problems with using it with
// CustomAnalyzer:
//
// - toFormattedString() will serialize all attributes present on the passed-in token
// stream's AttributeSource, which is fixed by CustomAnalyzer at those in the
// PackedTokenAttributeImpl, which includes the PositionLengthAttribute and the
// TypeAttribute, neither of which are indexed, and so shouldn't be output
// (by default anyway) from this method.
// - To modify the set of attributes that CustomAnalyzer has in its AttributeSource,
// CustomAnalyzer can't be extended because it's final, so CustomAnalyzer's
// createComponents() method can't be overridden to pass in an alternate AttributeFactory
// to TokenizerFactory.create(). However, a wrapper can be constructed that forwards all
// methods except createComponents(), and then have createComponents() do the right thing.
// - Once an alternate AttributeFactory is used in an effectively overridden
// CustomAnalyzer.createComponents(), this form will be cached for future uses, but we
// don't want that, since it might conflict with the analyze*() methods' requirements,
// and future versions of toPreAnalyzedJson might allow for customization of attributes
// to output (including e.g. PayloadAttribute). So we would have to either use an
// alternate cache, or not cache analyzers used by toPreAnalyzedJson(), both of which
// seem overcomplicated.
//
// The code below constructs JSON with a fixed set of serialized attributes.
val termAtt = stream.addAttribute(classOf[CharTermAttribute])
val offsetAtt = stream.addAttribute(classOf[OffsetAttribute])
val posIncAtt = stream.addAttribute(classOf[PositionIncrementAttribute])
var tokens = List.newBuilder[immutable.ListMap[String, Any]]
val token = immutable.ListMap.newBuilder[String, Any]
try {
stream.reset()
while (stream.incrementToken) {
token.clear()
token += (JsonPreAnalyzedParser.TOKEN_KEY -> new String(termAtt.buffer, 0, termAtt.length))
token += (JsonPreAnalyzedParser.OFFSET_START_KEY -> offsetAtt.startOffset)
token += (JsonPreAnalyzedParser.OFFSET_END_KEY -> offsetAtt.endOffset)
token += (JsonPreAnalyzedParser.POSINCR_KEY -> posIncAtt.getPositionIncrement)
tokens += token.result
}
stream.end()
} finally {
stream.close()
}
val topLevel = immutable.ListMap.newBuilder[String, Any]
topLevel += (JsonPreAnalyzedParser.VERSION_KEY -> JsonPreAnalyzedParser.VERSION)
if (str.isDefined) topLevel += (JsonPreAnalyzedParser.STRING_KEY -> str)
topLevel += (JsonPreAnalyzedParser.TOKENS_KEY -> tokens.result)
implicit val formats = org.json4s.DefaultFormats // required by Serialization.write()
Serialization.write(topLevel.result)
}
@transient private lazy val analyzerWrapper = new AnalyzerWrapper
private class AnalyzerWrapper extends DelegatingAnalyzerWrapper(Analyzer.PER_FIELD_REUSE_STRATEGY) {
override protected def getWrappedAnalyzer(field: String): Analyzer = {
analyzerCache.synchronized {
var analyzer = analyzerCache.get(field)
if (analyzer.isEmpty) {
if (isValid) analyzer = analyzerSchema.getAnalyzer(field)
if ( ! isValid) throw new IllegalArgumentException(invalidMessages) // getAnalyzer can make isValid false
if (analyzer.isEmpty) throw new IllegalArgumentException(s"No analyzer defined for field '$field'")
analyzerCache.put(field, analyzer.get)
}
analyzer.get
}
}
}
private def analyze(inputStream: TokenStream): Seq[String] = {
val builder = Seq.newBuilder[String]
val charTermAttr = inputStream.addAttribute(classOf[CharTermAttribute])
inputStream.reset()
while (inputStream.incrementToken) builder += charTermAttr.toString
inputStream.end()
inputStream.close()
builder.result()
}
}
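// A minimal usage sketch added for illustration; it is not part of the original source.
// The object name, schema literal and sample values are made up, but the calls
// (constructor, isValid, invalidMessages, analyze) are the ones defined above.
object LuceneTextAnalyzerUsageExample {
  private val schema =
    """{
      |  "analyzers": [{ "name": "std",
      |                  "tokenizer": { "type": "standard" },
      |                  "filters": [{ "type": "lowercase" }] }],
      |  "fields": [{ "regex": ".+", "analyzer": "std" }]
      |}""".stripMargin

  def main(args: Array[String]): Unit = {
    val analyzer = new LuceneTextAnalyzer(schema)
    if (!analyzer.isValid) sys.error(analyzer.invalidMessages)
    // With the schema above this should print something like: List(quick, brown, fox)
    println(analyzer.analyze("title", "Quick BROWN Fox"))
  }
}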
private class AnalyzerSchema(val analysisSchema: String) {
implicit val formats = org.json4s.DefaultFormats // enable extract
val schemaConfig = parse(analysisSchema).extract[SchemaConfig]
val analyzers = mutable.Map[String, Analyzer]()
var isValid: Boolean = true
var invalidMessages : StringBuilder = new StringBuilder()
try {
schemaConfig.defaultLuceneMatchVersion.foreach { version =>
if ( ! LuceneVersion.parseLeniently(version).onOrAfter(LuceneVersion.LUCENE_7_0_0)) {
isValid = false
invalidMessages.append(
s"""defaultLuceneMatchVersion "${schemaConfig.defaultLuceneMatchVersion}"""")
.append(" is not on or after ").append(LuceneVersion.LUCENE_7_0_0).append("\n")
}
}
} catch {
case NonFatal(e) => isValid = false
invalidMessages.append(e.getMessage).append("\n")
}
schemaConfig.fields.foreach { field =>
if (field.name.isDefined) {
if (field.regex.isDefined) {
isValid = false
invalidMessages.append("""Both "name" and "regex" keys are defined in a field,"""
+ " but only one may be.\n")
}
} else if (field.regex.isEmpty) {
isValid = false
invalidMessages.append("""Neither "name" nor "regex" key is defined in a field,""").
append(" but one must be.\n")
}
if (schemaConfig.namedAnalyzerConfigs.get(field.analyzer).isEmpty) {
def badAnalyzerMessage(suffix: String): Unit = {
invalidMessages.append(s"""field "${field.fieldRef}": """)
.append(s""" analyzer "${field.analyzer}" """).append(suffix)
}
try { // Attempt to interpret the analyzer as a fully qualified class name
Utils.classForName(field.analyzer).asInstanceOf[Class[_ <: Analyzer]]
} catch {
case _: ClassNotFoundException => isValid = false
badAnalyzerMessage("not found.\n")
case _: ClassCastException => isValid = false
badAnalyzerMessage("is not a subclass of org.apache.lucene.analysis.Analyzer")
}
}
}
def getAnalyzer(fieldName: String): Option[Analyzer] = {
var analyzer: Option[Analyzer] = None
if (isValid) {
var fieldConfig = schemaConfig.namedFields.get(fieldName)
if (fieldConfig.isEmpty) {
breakable {
schemaConfig.fields.filter(c => c.regex.isDefined).foreach { field =>
if (field.pattern matcher fieldName matches()) {
fieldConfig = Some(field)
break
}
}
}
}
if (fieldConfig.isDefined) {
val analyzerConfig = schemaConfig.namedAnalyzerConfigs.get(fieldConfig.get.analyzer)
if (analyzerConfig.isDefined) {
analyzer = analyzers.get(analyzerConfig.get.name)
if (analyzer.isEmpty) try {
analyzer = Some(buildAnalyzer(analyzerConfig.get))
analyzers.put(analyzerConfig.get.name, analyzer.get)
} catch {
case NonFatal(e) => isValid = false
val writer = new StringWriter
writer.write(s"Exception initializing analyzer '${analyzerConfig.get.name}': ")
e.printStackTrace(new PrintWriter(writer))
invalidMessages.append(writer.toString).append("\n")
}
} else {
try {
val clazz = Utils.classForName(fieldConfig.get.analyzer)
analyzer = Some(clazz.newInstance.asInstanceOf[Analyzer])
schemaConfig.defaultLuceneMatchVersion foreach { version =>
analyzer.get.setVersion(LuceneVersion.parseLeniently(version))
}
} catch {
case NonFatal(e) => isValid = false
val writer = new StringWriter
writer.write(s"Exception initializing analyzer '${fieldConfig.get.analyzer}': ")
e.printStackTrace(new PrintWriter(writer))
invalidMessages.append(writer.toString).append("\n")
}
}
}
}
analyzer
}
private def buildAnalyzer(analyzerConfig: AnalyzerConfig): Analyzer = {
var builder = CustomAnalyzer.builder()
if (schemaConfig.defaultLuceneMatchVersion.isDefined) {
builder = builder.withDefaultMatchVersion(
LuceneVersion.parseLeniently(schemaConfig.defaultLuceneMatchVersion.get))
}
// Builder methods' param maps must be mutable to enable put("luceneMatchVersion", ...)
if (analyzerConfig.charFilters.isDefined) {
for (charFilter <- analyzerConfig.charFilters.get) {
val charFilterNoType = mutable.Map[String, String]() ++ (charFilter - "type")
builder = builder.addCharFilter(charFilter("type"), charFilterNoType)
}
}
val tokenizerNoType = mutable.Map[String, String]() ++ (analyzerConfig.tokenizer - "type")
builder = builder.withTokenizer(analyzerConfig.tokenizer("type"), tokenizerNoType)
if (analyzerConfig.filters.isDefined) {
for (filter <- analyzerConfig.filters.get) {
val filterNoType = mutable.Map[String, String]() ++ (filter - "type")
builder = builder.addTokenFilter(filter("type"), filterNoType)
}
}
builder.build()
}
}
private case class AnalyzerConfig(name: String,
charFilters: Option[List[Map[String, String]]],
tokenizer: Map[String, String],
filters: Option[List[Map[String, String]]])
private case class FieldConfig(regex: Option[String], name: Option[String], analyzer: String) {
val pattern: Pattern = regex.map(_.r.pattern).orNull
val fieldRef: String = name.getOrElse(regex.get)
}
private case class SchemaConfig(defaultLuceneMatchVersion: Option[String],
analyzers: List[AnalyzerConfig],
fields: List[FieldConfig]) {
val namedAnalyzerConfigs: Map[String, AnalyzerConfig] = analyzers.map(a => a.name -> a).toMap
val namedFields: Map[String, FieldConfig]
= fields.filter(c => c.name.isDefined).map(c => c.name.get -> c).toMap
}
}
| LucidWorks/spark-solr | src/main/scala/com/lucidworks/spark/analysis/LuceneTextAnalyzer.scala | Scala | apache-2.0 | 22,907
package fpis.datastructures
sealed trait Tree[+A]
case class Leaf[A](value: A) extends Tree[A]
case class Branch[A](left: Tree[A], right: Tree[A]) extends Tree[A]
object Tree {
/* Corrections based on the reference solutions at https://github.com/fpinscala/fpinscala/blob/master/answers/src/main/scala/fpinscala/datastructures/Tree.scala */
/* Exercise 3.25
* Write a function size that counts the number of nodes (leaves and branches) in a tree.
* */
def size[A](t: Tree[A]): Int = t match {
case Leaf(_) => 1
case Branch(l, r) => 1 + size(l) + size(r)
}
/* Exercise 3.26
* Write a function maximum that returns the maximum element in a Tree[Int]. (Note:
* In Scala, you can use x.max(y) or x max y to compute the maximum of two integers x and y.)
* */
def maximum(t: Tree[Int]): Int = t match {
case Leaf(v) => v
case Branch(l, r) => maximum(l) max maximum(r)
}
/* Exercise 3.27
* Write a function depth that returns the maximum path length from the root of a tree to any leaf.
* */
def depth[A](t: Tree[A]): Int = t match {
case Leaf(_) => 1
case Branch(l, r) => 1 + (depth(l) max depth(r))
}
/* Exercise 3.28
* Write a function map, analogous to the method of the same name on List,
* that modifies each element in a tree with a given function.
* */
def map[A, B](t: Tree[A])(f: A => B): Tree[B] = t match {
case Leaf(a) => Leaf(f(a))
case Branch(l, r) => Branch(map(l)(f), map(r)(f))
}
/* Exercise 3.29
* Generalize size, maximum, depth, and map, writing a new function fold that abstracts
* over their similarities. Re-implement them in terms of this more general function. Can
* you draw an analogy between this fold function and the left and right folds for List?
* */
def fold[A, B](t: Tree[A])(f: A => B)(g: (B, B) => B): B = t match {
case Leaf(a) => f(a)
case Branch(l, r) => g((fold(l)(f)(g)), (fold(r)(f)(g)))
}
def size_2[A](t: Tree[A]): Int = fold[A, Int](t)(_ => 1)((b1, b2) => (1 + b1 + b2))
def maximum_2(t: Tree[Int]): Int = fold[Int, Int](t)(a => a)((b1, b2) => b1 max b2)
def depth_2[A](t: Tree[A]): Int = fold[A, Int](t)(_ => 1)((b1, b2) => 1 + (b1 max b2))
def map_2[A, B](t: Tree[A])(f: A => B): Tree[B] = fold[A, Tree[B]](t)(a => Leaf(f(a)))((b1, b2) => Branch(b1, b2))
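/* A small worked example, added for illustration only (it is not one of the book's
 * exercises): a five-node tree exercised against the functions above, checking the
 * direct definitions against their fold-based counterparts. */
def demo(): Unit = {
  val t: Tree[Int] = Branch(Branch(Leaf(1), Leaf(2)), Leaf(3))
  assert(size(t) == 5 && size_2(t) == 5) // 3 leaves + 2 branches
  assert(maximum(t) == 3 && maximum_2(t) == 3)
  assert(depth(t) == 3 && depth_2(t) == 3)
  assert(map(t)(_ * 10) == Branch(Branch(Leaf(10), Leaf(20)), Leaf(30)))
  assert(map_2(t)(_ * 10) == map(t)(_ * 10))
}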
}
| TGITS/programming-workouts | scala/basic/fpis_chp3_functional_data_structures/src/main/scala/fpis/datastructures/Tree.scala | Scala | mit | 2,299
package github
import java.time.ZonedDateTime
import play.api.libs.json.JsValue
/**
* Subset of issue event. See https://developer.github.com/v3/issues/events/
*/
case class IssueEvent (
id: Long,
timestamp: ZonedDateTime,
event: String,
issue: Issue,
milestoneTitle: Option[String]
) {
def this (jsValue: JsValue) = this (
(jsValue \ "id").as[Long],
ZonedDateTime.parse((jsValue \ "created_at").as[String]),
(jsValue \ "event").as[String],
new Issue((jsValue \ "issue").as[JsValue]),
(jsValue \ "milestone" \ "title").asOpt[String]
)
}
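/* For illustration (abridged and hand-written, not a verbatim API response): the
 * secondary constructor above expects a GitHub issue-event payload shaped like
 *   { "id": 1, "created_at": "2017-06-11T12:00:00Z", "event": "milestoned",
 *     "issue": { ... }, "milestone": { "title": "v1.0" } }
 * where "milestone" is only present for milestone-related events, hence asOpt. */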
| shuwada/github-burnup-chart | src/main/scala/github/IssueEvent.scala | Scala | apache-2.0 | 576 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.kafka.confluent
import com.github.benmanes.caffeine.cache.{CacheLoader, Caffeine, LoadingCache}
import com.typesafe.scalalogging.LazyLogging
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient
import org.locationtech.geomesa.features.avro.AvroSimpleFeatureTypeParser
import org.locationtech.geomesa.index.metadata.GeoMesaMetadata
import org.locationtech.geomesa.kafka.confluent.ConfluentMetadata._
import org.locationtech.geomesa.kafka.data.KafkaDataStore
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.opengis.feature.simple.SimpleFeatureType
import java.util.concurrent.TimeUnit
import scala.collection.JavaConverters._
import scala.util.control.NonFatal
class ConfluentMetadata(val schemaRegistry: SchemaRegistryClient,
sftOverrides: Map[String, SimpleFeatureType] = Map.empty)
extends GeoMesaMetadata[String] with LazyLogging {
private val topicSftCache: LoadingCache[String, String] = {
Caffeine.newBuilder().expireAfterWrite(10, TimeUnit.MINUTES).build(
new CacheLoader[String, String] {
override def load(topic: String): String = {
try {
// use the overridden sft for the topic if it exists, else look it up in the registry
val sft = sftOverrides.getOrElse(topic, {
val subject = topic + SubjectPostfix
val schemaId = schemaRegistry.getLatestSchemaMetadata(subject).getId
val sft = AvroSimpleFeatureTypeParser.schemaToSft(schemaRegistry.getById(schemaId))
// store the schema id to access the schema when creating the feature serializer
sft.getUserData.put(SchemaIdKey, schemaId.toString)
sft
})
KafkaDataStore.setTopic(sft, topic)
SimpleFeatureTypes.encodeType(sft, includeUserData = true)
} catch {
case NonFatal(e) => logger.error("Error retrieving schema from confluent registry: ", e); null
}
}
}
)
}
override def getFeatureTypes: Array[String] = {
schemaRegistry.getAllSubjects.asScala.collect {
case s: String if s.endsWith(SubjectPostfix) => s.substring(0, s.lastIndexOf(SubjectPostfix))
}.toArray
}
override def read(typeName: String, key: String, cache: Boolean): Option[String] = {
if (key != GeoMesaMetadata.AttributesKey) {
logger.warn(s"Requested read on ConfluentMetadata with unsupported key $key. " +
s"ConfluentMetadata only supports ${GeoMesaMetadata.AttributesKey}")
None
} else {
if (!cache) {
topicSftCache.invalidate(typeName)
}
Option(topicSftCache.get(typeName))
}
}
override def invalidateCache(typeName: String, key: String): Unit = {
if (key != GeoMesaMetadata.AttributesKey) {
logger.warn(s"Requested invalidate cache on ConfluentMetadata with unsupported key $key. " +
s"ConfluentMetadata only supports ${GeoMesaMetadata.AttributesKey}")
} else {
topicSftCache.invalidate(typeName)
}
}
override def close(): Unit = {}
override def scan(typeName: String, prefix: String, cache: Boolean): Seq[(String, String)] =
throw new NotImplementedError(s"ConfluentMetadata only supports ${GeoMesaMetadata.AttributesKey}")
override def insert(typeName: String, key: String, value: String): Unit = {}
override def insert(typeName: String, kvPairs: Map[String, String]): Unit = {}
override def remove(typeName: String, key: String): Unit = {}
override def remove(typeName: String, keys: Seq[String]): Unit = {}
override def delete(typeName: String): Unit = {}
override def backup(typeName: String): Unit = {}
override def resetCache(): Unit = {}
}
object ConfluentMetadata {
// hardcoded to the default confluent uses (<topic>-value)
val SubjectPostfix = "-value"
// key in user data where avro schema id is stored
val SchemaIdKey = "geomesa.avro.schema.id"
}
| locationtech/geomesa | geomesa-kafka/geomesa-kafka-confluent/src/main/scala/org/locationtech/geomesa/kafka/confluent/ConfluentMetadata.scala | Scala | apache-2.0 | 4,438 |
package scadla.examples
import scadla._
import utils._
import Trig._
import InlineOps._
import scadla.EverythingIsIn.{millimeters, radians}
import squants.space.{Length, Angle, Degrees, Millimeters}
import scala.language.postfixOps // for mm notation
import squants.space.LengthConversions._ // for mm notation
/** A class for the small rollers in the mecanum wheel */
class Roller(height: Length, maxOuterRadius: Length, minOuterRadius: Length, innerRadius: Length) {
val axis = 0.5 mm
val h = height - 2*axis
protected def carveAxle(s: Solid) = s - Cylinder(innerRadius, height)
def outline = {
// r * f = maxOuterRadius
// r * f * cos(a) = minOuterRadius
// r * sin(a) = height/2
val a = acos(minOuterRadius/maxOuterRadius)
val r = h / 2 / sin(a)
val f = maxOuterRadius / r
val s = Sphere(r).scale(f, f, 1).moveZ(height/2)
val c1 = Cylinder(maxOuterRadius, h).moveZ(axis)
val c2 = Cylinder(minOuterRadius, height)
(s * c1) + c2
}
def solid = carveAxle(outline)
//to make only the "skeleton" of a roller,
//then it can be coated with oogoo to get better friction
def skeleton = {
val base =
carveAxle(
Cylinder(minOuterRadius, height) +
solid.scale(0.8, 0.8, 1)
)
val angle = 22.5° // π / 8
val grooveDepth = (maxOuterRadius - minOuterRadius) max 2
val inner = (maxOuterRadius - grooveDepth) max ((minOuterRadius + innerRadius) / 2)
val slice = PieSlice(maxOuterRadius, inner, angle, h).moveZ(axis)
(0 until 8).foldLeft(base)( (acc, i) => acc - slice.rotateZ(i*2*angle) )
}
//mold for a k x l grid of rollers
def mold(k: Int, l: Int) = {
val wall = 2 mm
val distToWall = wall + maxOuterRadius
val step = maxOuterRadius + distToWall
val flatRoller = Rotate(-90°, 0, 0, outline)
val row = {
val rs = for (i <- 0 until k) yield Translate( distToWall + i*step, 1, distToWall, flatRoller)
Union(rs:_*)
}
val rows = {
val rs = for (j <- 0 until l) yield row.moveY(j*(2+height))
Union(rs:_*)
}
val grooves = {
val w = wall * 0.4
val groove = CenteredCube.xz(w,l*(2+height),w).rotateY(Pi/4).moveZ(distToWall)
val gs = for (i <- 0 until (k-1)) yield groove.moveX( distToWall + (i+0.5)*step)
Union(gs:_*)
}
val base = Cube((k * step : Length) + wall, l*(2+height), distToWall)
base - grooves - rows
}
}
class MecanumWheel(radius: Length, width: Length, angle: Angle, nbrRollers: Int) {
//TODO ideally the projection of the arc created by the roller should match the shape of the wheel
//some more parameters
val tolerance = 0.15 mm
var centerAxleRadius = (2.5 mm) + tolerance
var shaftFlat = 0.45 mm
var rollerAxleRadius1 = (1.75 mm) / 2 + tolerance
var rollerAxleRadius2 = (1.0 mm) + tolerance
var rollerGap = 0.0 mm
var rollerRimGap = 0.5 mm
var mountThickness = 1.0 mm
//the rollers' dimensions
// innerR + maxR == radius
// 2*π*innerR == 1/cos(angle) * nbrRollers * (rollerGap + 2*maxR)
def maxR = {
val c1 = 2 * math.Pi / nbrRollers // circumference not angle
val c2 = 2 / cos(angle)
(radius * c1 - rollerGap) / (c1 + c2)
}
def innerR = radius - maxR
def minR = rollerAxleRadius2 + mountThickness
// width == cos(angle)*rollerHeight + 2*sin(angle) * minR + 2*cos(angle)*mountThickness
def rollerHeight = (width - 2*sin(angle.abs) * minR - 2*cos(angle.abs)*mountThickness) / cos(angle.abs)
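// Worked numbers for orientation (hand-computed from the formulas above, so treat as
// approximate): with the values used in MecanumWheel.main below -- radius = 25 mm,
// width = 18 mm, angle = π/6, nbrRollers = 12 and the default tolerances -- these
// definitions give maxR ~= 4.6 mm, innerR ~= 20.4 mm, minR = 2.15 mm and
// rollerHeight ~= 16.3 mm.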
def printParameters: Unit = {
Console.println("base parameters:")
Console.println(" radius: " + radius)
Console.println(" width: " + width)
Console.println(" angle: " + angle)
Console.println(" nbrRollers: " + nbrRollers)
Console.println(" rollerAxleRadius1: " + rollerAxleRadius1)
Console.println(" rollerAxleRadius2: " + rollerAxleRadius2)
Console.println(" rollerGap: " + rollerGap)
Console.println(" rollerRimGap: " + rollerRimGap)
Console.println(" mountThickness: " + mountThickness)
Console.println("derived parameters:")
Console.println(" maxR: " + maxR)
Console.println(" innerR: " + innerR)
Console.println(" minR: " + minR)
Console.println(" rollerHeight: " + rollerHeight)
}
def roller = new Roller(rollerHeight, maxR, minR, rollerAxleRadius2)
//assumes it is centered at (0,0,0)
protected def placeOnRim(s: Solid) = {
val oriented = s.rotate(angle, 0, 0).translate(innerR, 0, width/2)
val placed = for (i <- 0 until nbrRollers) yield oriented.rotate(0, 0, i * 2 * π / nbrRollers)
Union(placed:_*)
}
protected def axleHeight = rollerHeight+2*mountThickness+20
protected def rollersForCarving = {
val r1 = Hull(roller.solid, roller.solid.moveX(2*maxR))
val r2 = Bigger(r1, rollerRimGap).moveZ(-rollerHeight/2)
val c = Cylinder(rollerAxleRadius1, axleHeight).moveZ(-axleHeight/2)
placeOnRim(r2 + c)
}
protected def rollers = {
val r = roller.solid.moveZ(-rollerHeight/2)
//val c = Cylinder(rollerAxleRadius1, axleHeight).moveZ(-axleHeight/2)
placeOnRim(r) //+ c)
}
def rim = {
val base = Tube(innerR-maxR-rollerRimGap, centerAxleRadius, width)
val shaft = Translate(centerAxleRadius - shaftFlat, -centerAxleRadius/2, 0, Cube(2*centerAxleRadius, centerAxleRadius, width))
val op = width * tan(angle.abs) / 2
val ad = innerR
val hyp = hypot(op, ad)
val rth = minR*sin(angle.abs)*2 + mountThickness
val lowerRing = Tube(hyp + rollerAxleRadius1 + mountThickness, centerAxleRadius, rth)
val upperRing = lowerRing.moveZ(width - rth)
base + shaft + lowerRing + upperRing
}
def hub = Difference(rim, rollersForCarving)
//the hub in two halfs, easier to print
def hubHalves(nbrHoles: Int) = {
val angle = π * 2 / nbrHoles
val holeOffsetX = innerR / 2
val holeOffsetA = angle / 2
val holes = for(i <- 0 until nbrHoles) yield
Cylinder(rollerAxleRadius1, width).moveX(holeOffsetX).rotate(0, 0, holeOffsetA + i*angle)
val withHoles = hub -- holes
val lowerHalf = withHoles * Cylinder(innerR + maxR, width/2)
val upperHalf = withHoles * Cylinder(innerR + maxR, width/2).moveZ(width/2)
val kHeight = (width / 2 - 2) min 5
val kRadius = 1.5 mm
val knobX = holeOffsetX + kRadius - 1
val knob = Cylinder(kRadius, kHeight)
val knobs = for(i <- 0 until nbrHoles) yield knob.move(knobX, 0, width/2).rotate(0, 0, i*angle)
val lowerWithKnobs = lowerHalf ++ knobs
val upperWithKnobs = upperHalf -- knobs.map(Bigger(_, 2*tolerance))
(lowerWithKnobs, upperWithKnobs)
}
def hubHalvesPrintable(nbrHoles: Int) = {
val (l, h) = hubHalves(nbrHoles)
(l, h.rotate(π, 0, 0).moveZ(-width/2))
}
def assembled = Union(hub, rollers)
def assembly = {
import scadla.assembly._
val rollerP = new Part("roller", roller.solid)
val axleHeight = width / cos(angle)
val axle = new Part("filament, 1.75mm", Cylinder(rollerAxleRadius2, axleHeight))
axle.vitamin = true
val (lower,upper) = hubHalves(8)
val lowerP = new Part("hub, lower half", lower)
val upperP = new Part("hub, upper half", upper, Some(upper.rotate(π, 0, 0).moveZ(-width/2)))
val asmbl0 = Assembly("Mecanum wheel")
def place(as: Assembly, c: Assembly, w: Vector) = {
val jt = Joint.revolute(0,0,1,Millimeters)
val f0 = Frame(Vector(innerR.toMillimeters,0,width/2,Millimeters), Quaternion.mkRotation(angle, Vector(1,0,0,Millimeters)))
(0 until nbrRollers).foldLeft(as)( (acc, i) => {
val f1 = Frame(Vector(0,0,0,Millimeters), Quaternion.mkRotation(i * 2 * π / nbrRollers, Vector(0,0,1,Millimeters)))
val frame = f0.compose(f1)
acc.+(frame, jt, c, w)
})
}
val asmbl1 = asmbl0.+(Joint.fixed(0,0,-1,Millimeters), lowerP).+(Joint.fixed(0,0, 1,Millimeters), upperP)
val asmbl2 = place(asmbl1, rollerP, Vector(0,0,-rollerHeight/2,Millimeters))
place(asmbl2, axle, Vector(0,0, -axleHeight.toMillimeters, Millimeters))
}
}
object MecanumWheel {
def main(args: Array[String]): Unit = {
//a small version
val r = 25 mm
val w = 18 mm
val n = 12
val a = π / 6
val wheel1 = new MecanumWheel(r, w, a, n)
val wheel2 = new MecanumWheel(r, w,-a, n)
wheel1.printParameters
///* the parts */
//val (lower1, upper1) = wheel1.hubHalvesPrintable(8)
//val (lower2, upper2) = wheel2.hubHalvesPrintable(8)
//val roller = wheel1.roller.skeleton
//val mold = wheel1.roller.mold(6, 2)
///* save to files */
//backends.OpenSCAD.toSTL(lower1, "lower1.stl")
//backends.OpenSCAD.toSTL(upper1, "upper1.stl")
//backends.OpenSCAD.toSTL(lower2, "lower2.stl")
//backends.OpenSCAD.toSTL(upper2, "upper2.stl")
//backends.OpenSCAD.toSTL(roller, "roller.stl")
//backends.OpenSCAD.toSTL(mold, "mold.stl")
/* view the full wheel */
val obj = wheel1.assembled
backends.Renderer.default.view(obj)
    //backends.OpenSCAD.view(obj, Nil, Nil, Nil) //this version renders faster but with less detail
}
}
| dzufferey/scadla | src/main/scala/scadla/examples/MecanumWheel.scala | Scala | apache-2.0 | 9,051 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.trees
import java.util.UUID
import scala.collection.{mutable, Map}
import scala.reflect.ClassTag
import org.apache.commons.lang3.ClassUtils
import org.json4s.JsonAST._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import org.apache.spark.sql.catalyst.{AliasIdentifier, IdentifierWithDatabase}
import org.apache.spark.sql.catalyst.ScalaReflection._
import org.apache.spark.sql.catalyst.catalog.{BucketSpec, CatalogStorageFormat, CatalogTable, CatalogTableType, FunctionResource}
import org.apache.spark.sql.catalyst.errors._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.JoinType
import org.apache.spark.sql.catalyst.plans.QueryPlan
import org.apache.spark.sql.catalyst.plans.physical.{BroadcastMode, Partitioning}
import org.apache.spark.sql.catalyst.util.StringUtils.PlanStringConcat
import org.apache.spark.sql.catalyst.util.truncatedString
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.storage.StorageLevel
/** Used by [[TreeNode.getNodeNumbered]] when traversing the tree for a given number */
private class MutableInt(var i: Int)
case class Origin(
line: Option[Int] = None,
startPosition: Option[Int] = None)
/**
* Provides a location for TreeNodes to ask about the context of their origin. For example, which
* line of code is currently being parsed.
*/
object CurrentOrigin {
private val value = new ThreadLocal[Origin]() {
override def initialValue: Origin = Origin()
}
def get: Origin = value.get()
def set(o: Origin): Unit = value.set(o)
def reset(): Unit = value.set(Origin())
def setPosition(line: Int, start: Int): Unit = {
value.set(
value.get.copy(line = Some(line), startPosition = Some(start)))
}
def withOrigin[A](o: Origin)(f: => A): A = {
set(o)
val ret = try f finally { reset() }
ret
}
}
// A tag of a `TreeNode`, which defines name and type
case class TreeNodeTag[T](name: String)
// scalastyle:off
abstract class TreeNode[BaseType <: TreeNode[BaseType]] extends Product {
// scalastyle:on
self: BaseType =>
val origin: Origin = CurrentOrigin.get
/**
* A mutable map for holding auxiliary information of this tree node. It will be carried over
* when this node is copied via `makeCopy`, or transformed via `transformUp`/`transformDown`.
*/
private val tags: mutable.Map[TreeNodeTag[_], Any] = mutable.Map.empty
protected def copyTagsFrom(other: BaseType): Unit = {
tags ++= other.tags
}
def setTagValue[T](tag: TreeNodeTag[T], value: T): Unit = {
tags(tag) = value
}
def getTagValue[T](tag: TreeNodeTag[T]): Option[T] = {
tags.get(tag).map(_.asInstanceOf[T])
}
def unsetTagValue[T](tag: TreeNodeTag[T]): Unit = {
tags -= tag
}
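  // Illustrative tag usage (the tag name below is made up, not part of any API):
  //   val positionTag = TreeNodeTag[Int]("position")
  //   node.setTagValue(positionTag, 3)
  //   node.getTagValue(positionTag) // Some(3)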
/**
* Returns a Seq of the children of this node.
   * Children should not change. Immutability is required for the containsChild optimization.
*/
def children: Seq[BaseType]
lazy val containsChild: Set[TreeNode[_]] = children.toSet
// Copied from Scala 2.13.1
// github.com/scala/scala/blob/v2.13.1/src/library/scala/util/hashing/MurmurHash3.scala#L56-L73
// to prevent the issue https://github.com/scala/bug/issues/10495
// TODO(SPARK-30848): Remove this once we drop Scala 2.12.
private final def productHash(x: Product, seed: Int, ignorePrefix: Boolean = false): Int = {
val arr = x.productArity
// Case objects have the hashCode inlined directly into the
// synthetic hashCode method, but this method should still give
// a correct result if passed a case object.
if (arr == 0) {
x.productPrefix.hashCode
} else {
var h = seed
if (!ignorePrefix) h = scala.util.hashing.MurmurHash3.mix(h, x.productPrefix.hashCode)
var i = 0
while (i < arr) {
h = scala.util.hashing.MurmurHash3.mix(h, x.productElement(i).##)
i += 1
}
scala.util.hashing.MurmurHash3.finalizeHash(h, arr)
}
}
private lazy val _hashCode: Int = productHash(this, scala.util.hashing.MurmurHash3.productSeed)
override def hashCode(): Int = _hashCode
/**
* Faster version of equality which short-circuits when two treeNodes are the same instance.
* We don't just override Object.equals, as doing so prevents the scala compiler from
* generating case class `equals` methods
*/
def fastEquals(other: TreeNode[_]): Boolean = {
this.eq(other) || this == other
}
/**
* Find the first [[TreeNode]] that satisfies the condition specified by `f`.
* The condition is recursively applied to this node and all of its children (pre-order).
*/
def find(f: BaseType => Boolean): Option[BaseType] = if (f(this)) {
Some(this)
} else {
children.foldLeft(Option.empty[BaseType]) { (l, r) => l.orElse(r.find(f)) }
}
/**
* Runs the given function on this node and then recursively on [[children]].
* @param f the function to be applied to each node in the tree.
*/
def foreach(f: BaseType => Unit): Unit = {
f(this)
children.foreach(_.foreach(f))
}
/**
* Runs the given function recursively on [[children]] then on this node.
* @param f the function to be applied to each node in the tree.
*/
def foreachUp(f: BaseType => Unit): Unit = {
children.foreach(_.foreachUp(f))
f(this)
}
/**
* Returns a Seq containing the result of applying the given function to each
* node in this tree in a preorder traversal.
* @param f the function to be applied.
*/
def map[A](f: BaseType => A): Seq[A] = {
val ret = new collection.mutable.ArrayBuffer[A]()
foreach(ret += f(_))
ret.toSeq
}
/**
* Returns a Seq by applying a function to all nodes in this tree and using the elements of the
* resulting collections.
*/
def flatMap[A](f: BaseType => TraversableOnce[A]): Seq[A] = {
val ret = new collection.mutable.ArrayBuffer[A]()
foreach(ret ++= f(_))
ret.toSeq
}
/**
* Returns a Seq containing the result of applying a partial function to all elements in this
* tree on which the function is defined.
*/
def collect[B](pf: PartialFunction[BaseType, B]): Seq[B] = {
val ret = new collection.mutable.ArrayBuffer[B]()
val lifted = pf.lift
foreach(node => lifted(node).foreach(ret.+=))
ret.toSeq
}
/**
* Returns a Seq containing the leaves in this tree.
*/
def collectLeaves(): Seq[BaseType] = {
this.collect { case p if p.children.isEmpty => p }
}
/**
* Finds and returns the first [[TreeNode]] of the tree for which the given partial function
* is defined (pre-order), and applies the partial function to it.
*/
def collectFirst[B](pf: PartialFunction[BaseType, B]): Option[B] = {
val lifted = pf.lift
lifted(this).orElse {
children.foldLeft(Option.empty[B]) { (l, r) => l.orElse(r.collectFirst(pf)) }
}
}
/**
* Efficient alternative to `productIterator.map(f).toArray`.
*/
protected def mapProductIterator[B: ClassTag](f: Any => B): Array[B] = {
val arr = Array.ofDim[B](productArity)
var i = 0
while (i < arr.length) {
arr(i) = f(productElement(i))
i += 1
}
arr
}
/**
* Returns a copy of this node with the children replaced.
* TODO: Validate somewhere (in debug mode?) that children are ordered correctly.
*/
def withNewChildren(newChildren: Seq[BaseType]): BaseType = {
assert(newChildren.size == children.size, "Incorrect number of children")
var changed = false
val remainingNewChildren = newChildren.toBuffer
val remainingOldChildren = children.toBuffer
def mapTreeNode(node: TreeNode[_]): TreeNode[_] = {
val newChild = remainingNewChildren.remove(0)
val oldChild = remainingOldChildren.remove(0)
if (newChild fastEquals oldChild) {
oldChild
} else {
changed = true
newChild
}
}
def mapChild(child: Any): Any = child match {
case arg: TreeNode[_] if containsChild(arg) => mapTreeNode(arg)
// CaseWhen Case or any tuple type
case (left, right) => (mapChild(left), mapChild(right))
case nonChild: AnyRef => nonChild
case null => null
}
val newArgs = mapProductIterator {
case s: StructType => s // Don't convert struct types to some other type of Seq[StructField]
// Handle Seq[TreeNode] in TreeNode parameters.
case s: Stream[_] =>
// Stream is lazy so we need to force materialization
s.map(mapChild).force
case s: Seq[_] =>
s.map(mapChild)
case m: Map[_, _] =>
        // `map.mapValues().view.force` returns `Map` in Scala 2.12 but `IndexedSeq` in Scala
        // 2.13, so call `toMap` manually to stay compatible with both Scala 2.12 and Scala 2.13
// `mapValues` is lazy and we need to force it to materialize
m.mapValues(mapChild).view.force.toMap
case arg: TreeNode[_] if containsChild(arg) => mapTreeNode(arg)
case Some(child) => Some(mapChild(child))
case nonChild: AnyRef => nonChild
case null => null
}
if (changed) makeCopy(newArgs) else this
}
/**
* Returns a copy of this node where `rule` has been recursively applied to the tree.
* When `rule` does not apply to a given node it is left unchanged.
* Users should not expect a specific directionality. If a specific directionality is needed,
* transformDown or transformUp should be used.
*
   * @param rule the function used to transform this node's children
*/
def transform(rule: PartialFunction[BaseType, BaseType]): BaseType = {
transformDown(rule)
}
/**
* Returns a copy of this node where `rule` has been recursively applied to it and all of its
* children (pre-order). When `rule` does not apply to a given node it is left unchanged.
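   *
   * For example (illustrative; `SomeNode`, `shouldRewrite` and `rewrite` are hypothetical):
   * {{{
   * tree.transformDown { case n: SomeNode if shouldRewrite(n) => rewrite(n) }
   * }}}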
*
   * @param rule the function used to transform this node's children
*/
def transformDown(rule: PartialFunction[BaseType, BaseType]): BaseType = {
val afterRule = CurrentOrigin.withOrigin(origin) {
rule.applyOrElse(this, identity[BaseType])
}
// Check if unchanged and then possibly return old copy to avoid gc churn.
if (this fastEquals afterRule) {
mapChildren(_.transformDown(rule))
} else {
// If the transform function replaces this node with a new one, carry over the tags.
afterRule.copyTagsFrom(this)
afterRule.mapChildren(_.transformDown(rule))
}
}
/**
* Returns a copy of this node where `rule` has been recursively applied first to all of its
* children and then itself (post-order). When `rule` does not apply to a given node, it is left
* unchanged.
*
   * @param rule the function used to transform this node's children
*/
def transformUp(rule: PartialFunction[BaseType, BaseType]): BaseType = {
val afterRuleOnChildren = mapChildren(_.transformUp(rule))
val newNode = if (this fastEquals afterRuleOnChildren) {
CurrentOrigin.withOrigin(origin) {
rule.applyOrElse(this, identity[BaseType])
}
} else {
CurrentOrigin.withOrigin(origin) {
rule.applyOrElse(afterRuleOnChildren, identity[BaseType])
}
}
// If the transform function replaces this node with a new one, carry over the tags.
newNode.copyTagsFrom(this)
newNode
}
/**
* Returns a copy of this node where `f` has been applied to all the nodes in `children`.
*/
def mapChildren(f: BaseType => BaseType): BaseType = {
if (containsChild.nonEmpty) {
mapChildren(f, forceCopy = false)
} else {
this
}
}
/**
* Returns a copy of this node where `f` has been applied to all the nodes in `children`.
* @param f The transform function to be applied on applicable `TreeNode` elements.
* @param forceCopy Whether to force making a copy of the nodes even if no child has been changed.
*/
private def mapChildren(
f: BaseType => BaseType,
forceCopy: Boolean): BaseType = {
var changed = false
def mapChild(child: Any): Any = child match {
case arg: TreeNode[_] if containsChild(arg) =>
val newChild = f(arg.asInstanceOf[BaseType])
if (forceCopy || !(newChild fastEquals arg)) {
changed = true
newChild
} else {
arg
}
case tuple @ (arg1: TreeNode[_], arg2: TreeNode[_]) =>
val newChild1 = if (containsChild(arg1)) {
f(arg1.asInstanceOf[BaseType])
} else {
arg1.asInstanceOf[BaseType]
}
val newChild2 = if (containsChild(arg2)) {
f(arg2.asInstanceOf[BaseType])
} else {
arg2.asInstanceOf[BaseType]
}
if (forceCopy || !(newChild1 fastEquals arg1) || !(newChild2 fastEquals arg2)) {
changed = true
(newChild1, newChild2)
} else {
tuple
}
case other => other
}
val newArgs = mapProductIterator {
case arg: TreeNode[_] if containsChild(arg) =>
val newChild = f(arg.asInstanceOf[BaseType])
if (forceCopy || !(newChild fastEquals arg)) {
changed = true
newChild
} else {
arg
}
case Some(arg: TreeNode[_]) if containsChild(arg) =>
val newChild = f(arg.asInstanceOf[BaseType])
if (forceCopy || !(newChild fastEquals arg)) {
changed = true
Some(newChild)
} else {
Some(arg)
}
      // `map.mapValues().view.force` returns `Map` in Scala 2.12 but `IndexedSeq` in Scala
      // 2.13, so call `toMap` manually to stay compatible with both Scala 2.12 and Scala 2.13
case m: Map[_, _] => m.mapValues {
case arg: TreeNode[_] if containsChild(arg) =>
val newChild = f(arg.asInstanceOf[BaseType])
if (forceCopy || !(newChild fastEquals arg)) {
changed = true
newChild
} else {
arg
}
case other => other
}.view.force.toMap // `mapValues` is lazy and we need to force it to materialize
case d: DataType => d // Avoid unpacking Structs
case args: Stream[_] => args.map(mapChild).force // Force materialization on stream
case args: Iterable[_] => args.map(mapChild)
case nonChild: AnyRef => nonChild
case null => null
}
if (forceCopy || changed) makeCopy(newArgs, forceCopy) else this
}
/**
* Args to the constructor that should be copied, but not transformed.
* These are appended to the transformed args automatically by makeCopy
* @return
*/
protected def otherCopyArgs: Seq[AnyRef] = Nil
/**
* Creates a copy of this type of tree node after a transformation.
* Must be overridden by child classes that have constructor arguments
* that are not present in the productIterator.
* @param newArgs the new product arguments.
*/
def makeCopy(newArgs: Array[AnyRef]): BaseType = makeCopy(newArgs, allowEmptyArgs = false)
/**
* Creates a copy of this type of tree node after a transformation.
* Must be overridden by child classes that have constructor arguments
* that are not present in the productIterator.
* @param newArgs the new product arguments.
* @param allowEmptyArgs whether to allow argument list to be empty.
*/
private def makeCopy(
newArgs: Array[AnyRef],
allowEmptyArgs: Boolean): BaseType = attachTree(this, "makeCopy") {
val allCtors = getClass.getConstructors
if (newArgs.isEmpty && allCtors.isEmpty) {
// This is a singleton object which doesn't have any constructor. Just return `this` as we
// can't copy it.
return this
}
// Skip no-arg constructors that are just there for kryo.
val ctors = allCtors.filter(allowEmptyArgs || _.getParameterTypes.size != 0)
if (ctors.isEmpty) {
sys.error(s"No valid constructor for $nodeName")
}
val allArgs: Array[AnyRef] = if (otherCopyArgs.isEmpty) {
newArgs
} else {
newArgs ++ otherCopyArgs
}
val defaultCtor = ctors.find { ctor =>
if (ctor.getParameterTypes.length != allArgs.length) {
false
} else if (allArgs.contains(null)) {
        // if there is a `null`, we can't figure out the class, therefore we should just fall back
        // to the older heuristic
false
} else {
val argsArray: Array[Class[_]] = allArgs.map(_.getClass)
ClassUtils.isAssignable(argsArray, ctor.getParameterTypes, true /* autoboxing */)
}
}.getOrElse(ctors.maxBy(_.getParameterTypes.length)) // fall back to older heuristic
try {
CurrentOrigin.withOrigin(origin) {
val res = defaultCtor.newInstance(allArgs.toArray: _*).asInstanceOf[BaseType]
res.copyTagsFrom(this)
res
}
} catch {
case e: java.lang.IllegalArgumentException =>
throw new TreeNodeException(
this,
s"""
|Failed to copy node.
|Is otherCopyArgs specified correctly for $nodeName.
|Exception message: ${e.getMessage}
|ctor: $defaultCtor?
|types: ${newArgs.map(_.getClass).mkString(", ")}
|args: ${newArgs.mkString(", ")}
""".stripMargin)
}
}
override def clone(): BaseType = {
mapChildren(_.clone(), forceCopy = true)
}
/**
* Returns the name of this type of TreeNode. Defaults to the class name.
* Note that we remove the "Exec" suffix for physical operators here.
*/
def nodeName: String = getClass.getSimpleName.replaceAll("Exec$", "")
/**
* The arguments that should be included in the arg string. Defaults to the `productIterator`.
*/
protected def stringArgs: Iterator[Any] = productIterator
private lazy val allChildren: Set[TreeNode[_]] = (children ++ innerChildren).toSet[TreeNode[_]]
/** Returns a string representing the arguments to this node, minus any children */
def argString(maxFields: Int): String = stringArgs.flatMap {
case tn: TreeNode[_] if allChildren.contains(tn) => Nil
case Some(tn: TreeNode[_]) if allChildren.contains(tn) => Nil
case Some(tn: TreeNode[_]) => tn.simpleString(maxFields) :: Nil
case tn: TreeNode[_] => tn.simpleString(maxFields) :: Nil
case seq: Seq[Any] if seq.toSet.subsetOf(allChildren.asInstanceOf[Set[Any]]) => Nil
case iter: Iterable[_] if iter.isEmpty => Nil
case seq: Seq[_] => truncatedString(seq, "[", ", ", "]", maxFields) :: Nil
case set: Set[_] => truncatedString(set.toSeq, "{", ", ", "}", maxFields) :: Nil
case array: Array[_] if array.isEmpty => Nil
case array: Array[_] => truncatedString(array, "[", ", ", "]", maxFields) :: Nil
case null => Nil
case None => Nil
case Some(null) => Nil
case Some(any) => any :: Nil
case table: CatalogTable =>
table.storage.serde match {
case Some(serde) => table.identifier :: serde :: Nil
case _ => table.identifier :: Nil
}
case other => other :: Nil
}.mkString(", ")
/**
* ONE line description of this node.
* @param maxFields Maximum number of fields that will be converted to strings.
* Any elements beyond the limit will be dropped.
*/
def simpleString(maxFields: Int): String = s"$nodeName ${argString(maxFields)}".trim
/**
* ONE line description of this node containing the node identifier.
* @return
*/
def simpleStringWithNodeId(): String
/** ONE line description of this node with more information */
def verboseString(maxFields: Int): String
/** ONE line description of this node with some suffix information */
def verboseStringWithSuffix(maxFields: Int): String = verboseString(maxFields)
override def toString: String = treeString
/** Returns a string representation of the nodes in this tree */
final def treeString: String = treeString(verbose = true)
final def treeString(
verbose: Boolean,
addSuffix: Boolean = false,
maxFields: Int = SQLConf.get.maxToStringFields,
printOperatorId: Boolean = false): String = {
val concat = new PlanStringConcat()
treeString(concat.append, verbose, addSuffix, maxFields, printOperatorId)
concat.toString
}
def treeString(
append: String => Unit,
verbose: Boolean,
addSuffix: Boolean,
maxFields: Int,
printOperatorId: Boolean): Unit = {
generateTreeString(0, Nil, append, verbose, "", addSuffix, maxFields, printOperatorId, 0)
}
/**
* Returns a string representation of the nodes in this tree, where each operator is numbered.
* The numbers can be used with [[TreeNode.apply]] to easily access specific subtrees.
*
* The numbers are based on depth-first traversal of the tree (with innerChildren traversed first
* before children).
*/
def numberedTreeString: String =
    treeString.split("\n").zipWithIndex.map { case (line, i) => f"$i%02d $line" }.mkString("\n")
/**
* Returns the tree node at the specified number, used primarily for interactive debugging.
* Numbers for each node can be found in the [[numberedTreeString]].
*
   * Note that this cannot return BaseType because a logical plan node might return a
   * physical plan for innerChildren, e.g. the in-memory relation logical plan node has a
   * reference to the physical plan node it is referencing.
*/
def apply(number: Int): TreeNode[_] = getNodeNumbered(new MutableInt(number)).orNull
/**
* Returns the tree node at the specified number, used primarily for interactive debugging.
* Numbers for each node can be found in the [[numberedTreeString]].
*
* This is a variant of [[apply]] that returns the node as BaseType (if the type matches).
*/
def p(number: Int): BaseType = apply(number).asInstanceOf[BaseType]
private def getNodeNumbered(number: MutableInt): Option[TreeNode[_]] = {
if (number.i < 0) {
None
} else if (number.i == 0) {
Some(this)
} else {
number.i -= 1
// Note that this traversal order must be the same as numberedTreeString.
innerChildren.map(_.getNodeNumbered(number)).find(_ != None).getOrElse {
children.map(_.getNodeNumbered(number)).find(_ != None).flatten
}
}
}
/**
   * All the nodes that should be shown as an inner nested tree of this node.
* For example, this can be used to show sub-queries.
*/
def innerChildren: Seq[TreeNode[_]] = Seq.empty
/**
* Appends the string representation of this node and its children to the given Writer.
*
* The `i`-th element in `lastChildren` indicates whether the ancestor of the current node at
* depth `i + 1` is the last child of its own parent node. The depth of the root node is 0, and
* `lastChildren` for the root node should be empty.
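   *
   * Schematically (spacing approximate), a small tree renders as:
   * {{{
   * Root
   * :- ChildA
   * :  +- GrandChild
   * +- ChildB
   * }}}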
*
* Note that this traversal (numbering) order must be the same as [[getNodeNumbered]].
*/
def generateTreeString(
depth: Int,
lastChildren: Seq[Boolean],
append: String => Unit,
verbose: Boolean,
prefix: String = "",
addSuffix: Boolean = false,
maxFields: Int,
printNodeId: Boolean,
indent: Int = 0): Unit = {
append(" " * indent)
if (depth > 0) {
lastChildren.init.foreach { isLast =>
append(if (isLast) " " else ": ")
}
append(if (lastChildren.last) "+- " else ":- ")
}
val str = if (verbose) {
if (addSuffix) verboseStringWithSuffix(maxFields) else verboseString(maxFields)
} else {
if (printNodeId) {
simpleStringWithNodeId()
} else {
simpleString(maxFields)
}
}
append(prefix)
append(str)
    append("\n")
if (innerChildren.nonEmpty) {
innerChildren.init.foreach(_.generateTreeString(
depth + 2, lastChildren :+ children.isEmpty :+ false, append, verbose,
addSuffix = addSuffix, maxFields = maxFields, printNodeId = printNodeId, indent = indent))
innerChildren.last.generateTreeString(
depth + 2, lastChildren :+ children.isEmpty :+ true, append, verbose,
addSuffix = addSuffix, maxFields = maxFields, printNodeId = printNodeId, indent = indent)
}
if (children.nonEmpty) {
children.init.foreach(_.generateTreeString(
depth + 1, lastChildren :+ false, append, verbose, prefix, addSuffix,
maxFields, printNodeId = printNodeId, indent = indent)
)
children.last.generateTreeString(
depth + 1, lastChildren :+ true, append, verbose, prefix,
addSuffix, maxFields, printNodeId = printNodeId, indent = indent)
}
}
/**
* Returns a 'scala code' representation of this `TreeNode` and its children. Intended for use
* when debugging where the prettier toString function is obfuscating the actual structure. In the
* case of 'pure' `TreeNodes` that only contain primitives and other TreeNodes, the result can be
* pasted in the REPL to build an equivalent Tree.
*/
def asCode: String = {
val args = productIterator.map {
case tn: TreeNode[_] => tn.asCode
      case s: String => "\"" + s + "\""
case other => other.toString
}
s"$nodeName(${args.mkString(",")})"
}
def toJSON: String = compact(render(jsonValue))
def prettyJson: String = pretty(render(jsonValue))
private def jsonValue: JValue = {
val jsonValues = scala.collection.mutable.ArrayBuffer.empty[JValue]
def collectJsonValue(tn: BaseType): Unit = {
val jsonFields = ("class" -> JString(tn.getClass.getName)) ::
("num-children" -> JInt(tn.children.length)) :: tn.jsonFields
jsonValues += JObject(jsonFields)
tn.children.foreach(collectJsonValue)
}
collectJsonValue(this)
jsonValues
}
protected def jsonFields: List[JField] = {
val fieldNames = getConstructorParameterNames(getClass)
val fieldValues = productIterator.toSeq ++ otherCopyArgs
assert(fieldNames.length == fieldValues.length, s"${getClass.getSimpleName} fields: " +
fieldNames.mkString(", ") + s", values: " + fieldValues.mkString(", "))
fieldNames.zip(fieldValues).map {
      // If the field value is a child, then use an int to encode it, representing the index of
      // this child among all children.
case (name, value: TreeNode[_]) if containsChild(value) =>
name -> JInt(children.indexOf(value))
case (name, value: Seq[BaseType]) if value.forall(containsChild) =>
name -> JArray(
value.map(v => JInt(children.indexOf(v.asInstanceOf[TreeNode[_]]))).toList
)
case (name, value) => name -> parseToJson(value)
}.toList
}
private def parseToJson(obj: Any): JValue = obj match {
case b: Boolean => JBool(b)
case b: Byte => JInt(b.toInt)
case s: Short => JInt(s.toInt)
case i: Int => JInt(i)
case l: Long => JInt(l)
case f: Float => JDouble(f)
case d: Double => JDouble(d)
case b: BigInt => JInt(b)
case null => JNull
case s: String => JString(s)
case u: UUID => JString(u.toString)
case dt: DataType => dt.jsonValue
// SPARK-17356: In usage of mllib, Metadata may store a huge vector of data, transforming
// it to JSON may trigger OutOfMemoryError.
case m: Metadata => Metadata.empty.jsonValue
case clazz: Class[_] => JString(clazz.getName)
case s: StorageLevel =>
("useDisk" -> s.useDisk) ~ ("useMemory" -> s.useMemory) ~ ("useOffHeap" -> s.useOffHeap) ~
("deserialized" -> s.deserialized) ~ ("replication" -> s.replication)
case n: TreeNode[_] => n.jsonValue
case o: Option[_] => o.map(parseToJson)
// Recursive scan Seq[TreeNode], Seq[Partitioning], Seq[DataType]
case t: Seq[_] if t.forall(_.isInstanceOf[TreeNode[_]]) ||
t.forall(_.isInstanceOf[Partitioning]) || t.forall(_.isInstanceOf[DataType]) =>
JArray(t.map(parseToJson).toList)
case t: Seq[_] if t.length > 0 && t.head.isInstanceOf[String] =>
JString(truncatedString(t, "[", ", ", "]", SQLConf.get.maxToStringFields))
case t: Seq[_] => JNull
case m: Map[_, _] => JNull
// if it's a scala object, we can simply keep the full class path.
// TODO: currently if the class name ends with "$", we think it's a scala object, there is
// probably a better way to check it.
case obj if obj.getClass.getName.endsWith("$") => "object" -> obj.getClass.getName
case p: Product if shouldConvertToJson(p) =>
try {
val fieldNames = getConstructorParameterNames(p.getClass)
val fieldValues = p.productIterator.toSeq
assert(fieldNames.length == fieldValues.length, s"${getClass.getSimpleName} fields: " +
fieldNames.mkString(", ") + s", values: " + fieldValues.mkString(", "))
("product-class" -> JString(p.getClass.getName)) :: fieldNames.zip(fieldValues).map {
case (name, value) => name -> parseToJson(value)
}.toList
} catch {
case _: RuntimeException => null
}
case _ => JNull
}
private def shouldConvertToJson(product: Product): Boolean = product match {
case exprId: ExprId => true
case field: StructField => true
case id: IdentifierWithDatabase => true
case alias: AliasIdentifier => true
case join: JoinType => true
case spec: BucketSpec => true
case catalog: CatalogTable => true
case partition: Partitioning => true
case resource: FunctionResource => true
case broadcast: BroadcastMode => true
case table: CatalogTableType => true
case storage: CatalogStorageFormat => true
case _ => false
}
}
| wzhfy/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/trees/TreeNode.scala | Scala | apache-2.0 | 30,301 |
package ghpages.examples
import ghpages.GhPagesMacros
import ghpages.examples.util.SingleSide
object CatsEffectExample {
// EXAMPLE:START
import cats.effect._
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.html_<^._
// Cats Effect example code
// ========================
final case class Logger(log: String => SyncIO[Unit]) {
def apply[A](name: String, effect: IO[A]): IO[A] =
for {
_ <- log(s"[$name] Starting").to[IO]
a <- effect
_ <- log(s"[$name] Completed.").to[IO]
} yield a
}
// Hooks example
// =============
final case class CounterProps(initialCount: Int, logger: Logger)
val Counter = ScalaFnComponent.withHooks[CounterProps]
.useStateBy(_.initialCount)
.render { (props, state) =>
val inc: SyncIO[Unit] =
// Depending on which scalajs-react modules you're using, you'll use one of the following:
//
// 1. If you're using "core-ext-cats_effect" and "core", then:
state.withEffect[SyncIO].modState(_ + 1)
//
// 2. If you're using "core-bundle-cats_effect" instead of "core",
// then Cats Effect types are the defaults and you'd use:
// state.modState(_ + 1)
val incAndLog: IO[Unit] =
props.logger("counter", inc.to[IO])
<.div(
<.div("Counter: ", state.value),
<.button("Increment", ^.onClick --> incAndLog),
// Here we supply an IO[Unit] directly ^^^^^^
)
}
// Class Component example
// =======================
final class CounterAndLog($: BackendScope[Unit, String]) {
private val logger =
// As mentioned above, `.withEffect[SyncIO]` isn't needed when you've chosen Cats Effect as your default effect type
      Logger(str => $.withEffect[SyncIO].modState(_ + "\n" + str))
private val counter =
Counter(CounterProps(0, logger))
def render(state: String): VdomNode = {
<.div(
counter,
<.pre(
^.marginTop := 0.5.em,
^.width := 40.ex,
^.height := 20.em,
^.border := "1px solid",
state,
)
)
}
}
val CounterAndLog = ScalaComponent.builder[Unit]
.initialState("Ready.")
.renderBackend[CounterAndLog]
.build
// What about mounting?
// ====================
// Because mounting a component to DOM is something you only do once at the start of an application,
// there's no effectful support. If you're using `IOApp` or similar, you'd just wrap the mounting
// line of code in `IO { ... }`.
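  //
  // A minimal sketch (illustrative; assumes the page has an element with id "root"):
  //
  //   IO(CounterAndLog().renderIntoDOM(org.scalajs.dom.document.getElementById("root")))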
// EXAMPLE:END
def content = SingleSide.Content(source, main())
lazy val main = addIntro(CounterAndLog.withKey(_)(), _(
^.marginBottom := "2em",
"There are two ways of using ",
<.a(^.href := "https://typelevel.org/cats-effect", "Cats Effect"),
" directly with scalajs-react:",
<.ol(
<.li("Adding the ", <.code("core-ext-cats_effect"), " module before ", <.code("core"), " in your sbt dependencies. This adds support for Cats Effect but the default effect types (when scalajs-react provides ", <.em("you"), " with effects) still defaults to Callback."),
<.li("Using the ", <.code("core-bundle-cats_effect"), " module instead of ", <.code("core"), " in your sbt dependencies. This configure scalajs-react to use Cats Effect as the default effect types."),
),
"See the scalajs-react ",
<.a(^.href := "https://github.com/japgolly/scalajs-react/blob/master/doc/MODULES.md", "Modules Guide"),
" for more detail.",
))
val source = GhPagesMacros.exampleSource
}
| japgolly/scalajs-react | ghpages/src/main/scala/ghpages/examples/CatsEffectExample.scala | Scala | apache-2.0 | 3,595 |
/*
* ____ ____ _____ ____ ___ ____
* | _ \\ | _ \\ | ____| / ___| / _/ / ___| Precog (R)
* | |_) | | |_) | | _| | | | | /| | | _ Advanced Analytics Engine for NoSQL Data
* | __/ | _ < | |___ | |___ |/ _| | | |_| | Copyright (C) 2010 - 2013 SlamData, Inc.
* |_| |_| \\_\\ |_____| \\____| /__/ \\____| All Rights Reserved.
*
* This program is free software: you can redistribute it and/or modify it under the terms of the
* GNU Affero General Public License as published by the Free Software Foundation, either version
* 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License along with this
* program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package com.precog.common
import scalaz.syntax.semigroup._
import scalaz.syntax.order._
case class ColumnRef(selector: CPath, ctype: CType)
object ColumnRef {
def identity(ctype: CType) = ColumnRef(CPath.Identity, ctype)
implicit object order extends scalaz.Order[ColumnRef] {
def order(r1: ColumnRef, r2: ColumnRef): scalaz.Ordering = {
(r1.selector ?|? r2.selector) |+| (r1.ctype ?|? r2.ctype)
}
}
implicit val ordering: scala.math.Ordering[ColumnRef] = order.toScalaOrdering
}
| precog/platform | common/src/main/scala/com/precog/common/ColumnRef.scala | Scala | agpl-3.0 | 1,581 |
package app.components.custom
import app.WsClient
import app.actions.GlobalContextImpl
import app.components.custom.userspage.UsersPageMem
import japgolly.scalajs.react.Callback
import shared.api.ServerApi
import shared.dto.User
case class ThePageState(modState: (ThePageState => ThePageState) => Callback = null,
getState: () => ThePageState = null,
serverApi: WsClient[ServerApi] = null,
windowFuncMem: WindowFuncMem = WindowFuncMem(),
routerMem: RouterMem = RouterMem(),
loggedInAs: Option[User] = None,
usersPageMem: UsersPageMem = UsersPageMem()) extends WindowFunc with GlobalContextImpl with Router {
override protected def modWindowFuncMem(f: WindowFuncMem => WindowFuncMem): Callback =
modState(s => s.copy(windowFuncMem = f(s.windowFuncMem)))
override protected def modRouterMem(f: RouterMem => RouterMem): Callback =
modState(s => s.copy(routerMem = f(s.routerMem)))
override protected def modLoggedInAs(f: Option[User] => Option[User]) =
modState(s => s.copy(loggedInAs = f(s.loggedInAs)))
override protected def modUsersPageMem(f: UsersPageMem => UsersPageMem) =
modState(s => s.copy(usersPageMem = f(s.usersPageMem)))
}
| Igorocky/lesn | client/src/main/scala/app/components/custom/ThePageState.scala | Scala | mit | 1,309 |
package com.sksamuel.scapegoat
/**
* @author
* Stephen Samuel
*/
sealed trait Level
object Levels {
/**
* Errors indicate code that is potentially unsafe or likely to lead to bugs.
*
* An example is use of nulls. Use of nulls can lead to NullPointerExceptions and should be avoided.
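   *
   * For instance (illustrative):
   *
   * {{{
   * val name: String = null
   * name.length // throws NullPointerException at runtime
   * }}}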
*/
case object Error extends Level
/**
* Warnings are reserved for code that has bad semantics. This by itself does not necessarily mean the code
   * is buggy, but could mean the developer made a mistake or does not fully understand the constructs or best
* practice.
*
* An example is an expression as a statement. While this is perfectly legal, it could indicate that the
   * developer meant to assign the result to something or otherwise use it.
*
* Another example is a constant if. You can do things like if (true) { } if you want, but since the block
* will always evaluate, the if statement perhaps indicates a mistake.
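   *
   * For instance (illustrative):
   *
   * {{{
   * def foo(a: Int) = {
   *   a + 1                            // expression as a statement: the result is discarded
   *   if (true) println("always runs") // constant condition
   * }
   * }}}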
*/
case object Warning extends Level
/**
   * Infos are used for code which is semantically fine, but there exists a more idiomatic way of writing it.
*
* An example would be using an if statement to return true or false as the last statement in a block. Eg,
*
* {{{
* def foo = {
* if (a) true else false
* }
* }}}
*
   * Can be re-written as
   *
   * {{{
   * def foo = a
   * }}}
*/
case object Info extends Level
def fromName(name: String): Level =
name.toLowerCase() match {
case "error" => Error
case "warning" => Warning
case "info" => Info
case _ => throw new IllegalArgumentException(s"Unrecognised level '$name'")
}
}
| sksamuel/scapegoat | src/main/scala/com/sksamuel/scapegoat/Level.scala | Scala | apache-2.0 | 1,649 |
package presistent
trait BinTreeNode[T <: AnyVal] {
def rootValue : Option[T]
def left : BinTreeNode[T]
def right : BinTreeNode[T]
def isEmpty : Boolean = rootValue.isEmpty
def hasChildren : Boolean = left.rootValue.isDefined || right.rootValue.isDefined
def contains(elem:T) : Boolean
def > (elem:T) : Boolean
def < (elem:T) : Boolean
  def + (elem:T) : BinTreeNode[T]                 // insert an element
  def + (elem:Option[T]) : BinTreeNode[T]         // insert an optional element
  def - (elem:T) : BinTreeNode[T]                 // remove an element
  def u (other:BinTreeNode[T]) : BinTreeNode[T]   // union with another tree
  def n (other:BinTreeNode[T]) : BinTreeNode[T]   // intersection with another tree
} | rominavarela-scala/progfun1 | progfun1-week3/src/presistent/BinTreeNode.scala | Scala | mit | 563 |
/**
* Copyright 2011-2012 eBusiness Information, Groupe Excilys (www.excilys.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.excilys.ebi.gatling.core.config
import org.junit.runner.RunWith
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class ProtocolConfigurationRegistrySpec extends Specification {
class FooProtocolConfiguration(val foo: String) extends ProtocolConfiguration
class BarProtocolConfiguration(val bar: String) extends ProtocolConfiguration
"building registry" should {
"return the configuration when 1 configuration" in {
ProtocolConfigurationRegistry(List(new FooProtocolConfiguration("foo"))).getProtocolConfiguration[FooProtocolConfiguration] must beSome.which(_.foo == "foo")
}
"return the configurations when 2 different configurations" in {
val registry = ProtocolConfigurationRegistry(List(new FooProtocolConfiguration("foo"), new BarProtocolConfiguration("bar")))
registry.getProtocolConfiguration[FooProtocolConfiguration] must beSome.which(_.foo == "foo")
registry.getProtocolConfiguration[BarProtocolConfiguration] must beSome.which(_.bar == "bar")
}
"not fail when no configuration" in {
ProtocolConfigurationRegistry(List.empty).getProtocolConfiguration[FooProtocolConfiguration] must beNone
}
"fail when multiple configurations of the same type" in {
ProtocolConfigurationRegistry(List(new FooProtocolConfiguration("foo1"), new FooProtocolConfiguration("foo2"))) must throwA[ExceptionInInitializerError]
}
}
} | Tjoene/thesis | Case_Programs/gatling-1.4.0/gatling-core/src/test/scala/com/excilys/ebi/gatling/core/config/ProtocolConfigurationRegistrySpec.scala | Scala | gpl-2.0 | 2,077 |
/*
* Copyright (c) 2014-2015 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics
package snowplow
package enrich
package common
package utils
package shredder
// Snowplow Common Enrich
import outputs.EnrichedEvent
// Specs2
import org.specs2.Specification
class ShredderSpec extends Specification /*with DataTables with ValidationMatchers*/ {
def is =
"This is a specification to test the Shredder functionality" ^
p ^
"makePartialHierarchy should initialize a partial TypeHierarchy" ! e1 ^
"shred should extract the JSONs from an unstructured event with multiple contexts" ! e2 ^
end
val EventId = "f81d4fae-7dec-11d0-a765-00a0c91e6bf6"
val CollectorTimestamp = "2014-04-29 09:00:54.000"
implicit val resolver = SpecHelpers.IgluResolver
def e1 =
Shredder.makePartialHierarchy(EventId, CollectorTimestamp) must_==
TypeHierarchy(rootId = EventId,
rootTstamp = CollectorTimestamp,
refRoot = "events",
refTree = List("events"),
refParent = "events")
def e2 = {
val event = new EnrichedEvent() match {
case e =>
e.event_id = EventId
e.collector_tstamp = CollectorTimestamp
e.unstruct_event =
"""{"schema":"iglu:com.snowplowanalytics.snowplow/unstruct_event/jsonschema/1-0-0","data":{"schema":"iglu:com.snowplowanalytics.snowplow/link_click/jsonschema/1-0-0","data":{"targetUrl":"http://snowplowanalytics.com/blog/page2","elementClasses":["next"]}}}"""
e.contexts =
"""{"schema":"iglu:com.snowplowanalytics.snowplow/contexts/jsonschema/1-0-0","data":[{"schema":"iglu:org.schema/WebPage/jsonschema/1-0-0","data":{"datePublished":"2014-07-23T00:00:00Z","author":"Jonathan Almeida","inLanguage":"en-US","genre":"blog","breadcrumb":["blog","releases"],"keywords":["snowplow","analytics","java","jvm","tracker"]}},{"schema":"iglu:org.schema/WebPage/jsonschema/1-0-0","data":{"datePublished":"2014-07-23T00:00:00Z","author":"Jonathan Almeida","inLanguage":"en-US","genre":"blog","breadcrumb":["blog","releases"],"keywords":["snowplow","analytics","java","jvm","tracker"]}}]}"""
e
}
// TODO: check actual contents (have already confirmed in REPL)
Shredder.shred(event).toOption.get must have size (3)
}
}
| TimothyKlim/snowplow | 3-enrich/scala-common-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.common/utils/shredder/ShredderSpec.scala | Scala | apache-2.0 | 2,980 |
package edu.berkeley.nlp.entity
// Chunks are semi-inclusive intervals.
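// e.g. (assuming [start, end) semantics) Chunk(2, 5, "NP") covers positions 2, 3, and 4.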
@SerialVersionUID(1L)
case class Chunk[T](val start: Int,
val end: Int,
val label: T);
object Chunk {
def seqify[T](chunk: Chunk[T]): Chunk[Seq[T]] = new Chunk(chunk.start, chunk.end, Seq(chunk.label));
} | malcolmgreaves/berkeley-entity | src/main/java/edu/berkeley/nlp/entity/Chunk.scala | Scala | gpl-3.0 | 320 |
package com.socrata.soda.server.wiremodels
import com.socrata.soda.server.responses.SodaResponse
import java.io.IOException
sealed abstract class ExtractResult[+T] {
def map[U](f: T => U): ExtractResult[U]
def flatMap[U](f: T => ExtractResult[U]): ExtractResult[U]
}
object ExtractResult {
def sequence[A](es: Seq[ExtractResult[A]]): ExtractResult[Seq[A]] = {
val result = Vector.newBuilder[A]
es.foreach {
case Extracted(a) => result += a
case failure: ExtractFailure => return failure
}
Extracted(result.result())
}
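  // Illustrative behaviour: sequence(Seq(Extracted(1), Extracted(2))) == Extracted(Vector(1, 2)),
  // while the first IOProblem/RequestProblem encountered in the input is returned unchanged.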
}
case class Extracted[T](value: T) extends ExtractResult[T] {
def map[U](f: T => U): Extracted[U] = Extracted(f(value))
def flatMap[U](f: T => ExtractResult[U]): ExtractResult[U] = f(value)
}
sealed abstract class ExtractFailure extends ExtractResult[Nothing] {
def map[U](f: Nothing => U): this.type = this
def flatMap[U](f: Nothing => ExtractResult[U]): this.type = this
}
case class IOProblem(error: IOException) extends ExtractFailure
case class RequestProblem(error: SodaResponse) extends ExtractFailure
| socrata-platform/soda-fountain | soda-fountain-lib/src/main/scala/com/socrata/soda/server/wiremodels/ExtractResult.scala | Scala | apache-2.0 | 1,078 |
package services.post
import java.net.URLEncoder
import play.api.libs.ws.Response
import play.api.mvc.RequestHeader
import services.auth.providers.Twitter
import services.comment.Commenter
import models.Comment
object TwitterPoster extends GenericPoster {
override val authProvider = Twitter
override def urlToPost(post: models.Post) =
"https://api.twitter.com/1.1/statuses/update.json?status=" + URLEncoder.encode(post.message, "UTF-8")
} | Froggies/Skimbo | app/services/post/TwitterPoster.scala | Scala | agpl-3.0 | 452 |
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package org.abondar.scalabasic.typeparam
class Queue[T](
private[this] var leading: List[T],
private[this] var trailing: List[T]
) {
  //purely functional FIFO queue backed by two lists: leading (front) and trailing (back, in reverse order)
def this() = this(Nil,Nil)
  //when the front list is exhausted, move the (reversed) back list to the front
  private def mirror() = if (leading.isEmpty) {
while(!trailing.isEmpty){
leading = trailing.head :: leading
trailing = trailing.tail
}
}
def head :T ={
mirror()
leading.head
}
def tail: Queue[T] = {
mirror()
new Queue(leading.tail,trailing)
}
def enqueue[U>:T](x:U) = new Queue[U](leading,x :: trailing)
}
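// Example usage (illustrative): Queue(1, 2).enqueue(3).head == 1 and Queue(1, 2).tail.head == 2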
object Queue{
  def apply[T](xs:T*)= new Queue[T](xs.toList,Nil) //factory method to create a queue from the given elements
} | Dr762/ScalaBase | src/main/scala/org/abondar/scalabasic/typeparam/Queue.scala | Scala | apache-2.0 | 860 |
/*
* Copyright 2015-2016 IBM Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.core.controller
import scala.concurrent.ExecutionContext
import scala.language.postfixOps
import scala.util.Failure
import scala.util.Success
import scala.util.Try
import spray.http.HttpMethod
import spray.http.StatusCodes.InternalServerError
import spray.routing.Directive1
import spray.routing.Directives
import spray.routing.RequestContext
import whisk.common.Logging
import whisk.common.TransactionId
import whisk.core.entitlement._
import whisk.core.entitlement.Privilege.Privilege
import whisk.core.entitlement.Resource
import whisk.core.entity.EntityPath
import whisk.core.entity.Identity
import whisk.http.ErrorResponse.terminate
/** A trait for routes that require entitlement checks. */
trait BasicAuthorizedRouteProvider extends Directives with Logging {
/** An execution context for futures */
protected implicit val executionContext: ExecutionContext
/** An entitlement service to check access rights. */
protected val entitlementProvider: EntitlementProvider
/** The collection type for this trait. */
protected val collection: Collection
/** Route directives for API. The methods that are supported on the collection. */
protected lazy val collectionOps = pathEndOrSingleSlash & get
/** Route directives for API. The path prefix that identifies entity handlers. */
protected lazy val entityPrefix = pathPrefix(Segment)
/** Route directives for API. The methods that are supported on entities. */
protected lazy val entityOps = get
/** Checks entitlement and dispatches to handler if authorized. */
protected def authorizeAndDispatch(
method: HttpMethod,
user: Identity,
resource: Resource)(
implicit transid: TransactionId): RequestContext => Unit = {
val right = collection.determineRight(method, resource.entity)
onComplete(entitlementProvider.check(user, right, resource)) {
case Success(_) => dispatchOp(user, right, resource)
case Failure(t) => handleEntitlementFailure(t)
}
}
protected def handleEntitlementFailure(failure: Throwable)(
implicit transid: TransactionId): RequestContext => Unit = {
failure match {
case (r: RejectRequest) => terminate(r.code, r.message)
case t => terminate(InternalServerError)
}
}
/** Dispatches resource to the proper handler depending on context. */
protected def dispatchOp(
user: Identity,
op: Privilege,
resource: Resource)(
implicit transid: TransactionId): RequestContext => Unit
/** Extracts namespace for user from the matched path segment. */
protected def namespace(user: Identity, ns: String) = {
validate(isNamespace(ns), "namespace contains invalid characters") &
extract(_ => EntityPath(if (EntityPath(ns) == EntityPath.DEFAULT) user.namespace.asString else ns))
}
/** Extracts the HTTP method which is used to determine privilege for resource. */
protected val requestMethod = extract(_.request.method)
/** Confirms that a path segment is a valid namespace. Used to reject invalid namespaces. */
protected def isNamespace(n: String) = Try { EntityPath(n) } isSuccess
}
/**
* A common trait for entity routes that require entitlement checks,
* which share common collectionPrefix and entity operations.
*/
trait AuthorizedRouteProvider extends BasicAuthorizedRouteProvider {
/**
* Route directives for API.
* The default path prefix for the collection is one of
* '_/collection-path' matching an implicit namespace, or
* 'explicit-namespace/collection-path'.
*/
protected lazy val collectionPrefix = pathPrefix((EntityPath.DEFAULT.toString.r | Segment) / collection.path)
/** Route directives for API. The methods that are supported on entities. */
override protected lazy val entityOps = put | get | delete | post
/**
* Common REST API for Whisk Entities. Defines all the routes handled by this API. They are:
*
* GET namespace/entities[/] -- list all entities in namespace
* GET namespace/entities/name -- fetch entity by name from namespace
* PUT namespace/entities/name -- create or update entity by name from namespace with content
* DEL namespace/entities/name -- remove entity by name form namespace
* POST namespace/entities/name -- "activate" entity by name from namespace with content
*
* @param user the authenticated user for this route
*/
def routes(user: Identity)(implicit transid: TransactionId) = {
collectionPrefix { segment =>
namespace(user, segment) { ns =>
(collectionOps & requestMethod) {
// matched /namespace/collection
authorizeAndDispatch(_, user, Resource(ns, collection, None))
} ~ innerRoutes(user, ns)
}
}
}
/**
* Handles the inner routes of the collection. This allows customizing nested resources.
*/
protected def innerRoutes(user: Identity, ns: EntityPath)(implicit transid: TransactionId) = {
(entityPrefix & entityOps & requestMethod) { (segment, m) =>
// matched /namespace/collection/entity
(entityname(segment) & pathEnd) {
name => authorizeAndDispatch(m, user, Resource(ns, collection, Some(name)))
}
}
}
/** Extracts and validates entity name from the matched path segment. */
protected def entityname(segment: String): Directive1[String]
}
| nwspeete-ibm/openwhisk | core/controller/src/main/scala/whisk/core/controller/AuthorizedRouteDispatcher.scala | Scala | apache-2.0 | 6,220 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.connector
import java.io.{BufferedReader, InputStreamReader, IOException}
import java.util
import scala.collection.JavaConverters._
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.SparkContext
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.connector.catalog.{SupportsWrite, Table, TableCapability}
import org.apache.spark.sql.connector.catalog.TableCapability._
import org.apache.spark.sql.connector.read.{InputPartition, PartitionReader, PartitionReaderFactory, ScanBuilder}
import org.apache.spark.sql.connector.write._
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.util.SerializableConfiguration
/**
* A HDFS based transactional writable data source.
* Each task writes data to `target/_temporary/uniqueId/$jobId-$partitionId-$attemptNumber`.
* Each job moves files from `target/_temporary/uniqueId/` to `target`.
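 *
 * Commit protocol sketch (as implemented below): each `CSVDataWriter.commit()` leaves its file in
 * the temporary job directory; `MyBatchWrite.commit()` renames those files into the target
 * directory and removes the temporary directory, while `abort()` simply removes the temporary
 * directory.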
*/
class SimpleWritableDataSource extends TestingV2Source {
class MyScanBuilder(path: String, conf: Configuration) extends SimpleScanBuilder {
override def planInputPartitions(): Array[InputPartition] = {
val dataPath = new Path(path)
val fs = dataPath.getFileSystem(conf)
if (fs.exists(dataPath)) {
fs.listStatus(dataPath).filterNot { status =>
val name = status.getPath.getName
name.startsWith("_") || name.startsWith(".")
}.map { f =>
CSVInputPartitionReader(f.getPath.toUri.toString)
}.toArray
} else {
Array.empty
}
}
override def createReaderFactory(): PartitionReaderFactory = {
val serializableConf = new SerializableConfiguration(conf)
new CSVReaderFactory(serializableConf)
}
}
class MyWriteBuilder(path: String, info: LogicalWriteInfo)
extends WriteBuilder with SupportsTruncate {
protected val queryId: String = info.queryId()
protected var needTruncate = false
override def truncate(): WriteBuilder = {
this.needTruncate = true
this
}
override def build(): Write = {
new Write {
override def toBatch: BatchWrite = {
val hadoopPath = new Path(path)
val hadoopConf = SparkContext.getActive.get.hadoopConfiguration
val fs = hadoopPath.getFileSystem(hadoopConf)
if (needTruncate) {
fs.delete(hadoopPath, true)
}
val pathStr = hadoopPath.toUri.toString
new MyBatchWrite(queryId, pathStr, hadoopConf)
}
}
}
}
class MyBatchWrite(queryId: String, path: String, conf: Configuration) extends BatchWrite {
override def createBatchWriterFactory(info: PhysicalWriteInfo): DataWriterFactory = {
SimpleCounter.resetCounter
new CSVDataWriterFactory(path, queryId, new SerializableConfiguration(conf))
}
override def onDataWriterCommit(message: WriterCommitMessage): Unit = {
SimpleCounter.increaseCounter
}
override def commit(messages: Array[WriterCommitMessage]): Unit = {
val finalPath = new Path(path)
val jobPath = new Path(new Path(finalPath, "_temporary"), queryId)
val fs = jobPath.getFileSystem(conf)
try {
for (file <- fs.listStatus(jobPath).map(_.getPath)) {
val dest = new Path(finalPath, file.getName)
if(!fs.rename(file, dest)) {
throw new IOException(s"failed to rename($file, $dest)")
}
}
} finally {
fs.delete(jobPath, true)
}
}
override def abort(messages: Array[WriterCommitMessage]): Unit = {
val jobPath = new Path(new Path(path, "_temporary"), queryId)
val fs = jobPath.getFileSystem(conf)
fs.delete(jobPath, true)
}
}
class MyTable(options: CaseInsensitiveStringMap)
extends SimpleBatchTable with SupportsWrite {
protected val path = options.get("path")
protected val conf = SparkContext.getActive.get.hadoopConfiguration
override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = {
new MyScanBuilder(new Path(path).toUri.toString, conf)
}
override def newWriteBuilder(info: LogicalWriteInfo): WriteBuilder = {
new MyWriteBuilder(path, info)
}
override def capabilities(): util.Set[TableCapability] =
util.EnumSet.of(BATCH_READ, BATCH_WRITE, TRUNCATE)
}
override def getTable(options: CaseInsensitiveStringMap): Table = {
new MyTable(options)
}
}
case class CSVInputPartitionReader(path: String) extends InputPartition
class CSVReaderFactory(conf: SerializableConfiguration)
extends PartitionReaderFactory {
override def createReader(partition: InputPartition): PartitionReader[InternalRow] = {
val path = partition.asInstanceOf[CSVInputPartitionReader].path
val filePath = new Path(path)
val fs = filePath.getFileSystem(conf.value)
new PartitionReader[InternalRow] {
private val inputStream = fs.open(filePath)
private val lines = new BufferedReader(new InputStreamReader(inputStream))
.lines().iterator().asScala
private var currentLine: String = _
override def next(): Boolean = {
if (lines.hasNext) {
currentLine = lines.next()
true
} else {
false
}
}
override def get(): InternalRow = InternalRow(currentLine.split(",").map(_.trim.toInt): _*)
override def close(): Unit = {
inputStream.close()
}
}
}
}
private[connector] object SimpleCounter {
private var count: Int = 0
def increaseCounter: Unit = {
count += 1
}
def getCounter: Int = {
count
}
def resetCounter: Unit = {
count = 0
}
}
class CSVDataWriterFactory(path: String, jobId: String, conf: SerializableConfiguration)
extends DataWriterFactory {
override def createWriter(
partitionId: Int,
taskId: Long): DataWriter[InternalRow] = {
val jobPath = new Path(new Path(path, "_temporary"), jobId)
val filePath = new Path(jobPath, s"$jobId-$partitionId-$taskId")
val fs = filePath.getFileSystem(conf.value)
new CSVDataWriter(fs, filePath)
}
}
class CSVDataWriter(fs: FileSystem, file: Path) extends DataWriter[InternalRow] {
private val out = fs.create(file)
override def write(record: InternalRow): Unit = {
out.writeBytes(s"${record.getInt(0)},${record.getInt(1)}\n")
}
override def commit(): WriterCommitMessage = {
out.close()
null
}
override def abort(): Unit = {
try {
out.close()
} finally {
fs.delete(file, false)
}
}
override def close(): Unit = {}
}
| shaneknapp/spark | sql/core/src/test/scala/org/apache/spark/sql/connector/SimpleWritableDataSource.scala | Scala | apache-2.0 | 7,457 |
import sbt._
import Keys._
import play.Project._
object ApplicationBuild extends Build {
val appName = "IncidentResponseManager"
val appVersion = "1.0-SNAPSHOT"
val appDependencies = Seq(
// Add your project dependencies here,
jdbc,
anorm,
"mysql" % "mysql-connector-java" % "5.1.18",
"com.typesafe" %% "play-plugins-mailer" % "2.1-RC2",
"org.apache.commons" % "commons-email" % "1.3"
)
val main = play.Project(appName, appVersion, appDependencies).settings(
// define the statements initially evaluated when entering 'console',
// 'console-quick', or 'console-project'
initialCommands := """
|// make app resources accessible
|Thread.currentThread.setContextClassLoader(getClass.getClassLoader)
|new play.core.StaticApplication(new java.io.File("."))
|import models._
|import scala.collection.JavaConversions._
""".stripMargin
)
}
| larryoatmeal/Incident-Response-Manager | project/Build.scala | Scala | apache-2.0 | 947 |
package cz.cvut.fit.cervamar.gatling.check
import cz.cvut.fit.cervamar.gatling.ResultCheck
import io.gatling.commons.validation.{Failure, Validation}
import io.gatling.core.check.CheckResult
import io.gatling.core.session.Session
import org.apache.tinkerpop.gremlin.driver.Result
import scala.collection.mutable
/**
* Created on 12/26/2017.
*
* @author Marek.Cervak
*/
case class SimpleResultCheck(func: List[Result] => Boolean) extends ResultCheck {
  override def check(response: List[Result], session: Session)(implicit cache: mutable.Map[Any, Any]): Validation[CheckResult] = {
    if (func(response)) {
      CheckResult.NoopCheckResultSuccess
    } else {
      Failure("Gremlin Result check Failed")
    }
}
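  // Hypothetical usage sketch: a check simply wraps a predicate over the Gremlin result list,
  // for example requiring a non-empty response:
  //
  //   val nonEmptyCheck = SimpleResultCheck(results => results.nonEmpty)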
} | cervamar/gremlin-gatling | src/main/scala/cz/cvut/fit/cervamar/gatling/check/SimpleResultCheck.scala | Scala | apache-2.0 | 718 |
/**
* Copyright (C) 2014 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.util
import org.apache.http.client.CookieStore
import org.orbeon.oxf.common.OXFException
import org.orbeon.oxf.externalcontext.{Credentials ⇒ _, _}
import org.orbeon.oxf.http.HttpMethod.GET
import org.orbeon.oxf.http._
import org.orbeon.oxf.pipeline.api.PipelineContext
import org.orbeon.oxf.webapp.ProcessorService
import scala.annotation.tailrec
// HTTP client for internal requests
//
// - no actual HTTP requests are performed
// - internal requests are made to the Orbeon servlet
object InternalHttpClient extends HttpClient {
def connect(
url : String,
credentials : Option[Credentials], // ignored
cookieStore : CookieStore, // ignored
method : HttpMethod,
headers : Map[String, List[String]],
content : Option[StreamedContent]
): HttpResponse = {
require(url.startsWith("/"), "InternalHttpClient only supports absolute paths")
val currentProcessorService =
ProcessorService.currentProcessorService.value getOrElse
(throw new OXFException(s"InternalHttpClient: missing current servlet or portlet connecting to $url."))
val incomingExternalContext = NetUtils.getExternalContext
val incomingRequest = incomingExternalContext.getRequest
// NOTE: Only `oxf:redirect` calls `Response.sendRedirect` with `isServerSide = true`. In turn, `oxf:redirect`
// is only called from the PFC with action results, and only passes `isServerSide = true` if
// `instance-passing = "forward"`, which is not the default. Form Runner doesn't make use of this. Even in that
// case data is passed as a URL parameter called `$instance` instead of using a request body. However, one user
// has reported needing this to work as of 2015-05.
@tailrec
def processRedirects(
pathQuery : String,
method : HttpMethod,
headers : Map[String, List[String]],
content : Option[StreamedContent]
): LocalResponse = {
val request =
new LocalRequest(
incomingRequest = incomingRequest,
contextPath = incomingRequest.getContextPath,
pathQuery = pathQuery,
method = method,
headersMaybeCapitalized = headers,
content = content
)
// Honor `Orbeon-Client` header (see also ServletExternalContext)
val urlRewriter =
Headers.firstHeaderIgnoreCase(headers, Headers.OrbeonClient) match {
case Some(client) if Headers.EmbeddedClientValues(client) ⇒
new WSRPURLRewriter(URLRewriterUtils.getPathMatchersCallable, request, wsrpEncodeResources = true)
case Some(_) ⇒
new ServletURLRewriter(request)
case None ⇒
incomingExternalContext.getResponse: URLRewriter
}
val response = new LocalResponse(urlRewriter)
currentProcessorService.service(
new PipelineContext,
new LocalExternalContext(
incomingExternalContext.getWebAppContext,
request,
response
)
)
// NOTE: It is unclear which headers should be passed upon redirect. For example, if we have a User-Agent
// header coming from the browser, it should be kept. But headers associated with content, such as
// Content-Type and Content-Length, must not be provided upon redirect. Possibly, only headers coming from
// the incoming request should be passed, minus content headers.
response.serverSideRedirect match {
case Some(location) ⇒ processRedirects(location, GET, Map.empty, None)
case None ⇒ response
}
}
val response = processRedirects(url, method, headers, content)
new HttpResponse {
lazy val statusCode = response.statusCode
lazy val headers = response.capitalizedHeaders
lazy val lastModified = Headers.firstDateHeaderIgnoreCase(headers, Headers.LastModified)
lazy val content = response.streamedContent
def disconnect() = content.close()
}
}
override def shutdown() = ()
}
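// Hypothetical usage sketch (the path is a placeholder; BasicCookieStore comes from Apache
// HttpClient and, like the credentials, is ignored by this client):
//
//   import org.apache.http.impl.client.BasicCookieStore
//
//   val response = InternalHttpClient.connect(
//     url         = "/some/internal/service",
//     credentials = None,
//     cookieStore = new BasicCookieStore,
//     method      = GET,
//     headers     = Map.empty,
//     content     = None
//   )
//   try println(response.statusCode) finally response.disconnect()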
| brunobuzzi/orbeon-forms | src/main/scala/org/orbeon/oxf/util/InternalHttpClient.scala | Scala | lgpl-2.1 | 4,787 |
package com.dslplatform.json
package runtime
import java.lang.reflect.Type
import scala.collection.mutable
final class ArrayBufferDecoder[E](
manifest: Type,
decoder: JsonReader.ReadObject[E],
empty: () => scala.collection.Iterable[E],
finalize: mutable.ArrayBuffer[E] => scala.collection.Iterable[E]
) extends JsonReader.ReadObject[scala.collection.Iterable[E]] {
require(manifest ne null, "manifest can't be null")
require(decoder ne null, "decoder can't be null")
require(finalize ne null, "finalize can't be null")
override def read(reader: JsonReader[_]): scala.collection.Iterable[E] = {
if (reader.last != '[') {
throw reader.newParseError("Expecting '[' for array start")
}
if (reader.getNextToken() == ']') {
empty()
} else {
val buffer = new mutable.ArrayBuffer[E](4)
buffer += decoder.read(reader)
while (reader.getNextToken() == ',') {
reader.getNextToken()
buffer += decoder.read(reader)
}
if (reader.last() != ']') {
throw reader.newParseError("Expecting ']' for array end")
}
finalize(buffer)
}
}
}
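// Hypothetical usage sketch: decoding a JSON array of ints into an immutable List. The element
// reader shown delegates to dsl-json's NumberConverter, which is an assumption about the
// surrounding setup rather than something this file prescribes.
//
//   val intReader = new JsonReader.ReadObject[Int] {
//     def read(reader: JsonReader[_]): Int = NumberConverter.deserializeInt(reader)
//   }
//   val listDecoder = new ArrayBufferDecoder[Int](
//     manifest = classOf[Int],
//     decoder  = intReader,
//     empty    = () => Nil,
//     finalize = buffer => buffer.toList
//   )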
| ngs-doo/dsl-json | scala/src/main/scala/com/dslplatform/json/runtime/ArrayBufferDecoder.scala | Scala | bsd-3-clause | 1,173 |
// Generated by the Scala Plugin for the Protocol Buffer Compiler.
// Do not edit!
//
// Protofile syntax: PROTO3
package com.google.protobuf.api
import _root_.scalapb.internal.compat.JavaConverters._
/** Api is a light-weight descriptor for an API Interface.
*
* Interfaces are also described as "protocol buffer services" in some contexts,
* such as by the "service" keyword in a .proto file, but they are different
* from API Services, which represent a concrete implementation of an interface
* as opposed to simply a description of methods and bindings. They are also
* sometimes simply referred to as "APIs" in other contexts, such as the name of
* this message itself. See https://cloud.google.com/apis/design/glossary for
* detailed terminology.
*
* @param name
* The fully qualified name of this interface, including package name
* followed by the interface's simple name.
* @param methods
* The methods of this interface, in unspecified order.
* @param options
* Any metadata attached to the interface.
* @param version
* A version string for this interface. If specified, must have the form
* `major-version.minor-version`, as in `1.10`. If the minor version is
* omitted, it defaults to zero. If the entire version field is empty, the
* major version is derived from the package name, as outlined below. If the
* field is not empty, the version in the package name will be verified to be
* consistent with what is provided here.
*
* The versioning schema uses [semantic
* versioning](http://semver.org) where the major version number
* indicates a breaking change and the minor version an additive,
* non-breaking change. Both version numbers are signals to users
* what to expect from different versions, and should be carefully
* chosen based on the product plan.
*
* The major version is also reflected in the package name of the
* interface, which must end in `v<major-version>`, as in
* `google.feature.v1`. For major versions 0 and 1, the suffix can
* be omitted. Zero major versions must only be used for
* experimental, non-GA interfaces.
* @param sourceContext
* Source context for the protocol buffer service represented by this
* message.
* @param mixins
* Included interfaces. See [Mixin][].
* @param syntax
* The source syntax of the service.
*/
@SerialVersionUID(0L)
final case class Api(
name: _root_.scala.Predef.String = "",
methods: _root_.scala.Seq[com.google.protobuf.api.Method] = _root_.scala.Seq.empty,
options: _root_.scala.Seq[com.google.protobuf.`type`.OptionProto] = _root_.scala.Seq.empty,
version: _root_.scala.Predef.String = "",
sourceContext: _root_.scala.Option[com.google.protobuf.source_context.SourceContext] = _root_.scala.None,
mixins: _root_.scala.Seq[com.google.protobuf.api.Mixin] = _root_.scala.Seq.empty,
syntax: com.google.protobuf.`type`.Syntax = com.google.protobuf.`type`.Syntax.SYNTAX_PROTO2,
unknownFields: _root_.scalapb.UnknownFieldSet = _root_.scalapb.UnknownFieldSet.empty
) extends scalapb.GeneratedMessage with scalapb.lenses.Updatable[Api] {
@transient
private[this] var __serializedSizeCachedValue: _root_.scala.Int = 0
private[this] def __computeSerializedValue(): _root_.scala.Int = {
var __size = 0
{
val __value = name
if (!__value.isEmpty) {
__size += _root_.com.google.protobuf.CodedOutputStream.computeStringSize(1, __value)
}
};
methods.foreach { __item =>
val __value = __item
__size += 1 + _root_.com.google.protobuf.CodedOutputStream.computeUInt32SizeNoTag(__value.serializedSize) + __value.serializedSize
}
options.foreach { __item =>
val __value = __item
__size += 1 + _root_.com.google.protobuf.CodedOutputStream.computeUInt32SizeNoTag(__value.serializedSize) + __value.serializedSize
}
{
val __value = version
if (!__value.isEmpty) {
__size += _root_.com.google.protobuf.CodedOutputStream.computeStringSize(4, __value)
}
};
if (sourceContext.isDefined) {
val __value = sourceContext.get
__size += 1 + _root_.com.google.protobuf.CodedOutputStream.computeUInt32SizeNoTag(__value.serializedSize) + __value.serializedSize
};
mixins.foreach { __item =>
val __value = __item
__size += 1 + _root_.com.google.protobuf.CodedOutputStream.computeUInt32SizeNoTag(__value.serializedSize) + __value.serializedSize
}
{
val __value = syntax.value
if (__value != 0) {
__size += _root_.com.google.protobuf.CodedOutputStream.computeEnumSize(7, __value)
}
};
__size += unknownFields.serializedSize
__size
}
override def serializedSize: _root_.scala.Int = {
var read = __serializedSizeCachedValue
if (read == 0) {
read = __computeSerializedValue()
__serializedSizeCachedValue = read
}
read
}
def writeTo(`_output__`: _root_.com.google.protobuf.CodedOutputStream): _root_.scala.Unit = {
{
val __v = name
if (!__v.isEmpty) {
_output__.writeString(1, __v)
}
};
methods.foreach { __v =>
val __m = __v
_output__.writeTag(2, 2)
_output__.writeUInt32NoTag(__m.serializedSize)
__m.writeTo(_output__)
};
options.foreach { __v =>
val __m = __v
_output__.writeTag(3, 2)
_output__.writeUInt32NoTag(__m.serializedSize)
__m.writeTo(_output__)
};
{
val __v = version
if (!__v.isEmpty) {
_output__.writeString(4, __v)
}
};
sourceContext.foreach { __v =>
val __m = __v
_output__.writeTag(5, 2)
_output__.writeUInt32NoTag(__m.serializedSize)
__m.writeTo(_output__)
};
mixins.foreach { __v =>
val __m = __v
_output__.writeTag(6, 2)
_output__.writeUInt32NoTag(__m.serializedSize)
__m.writeTo(_output__)
};
{
val __v = syntax.value
if (__v != 0) {
_output__.writeEnum(7, __v)
}
};
unknownFields.writeTo(_output__)
}
def withName(__v: _root_.scala.Predef.String): Api = copy(name = __v)
def clearMethods = copy(methods = _root_.scala.Seq.empty)
def addMethods(__vs: com.google.protobuf.api.Method*): Api = addAllMethods(__vs)
def addAllMethods(__vs: Iterable[com.google.protobuf.api.Method]): Api = copy(methods = methods ++ __vs)
def withMethods(__v: _root_.scala.Seq[com.google.protobuf.api.Method]): Api = copy(methods = __v)
def clearOptions = copy(options = _root_.scala.Seq.empty)
def addOptions(__vs: com.google.protobuf.`type`.OptionProto*): Api = addAllOptions(__vs)
def addAllOptions(__vs: Iterable[com.google.protobuf.`type`.OptionProto]): Api = copy(options = options ++ __vs)
def withOptions(__v: _root_.scala.Seq[com.google.protobuf.`type`.OptionProto]): Api = copy(options = __v)
def withVersion(__v: _root_.scala.Predef.String): Api = copy(version = __v)
def getSourceContext: com.google.protobuf.source_context.SourceContext = sourceContext.getOrElse(com.google.protobuf.source_context.SourceContext.defaultInstance)
def clearSourceContext: Api = copy(sourceContext = _root_.scala.None)
def withSourceContext(__v: com.google.protobuf.source_context.SourceContext): Api = copy(sourceContext = Option(__v))
def clearMixins = copy(mixins = _root_.scala.Seq.empty)
def addMixins(__vs: com.google.protobuf.api.Mixin*): Api = addAllMixins(__vs)
def addAllMixins(__vs: Iterable[com.google.protobuf.api.Mixin]): Api = copy(mixins = mixins ++ __vs)
def withMixins(__v: _root_.scala.Seq[com.google.protobuf.api.Mixin]): Api = copy(mixins = __v)
def withSyntax(__v: com.google.protobuf.`type`.Syntax): Api = copy(syntax = __v)
def withUnknownFields(__v: _root_.scalapb.UnknownFieldSet) = copy(unknownFields = __v)
def discardUnknownFields = copy(unknownFields = _root_.scalapb.UnknownFieldSet.empty)
def getFieldByNumber(__fieldNumber: _root_.scala.Int): _root_.scala.Any = {
(__fieldNumber: @_root_.scala.unchecked) match {
case 1 => {
val __t = name
if (__t != "") __t else null
}
case 2 => methods
case 3 => options
case 4 => {
val __t = version
if (__t != "") __t else null
}
case 5 => sourceContext.orNull
case 6 => mixins
case 7 => {
val __t = syntax.javaValueDescriptor
if (__t.getNumber() != 0) __t else null
}
}
}
def getField(__field: _root_.scalapb.descriptors.FieldDescriptor): _root_.scalapb.descriptors.PValue = {
_root_.scala.Predef.require(__field.containingMessage eq companion.scalaDescriptor)
(__field.number: @_root_.scala.unchecked) match {
case 1 => _root_.scalapb.descriptors.PString(name)
case 2 => _root_.scalapb.descriptors.PRepeated(methods.iterator.map(_.toPMessage).toVector)
case 3 => _root_.scalapb.descriptors.PRepeated(options.iterator.map(_.toPMessage).toVector)
case 4 => _root_.scalapb.descriptors.PString(version)
case 5 => sourceContext.map(_.toPMessage).getOrElse(_root_.scalapb.descriptors.PEmpty)
case 6 => _root_.scalapb.descriptors.PRepeated(mixins.iterator.map(_.toPMessage).toVector)
case 7 => _root_.scalapb.descriptors.PEnum(syntax.scalaValueDescriptor)
}
}
def toProtoString: _root_.scala.Predef.String = _root_.scalapb.TextFormat.printToUnicodeString(this)
def companion = com.google.protobuf.api.Api
}
object Api extends scalapb.GeneratedMessageCompanion[com.google.protobuf.api.Api] with scalapb.JavaProtoSupport[com.google.protobuf.api.Api, com.google.protobuf.Api] {
implicit def messageCompanion: scalapb.GeneratedMessageCompanion[com.google.protobuf.api.Api] with scalapb.JavaProtoSupport[com.google.protobuf.api.Api, com.google.protobuf.Api] = this
def toJavaProto(scalaPbSource: com.google.protobuf.api.Api): com.google.protobuf.Api = {
val javaPbOut = com.google.protobuf.Api.newBuilder
javaPbOut.setName(scalaPbSource.name)
javaPbOut.addAllMethods(_root_.scalapb.internal.compat.toIterable(scalaPbSource.methods.iterator.map(com.google.protobuf.api.Method.toJavaProto)).asJava)
javaPbOut.addAllOptions(_root_.scalapb.internal.compat.toIterable(scalaPbSource.options.iterator.map(com.google.protobuf.`type`.OptionProto.toJavaProto)).asJava)
javaPbOut.setVersion(scalaPbSource.version)
scalaPbSource.sourceContext.map(com.google.protobuf.source_context.SourceContext.toJavaProto).foreach(javaPbOut.setSourceContext)
javaPbOut.addAllMixins(_root_.scalapb.internal.compat.toIterable(scalaPbSource.mixins.iterator.map(com.google.protobuf.api.Mixin.toJavaProto)).asJava)
javaPbOut.setSyntaxValue(scalaPbSource.syntax.value)
javaPbOut.build
}
def fromJavaProto(javaPbSource: com.google.protobuf.Api): com.google.protobuf.api.Api = com.google.protobuf.api.Api(
name = javaPbSource.getName,
methods = javaPbSource.getMethodsList.asScala.iterator.map(com.google.protobuf.api.Method.fromJavaProto).toSeq,
options = javaPbSource.getOptionsList.asScala.iterator.map(com.google.protobuf.`type`.OptionProto.fromJavaProto).toSeq,
version = javaPbSource.getVersion,
sourceContext = if (javaPbSource.hasSourceContext) Some(com.google.protobuf.source_context.SourceContext.fromJavaProto(javaPbSource.getSourceContext)) else _root_.scala.None,
mixins = javaPbSource.getMixinsList.asScala.iterator.map(com.google.protobuf.api.Mixin.fromJavaProto).toSeq,
syntax = com.google.protobuf.`type`.Syntax.fromValue(javaPbSource.getSyntaxValue.intValue)
)
def merge(`_message__`: com.google.protobuf.api.Api, `_input__`: _root_.com.google.protobuf.CodedInputStream): com.google.protobuf.api.Api = {
var __name = `_message__`.name
val __methods = (_root_.scala.collection.immutable.Vector.newBuilder[com.google.protobuf.api.Method] ++= `_message__`.methods)
val __options = (_root_.scala.collection.immutable.Vector.newBuilder[com.google.protobuf.`type`.OptionProto] ++= `_message__`.options)
var __version = `_message__`.version
var __sourceContext = `_message__`.sourceContext
val __mixins = (_root_.scala.collection.immutable.Vector.newBuilder[com.google.protobuf.api.Mixin] ++= `_message__`.mixins)
var __syntax = `_message__`.syntax
var `_unknownFields__`: _root_.scalapb.UnknownFieldSet.Builder = null
var _done__ = false
while (!_done__) {
val _tag__ = _input__.readTag()
_tag__ match {
case 0 => _done__ = true
case 10 =>
__name = _input__.readStringRequireUtf8()
case 18 =>
__methods += _root_.scalapb.LiteParser.readMessage(_input__, com.google.protobuf.api.Method.defaultInstance)
case 26 =>
__options += _root_.scalapb.LiteParser.readMessage(_input__, com.google.protobuf.`type`.OptionProto.defaultInstance)
case 34 =>
__version = _input__.readStringRequireUtf8()
case 42 =>
__sourceContext = Option(_root_.scalapb.LiteParser.readMessage(_input__, __sourceContext.getOrElse(com.google.protobuf.source_context.SourceContext.defaultInstance)))
case 50 =>
__mixins += _root_.scalapb.LiteParser.readMessage(_input__, com.google.protobuf.api.Mixin.defaultInstance)
case 56 =>
__syntax = com.google.protobuf.`type`.Syntax.fromValue(_input__.readEnum())
case tag =>
if (_unknownFields__ == null) {
_unknownFields__ = new _root_.scalapb.UnknownFieldSet.Builder(_message__.unknownFields)
}
_unknownFields__.parseField(tag, _input__)
}
}
com.google.protobuf.api.Api(
name = __name,
methods = __methods.result(),
options = __options.result(),
version = __version,
sourceContext = __sourceContext,
mixins = __mixins.result(),
syntax = __syntax,
unknownFields = if (_unknownFields__ == null) _message__.unknownFields else _unknownFields__.result()
)
}
implicit def messageReads: _root_.scalapb.descriptors.Reads[com.google.protobuf.api.Api] = _root_.scalapb.descriptors.Reads{
case _root_.scalapb.descriptors.PMessage(__fieldsMap) =>
_root_.scala.Predef.require(__fieldsMap.keys.forall(_.containingMessage eq scalaDescriptor), "FieldDescriptor does not match message type.")
com.google.protobuf.api.Api(
name = __fieldsMap.get(scalaDescriptor.findFieldByNumber(1).get).map(_.as[_root_.scala.Predef.String]).getOrElse(""),
methods = __fieldsMap.get(scalaDescriptor.findFieldByNumber(2).get).map(_.as[_root_.scala.Seq[com.google.protobuf.api.Method]]).getOrElse(_root_.scala.Seq.empty),
options = __fieldsMap.get(scalaDescriptor.findFieldByNumber(3).get).map(_.as[_root_.scala.Seq[com.google.protobuf.`type`.OptionProto]]).getOrElse(_root_.scala.Seq.empty),
version = __fieldsMap.get(scalaDescriptor.findFieldByNumber(4).get).map(_.as[_root_.scala.Predef.String]).getOrElse(""),
sourceContext = __fieldsMap.get(scalaDescriptor.findFieldByNumber(5).get).flatMap(_.as[_root_.scala.Option[com.google.protobuf.source_context.SourceContext]]),
mixins = __fieldsMap.get(scalaDescriptor.findFieldByNumber(6).get).map(_.as[_root_.scala.Seq[com.google.protobuf.api.Mixin]]).getOrElse(_root_.scala.Seq.empty),
syntax = com.google.protobuf.`type`.Syntax.fromValue(__fieldsMap.get(scalaDescriptor.findFieldByNumber(7).get).map(_.as[_root_.scalapb.descriptors.EnumValueDescriptor]).getOrElse(com.google.protobuf.`type`.Syntax.SYNTAX_PROTO2.scalaValueDescriptor).number)
)
case _ => throw new RuntimeException("Expected PMessage")
}
def javaDescriptor: _root_.com.google.protobuf.Descriptors.Descriptor = ApiProto.javaDescriptor.getMessageTypes().get(0)
def scalaDescriptor: _root_.scalapb.descriptors.Descriptor = ApiProto.scalaDescriptor.messages(0)
def messageCompanionForFieldNumber(__number: _root_.scala.Int): _root_.scalapb.GeneratedMessageCompanion[_] = {
var __out: _root_.scalapb.GeneratedMessageCompanion[_] = null
(__number: @_root_.scala.unchecked) match {
case 2 => __out = com.google.protobuf.api.Method
case 3 => __out = com.google.protobuf.`type`.OptionProto
case 5 => __out = com.google.protobuf.source_context.SourceContext
case 6 => __out = com.google.protobuf.api.Mixin
}
__out
}
lazy val nestedMessagesCompanions: Seq[_root_.scalapb.GeneratedMessageCompanion[_ <: _root_.scalapb.GeneratedMessage]] = Seq.empty
def enumCompanionForFieldNumber(__fieldNumber: _root_.scala.Int): _root_.scalapb.GeneratedEnumCompanion[_] = {
(__fieldNumber: @_root_.scala.unchecked) match {
case 7 => com.google.protobuf.`type`.Syntax
}
}
lazy val defaultInstance = com.google.protobuf.api.Api(
name = "",
methods = _root_.scala.Seq.empty,
options = _root_.scala.Seq.empty,
version = "",
sourceContext = _root_.scala.None,
mixins = _root_.scala.Seq.empty,
syntax = com.google.protobuf.`type`.Syntax.SYNTAX_PROTO2
)
implicit class ApiLens[UpperPB](_l: _root_.scalapb.lenses.Lens[UpperPB, com.google.protobuf.api.Api]) extends _root_.scalapb.lenses.ObjectLens[UpperPB, com.google.protobuf.api.Api](_l) {
def name: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Predef.String] = field(_.name)((c_, f_) => c_.copy(name = f_))
def methods: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Seq[com.google.protobuf.api.Method]] = field(_.methods)((c_, f_) => c_.copy(methods = f_))
def options: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Seq[com.google.protobuf.`type`.OptionProto]] = field(_.options)((c_, f_) => c_.copy(options = f_))
def version: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Predef.String] = field(_.version)((c_, f_) => c_.copy(version = f_))
def sourceContext: _root_.scalapb.lenses.Lens[UpperPB, com.google.protobuf.source_context.SourceContext] = field(_.getSourceContext)((c_, f_) => c_.copy(sourceContext = Option(f_)))
def optionalSourceContext: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Option[com.google.protobuf.source_context.SourceContext]] = field(_.sourceContext)((c_, f_) => c_.copy(sourceContext = f_))
def mixins: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Seq[com.google.protobuf.api.Mixin]] = field(_.mixins)((c_, f_) => c_.copy(mixins = f_))
def syntax: _root_.scalapb.lenses.Lens[UpperPB, com.google.protobuf.`type`.Syntax] = field(_.syntax)((c_, f_) => c_.copy(syntax = f_))
}
final val NAME_FIELD_NUMBER = 1
final val METHODS_FIELD_NUMBER = 2
final val OPTIONS_FIELD_NUMBER = 3
final val VERSION_FIELD_NUMBER = 4
final val SOURCE_CONTEXT_FIELD_NUMBER = 5
final val MIXINS_FIELD_NUMBER = 6
final val SYNTAX_FIELD_NUMBER = 7
def of(
name: _root_.scala.Predef.String,
methods: _root_.scala.Seq[com.google.protobuf.api.Method],
options: _root_.scala.Seq[com.google.protobuf.`type`.OptionProto],
version: _root_.scala.Predef.String,
sourceContext: _root_.scala.Option[com.google.protobuf.source_context.SourceContext],
mixins: _root_.scala.Seq[com.google.protobuf.api.Mixin],
syntax: com.google.protobuf.`type`.Syntax
): _root_.com.google.protobuf.api.Api = _root_.com.google.protobuf.api.Api(
name,
methods,
options,
version,
sourceContext,
mixins,
syntax
)
// @@protoc_insertion_point(GeneratedMessageCompanion[google.protobuf.Api])
}
| trueaccord/ScalaPB | scalapb-runtime/src/main/scalajvm/com/google/protobuf/api/Api.scala | Scala | apache-2.0 | 19,746 |
package com.twitter.finagle.http
import com.twitter.finagle.benchmark.StdBenchAnnotations
import org.openjdk.jmh.annotations._
import org.openjdk.jmh.infra.Blackhole
import scala.util.Random
@State(Scope.Benchmark)
abstract class HeaderMapBenchmark extends StdBenchAnnotations {
protected def newMap(): HeaderMap
  // We supply 18 random 14-character strings and build a 9-element header map from them.
  // The 10th entry is Content-Length -> 100 so we can reliably query it in the
  // benchmark.
private val map = Iterator.fill(9 * 2)(Random.alphanumeric.take(14).mkString)
.grouped(2)
.foldLeft(newMap())((map, h) => map.add(h.head, h.last))
.add("Content-Length", "100")
@Benchmark
def create(): HeaderMap = newMap()
@Benchmark
def get(): Option[String] = map.get("Content-Length")
@Benchmark
def createAndAdd(): HeaderMap = newMap().add("Content-Length", "100")
@Benchmark
def iterate(b: Blackhole): Unit = map.foreach(h => b.consume(h))
}
class DefaultHeaderMapBenchmark extends HeaderMapBenchmark {
protected def newMap(): HeaderMap = HeaderMap()
}
| mkhq/finagle | finagle-benchmark/src/main/scala/com/twitter/finagle/http/HeaderMapBenchmark.scala | Scala | apache-2.0 | 1,096 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.api.java
import java.util.{List => JList}
import java.lang.{Long => JLong}
import scala.collection.JavaConversions._
import scala.reflect.ClassTag
import org.apache.spark.streaming._
import org.apache.spark.api.java.{JavaPairRDD, JavaRDDLike, JavaRDD}
import org.apache.spark.api.java.function.{Function => JFunction, Function2 => JFunction2}
import org.apache.spark.api.java.function.{Function3 => JFunction3, _}
import java.util
import org.apache.spark.rdd.RDD
import JavaDStream._
import org.apache.spark.streaming.dstream.DStream
trait JavaDStreamLike[T, This <: JavaDStreamLike[T, This, R], R <: JavaRDDLike[T, R]]
extends Serializable {
implicit val classTag: ClassTag[T]
def dstream: DStream[T]
def wrapRDD(in: RDD[T]): R
implicit def scalaIntToJavaLong(in: DStream[Long]): JavaDStream[JLong] = {
in.map(new JLong(_))
}
/**
* Print the first ten elements of each RDD generated in this DStream. This is an output
   * operator, so this DStream will be registered as an output stream and therefore materialized.
*/
def print() = dstream.print()
/**
* Return a new DStream in which each RDD has a single element generated by counting each RDD
* of this DStream.
*/
def count(): JavaDStream[JLong] = dstream.count()
/**
* Return a new DStream in which each RDD contains the counts of each distinct value in
* each RDD of this DStream. Hash partitioning is used to generate the RDDs with
* Spark's default number of partitions.
*/
def countByValue(): JavaPairDStream[T, JLong] = {
JavaPairDStream.scalaToJavaLong(dstream.countByValue())
}
/**
* Return a new DStream in which each RDD contains the counts of each distinct value in
* each RDD of this DStream. Hash partitioning is used to generate the RDDs with `numPartitions`
* partitions.
* @param numPartitions number of partitions of each RDD in the new DStream.
*/
def countByValue(numPartitions: Int): JavaPairDStream[T, JLong] = {
JavaPairDStream.scalaToJavaLong(dstream.countByValue(numPartitions))
}
/**
* Return a new DStream in which each RDD has a single element generated by counting the number
* of elements in a window over this DStream. windowDuration and slideDuration are as defined in the
* window() operation. This is equivalent to window(windowDuration, slideDuration).count()
*/
def countByWindow(windowDuration: Duration, slideDuration: Duration) : JavaDStream[JLong] = {
dstream.countByWindow(windowDuration, slideDuration)
}
/**
* Return a new DStream in which each RDD contains the count of distinct elements in
* RDDs in a sliding window over this DStream. Hash partitioning is used to generate the RDDs with
* Spark's default number of partitions.
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
*/
def countByValueAndWindow(windowDuration: Duration, slideDuration: Duration)
: JavaPairDStream[T, JLong] = {
JavaPairDStream.scalaToJavaLong(
dstream.countByValueAndWindow(windowDuration, slideDuration))
}
/**
* Return a new DStream in which each RDD contains the count of distinct elements in
* RDDs in a sliding window over this DStream. Hash partitioning is used to generate the RDDs with `numPartitions`
* partitions.
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
* @param numPartitions number of partitions of each RDD in the new DStream.
*/
def countByValueAndWindow(windowDuration: Duration, slideDuration: Duration, numPartitions: Int)
: JavaPairDStream[T, JLong] = {
JavaPairDStream.scalaToJavaLong(
dstream.countByValueAndWindow(windowDuration, slideDuration, numPartitions))
}
/**
* Return a new DStream in which each RDD is generated by applying glom() to each RDD of
* this DStream. Applying glom() to an RDD coalesces all elements within each partition into
* an array.
*/
def glom(): JavaDStream[JList[T]] = {
new JavaDStream(dstream.glom().map(x => new java.util.ArrayList[T](x.toSeq)))
}
/** Return the [[org.apache.spark.streaming.StreamingContext]] associated with this DStream */
def context(): StreamingContext = dstream.context()
/** Return a new DStream by applying a function to all elements of this DStream. */
def map[R](f: JFunction[T, R]): JavaDStream[R] = {
new JavaDStream(dstream.map(f)(f.returnType()))(f.returnType())
}
/** Return a new DStream by applying a function to all elements of this DStream. */
def map[K2, V2](f: PairFunction[T, K2, V2]): JavaPairDStream[K2, V2] = {
def cm = implicitly[ClassTag[Tuple2[_, _]]].asInstanceOf[ClassTag[Tuple2[K2, V2]]]
new JavaPairDStream(dstream.map(f)(cm))(f.keyType(), f.valueType())
}
/**
* Return a new DStream by applying a function to all elements of this DStream,
* and then flattening the results
*/
def flatMap[U](f: FlatMapFunction[T, U]): JavaDStream[U] = {
import scala.collection.JavaConverters._
def fn = (x: T) => f.apply(x).asScala
new JavaDStream(dstream.flatMap(fn)(f.elementType()))(f.elementType())
}
/**
* Return a new DStream by applying a function to all elements of this DStream,
* and then flattening the results
*/
def flatMap[K2, V2](f: PairFlatMapFunction[T, K2, V2]): JavaPairDStream[K2, V2] = {
import scala.collection.JavaConverters._
def fn = (x: T) => f.apply(x).asScala
def cm = implicitly[ClassTag[Tuple2[_, _]]].asInstanceOf[ClassTag[Tuple2[K2, V2]]]
new JavaPairDStream(dstream.flatMap(fn)(cm))(f.keyType(), f.valueType())
}
/**
* Return a new DStream in which each RDD is generated by applying mapPartitions() to each RDDs
* of this DStream. Applying mapPartitions() to an RDD applies a function to each partition
* of the RDD.
*/
def mapPartitions[U](f: FlatMapFunction[java.util.Iterator[T], U]): JavaDStream[U] = {
def fn = (x: Iterator[T]) => asScalaIterator(f.apply(asJavaIterator(x)).iterator())
new JavaDStream(dstream.mapPartitions(fn)(f.elementType()))(f.elementType())
}
/**
* Return a new DStream in which each RDD is generated by applying mapPartitions() to each RDDs
* of this DStream. Applying mapPartitions() to an RDD applies a function to each partition
* of the RDD.
*/
def mapPartitions[K2, V2](f: PairFlatMapFunction[java.util.Iterator[T], K2, V2])
: JavaPairDStream[K2, V2] = {
def fn = (x: Iterator[T]) => asScalaIterator(f.apply(asJavaIterator(x)).iterator())
new JavaPairDStream(dstream.mapPartitions(fn))(f.keyType(), f.valueType())
}
/**
* Return a new DStream in which each RDD has a single element generated by reducing each RDD
* of this DStream.
*/
def reduce(f: JFunction2[T, T, T]): JavaDStream[T] = dstream.reduce(f)
/**
* Return a new DStream in which each RDD has a single element generated by reducing all
* elements in a sliding window over this DStream.
* @param reduceFunc associative reduce function
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
*/
def reduceByWindow(
reduceFunc: (T, T) => T,
windowDuration: Duration,
slideDuration: Duration
): DStream[T] = {
dstream.reduceByWindow(reduceFunc, windowDuration, slideDuration)
}
/**
* Return a new DStream in which each RDD has a single element generated by reducing all
* elements in a sliding window over this DStream. However, the reduction is done incrementally
* using the old window's reduced value :
* 1. reduce the new values that entered the window (e.g., adding new counts)
* 2. "inverse reduce" the old values that left the window (e.g., subtracting old counts)
* This is more efficient than reduceByWindow without "inverse reduce" function.
* However, it is applicable to only "invertible reduce functions".
* @param reduceFunc associative reduce function
* @param invReduceFunc inverse reduce function
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
*/
def reduceByWindow(
reduceFunc: JFunction2[T, T, T],
invReduceFunc: JFunction2[T, T, T],
windowDuration: Duration,
slideDuration: Duration
): JavaDStream[T] = {
dstream.reduceByWindow(reduceFunc, invReduceFunc, windowDuration, slideDuration)
}
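  // Hypothetical sketch of the underlying Scala call this wrapper delegates to: a running sum over
  // a sliding window, where subtraction "inverse reduces" the values leaving the window (the
  // stream name and durations are placeholders):
  //
  //   val windowedSums = ints.reduceByWindow(_ + _, _ - _, Seconds(30), Seconds(10))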
/**
* Return all the RDDs between 'fromDuration' to 'toDuration' (both included)
*/
def slice(fromTime: Time, toTime: Time): JList[R] = {
new util.ArrayList(dstream.slice(fromTime, toTime).map(wrapRDD(_)).toSeq)
}
/**
* Apply a function to each RDD in this DStream. This is an output operator, so
* 'this' DStream will be registered as an output stream and therefore materialized.
*
* @deprecated As of release 0.9.0, replaced by foreachRDD
*/
@Deprecated
def foreach(foreachFunc: JFunction[R, Void]) {
foreachRDD(foreachFunc)
}
/**
* Apply a function to each RDD in this DStream. This is an output operator, so
* 'this' DStream will be registered as an output stream and therefore materialized.
*
* @deprecated As of release 0.9.0, replaced by foreachRDD
*/
@Deprecated
def foreach(foreachFunc: JFunction2[R, Time, Void]) {
foreachRDD(foreachFunc)
}
/**
* Apply a function to each RDD in this DStream. This is an output operator, so
* 'this' DStream will be registered as an output stream and therefore materialized.
*/
def foreachRDD(foreachFunc: JFunction[R, Void]) {
dstream.foreachRDD(rdd => foreachFunc.call(wrapRDD(rdd)))
}
/**
* Apply a function to each RDD in this DStream. This is an output operator, so
* 'this' DStream will be registered as an output stream and therefore materialized.
*/
def foreachRDD(foreachFunc: JFunction2[R, Time, Void]) {
dstream.foreachRDD((rdd, time) => foreachFunc.call(wrapRDD(rdd), time))
}
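  // Hypothetical sketch of the underlying Scala call this wrapper delegates to, using the batch
  // time in the side effect (the stream name is a placeholder):
  //
  //   lines.foreachRDD { (rdd, time) =>
  //     println(s"batch at $time contains ${rdd.count()} records")
  //   }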
/**
* Return a new DStream in which each RDD is generated by applying a function
* on each RDD of 'this' DStream.
*/
def transform[U](transformFunc: JFunction[R, JavaRDD[U]]): JavaDStream[U] = {
implicit val cm: ClassTag[U] =
implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[U]]
def scalaTransform (in: RDD[T]): RDD[U] =
transformFunc.call(wrapRDD(in)).rdd
dstream.transform(scalaTransform(_))
}
/**
* Return a new DStream in which each RDD is generated by applying a function
* on each RDD of 'this' DStream.
*/
def transform[U](transformFunc: JFunction2[R, Time, JavaRDD[U]]): JavaDStream[U] = {
implicit val cm: ClassTag[U] =
implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[U]]
def scalaTransform (in: RDD[T], time: Time): RDD[U] =
transformFunc.call(wrapRDD(in), time).rdd
dstream.transform(scalaTransform(_, _))
}
/**
* Return a new DStream in which each RDD is generated by applying a function
* on each RDD of 'this' DStream.
*/
def transform[K2, V2](transformFunc: JFunction[R, JavaPairRDD[K2, V2]]):
JavaPairDStream[K2, V2] = {
implicit val cmk: ClassTag[K2] =
implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[K2]]
implicit val cmv: ClassTag[V2] =
implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[V2]]
def scalaTransform (in: RDD[T]): RDD[(K2, V2)] =
transformFunc.call(wrapRDD(in)).rdd
dstream.transform(scalaTransform(_))
}
/**
* Return a new DStream in which each RDD is generated by applying a function
* on each RDD of 'this' DStream.
*/
def transform[K2, V2](transformFunc: JFunction2[R, Time, JavaPairRDD[K2, V2]]):
JavaPairDStream[K2, V2] = {
implicit val cmk: ClassTag[K2] =
implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[K2]]
implicit val cmv: ClassTag[V2] =
implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[V2]]
def scalaTransform (in: RDD[T], time: Time): RDD[(K2, V2)] =
transformFunc.call(wrapRDD(in), time).rdd
dstream.transform(scalaTransform(_, _))
}
/**
* Return a new DStream in which each RDD is generated by applying a function
* on each RDD of 'this' DStream and 'other' DStream.
*/
def transformWith[U, W](
other: JavaDStream[U],
transformFunc: JFunction3[R, JavaRDD[U], Time, JavaRDD[W]]
): JavaDStream[W] = {
implicit val cmu: ClassTag[U] =
implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[U]]
implicit val cmv: ClassTag[W] =
implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[W]]
def scalaTransform (inThis: RDD[T], inThat: RDD[U], time: Time): RDD[W] =
transformFunc.call(wrapRDD(inThis), other.wrapRDD(inThat), time).rdd
dstream.transformWith[U, W](other.dstream, scalaTransform(_, _, _))
}
/**
* Return a new DStream in which each RDD is generated by applying a function
* on each RDD of 'this' DStream and 'other' DStream.
*/
def transformWith[U, K2, V2](
other: JavaDStream[U],
transformFunc: JFunction3[R, JavaRDD[U], Time, JavaPairRDD[K2, V2]]
): JavaPairDStream[K2, V2] = {
implicit val cmu: ClassTag[U] =
implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[U]]
implicit val cmk2: ClassTag[K2] =
implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[K2]]
implicit val cmv2: ClassTag[V2] =
implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[V2]]
def scalaTransform (inThis: RDD[T], inThat: RDD[U], time: Time): RDD[(K2, V2)] =
transformFunc.call(wrapRDD(inThis), other.wrapRDD(inThat), time).rdd
dstream.transformWith[U, (K2, V2)](other.dstream, scalaTransform(_, _, _))
}
/**
* Return a new DStream in which each RDD is generated by applying a function
* on each RDD of 'this' DStream and 'other' DStream.
*/
def transformWith[K2, V2, W](
other: JavaPairDStream[K2, V2],
transformFunc: JFunction3[R, JavaPairRDD[K2, V2], Time, JavaRDD[W]]
): JavaDStream[W] = {
implicit val cmk2: ClassTag[K2] =
implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[K2]]
implicit val cmv2: ClassTag[V2] =
implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[V2]]
implicit val cmw: ClassTag[W] =
implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[W]]
def scalaTransform (inThis: RDD[T], inThat: RDD[(K2, V2)], time: Time): RDD[W] =
transformFunc.call(wrapRDD(inThis), other.wrapRDD(inThat), time).rdd
dstream.transformWith[(K2, V2), W](other.dstream, scalaTransform(_, _, _))
}
/**
* Return a new DStream in which each RDD is generated by applying a function
* on each RDD of 'this' DStream and 'other' DStream.
*/
def transformWith[K2, V2, K3, V3](
other: JavaPairDStream[K2, V2],
transformFunc: JFunction3[R, JavaPairRDD[K2, V2], Time, JavaPairRDD[K3, V3]]
): JavaPairDStream[K3, V3] = {
implicit val cmk2: ClassTag[K2] =
implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[K2]]
implicit val cmv2: ClassTag[V2] =
implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[V2]]
implicit val cmk3: ClassTag[K3] =
implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[K3]]
implicit val cmv3: ClassTag[V3] =
implicitly[ClassTag[AnyRef]].asInstanceOf[ClassTag[V3]]
def scalaTransform (inThis: RDD[T], inThat: RDD[(K2, V2)], time: Time): RDD[(K3, V3)] =
transformFunc.call(wrapRDD(inThis), other.wrapRDD(inThat), time).rdd
dstream.transformWith[(K2, V2), (K3, V3)](other.dstream, scalaTransform(_, _, _))
}
/**
* Enable periodic checkpointing of RDDs of this DStream.
* @param interval Time interval after which generated RDD will be checkpointed
*/
def checkpoint(interval: Duration) = {
dstream.checkpoint(interval)
}
}
| dotunolafunmiloye/spark | streaming/src/main/scala/org/apache/spark/streaming/api/java/JavaDStreamLike.scala | Scala | apache-2.0 | 17,595 |
package org.jetbrains.plugins.hocon.lexer
import com.intellij.lexer.LexerBase
import com.intellij.psi.tree.IElementType
import org.jetbrains.plugins.hocon.HoconConstants
import scala.annotation.tailrec
import scala.util.matching.Regex
object HoconLexer {
case class State(raw: Int) extends AnyVal
final val Initial = State(0)
final val Value = State(1)
final val SubStarting = State(2)
final val SubStarted = State(3)
final val Substitution = State(4)
final val States = Array(Initial, Value, SubStarting, SubStarted, Substitution)
  final val ForbiddenChars = """$"{}[]:=,+#`^?!@*&\""".toSet
  final val UnquotedSpecialChars = """.()"""
  final val KeyForbiddenChars = ForbiddenChars + '.'
  final val SpecialWhitespace = "\u00A0\u2007\u202F\uFEFF"
}
class HoconLexer extends LexerBase {
import org.jetbrains.plugins.hocon.CommonUtil._
import org.jetbrains.plugins.hocon.lexer.HoconLexer._
import org.jetbrains.plugins.hocon.lexer.HoconTokenType._
case class TokenMatch(token: HoconTokenType, length: Int, newState: State)
abstract class TokenMatcher {
def matchToken(seq: CharSequence, state: State): Option[TokenMatch]
}
class LiteralTokenMatcher(str: String,
token: HoconTokenType,
condition: State => Boolean = _ => true,
transitionFun: State => State = identity
) extends TokenMatcher {
def matchToken(seq: CharSequence, state: State): Option[TokenMatch] =
if (condition(state) && seq.startsWith(str))
Some(TokenMatch(token, str.length, transitionFun(state)))
else None
}
class RegexTokenMatcher(regex: Regex,
token: HoconTokenType,
condition: State => Boolean = _ => true,
transitionFun: State => State = identity
) extends TokenMatcher {
def matchToken(seq: CharSequence, state: State): Option[TokenMatch] =
if (condition(state))
regex.findPrefixMatchOf(seq).map(m => TokenMatch(token, m.end, transitionFun(state)))
else None
}
def forceState(state: State): State => State =
_ => state
def always: State => Boolean =
_ => true
def onContents(state: State): State = state match {
case Initial | SubStarting => Value
case SubStarted => Substitution
case _ => state
}
def onDollar(state: State): State = state match {
case Initial | Value => SubStarting
case SubStarted => Substitution
case _ => state
}
def isAnyOf(states: State*): State => Boolean =
states.contains
def isNoneOf(states: State*): State => Boolean =
!states.contains(_)
val notSubstitution = isAnyOf(Initial, Value)
val matchers = List(
WhitespaceMatcher,
    new RegexTokenMatcher( """\$""".r, Dollar, always, onDollar),
new LiteralTokenMatcher("{", SubLBrace, isAnyOf(SubStarting), forceState(SubStarted)),
new LiteralTokenMatcher("?", QMark, isAnyOf(SubStarted), forceState(Substitution)),
new LiteralTokenMatcher("}", SubRBrace, isAnyOf(SubStarted, Substitution), forceState(Value)),
new LiteralTokenMatcher("{", LBrace, always, forceState(Initial)),
new LiteralTokenMatcher("}", RBrace, always, forceState(Value)),
new LiteralTokenMatcher("[", LBracket, always, forceState(Initial)),
new LiteralTokenMatcher("]", RBracket, always, forceState(Value)),
new LiteralTokenMatcher("(", LParen, always, forceState(Initial)),
new LiteralTokenMatcher(")", RParen, always, forceState(Value)),
new LiteralTokenMatcher(":", Colon, always, forceState(Initial)),
new LiteralTokenMatcher(",", Comma, always, forceState(Initial)),
new LiteralTokenMatcher("=", Equals, always, forceState(Initial)),
new LiteralTokenMatcher("+=", PlusEquals, always, forceState(Initial)),
new LiteralTokenMatcher(".", Period, always, onContents),
    new RegexTokenMatcher( """#[^\n]*""".r, HashComment, always, identity),
    new RegexTokenMatcher( """//[^\n]*""".r, DoubleSlashComment, always, identity),
UnquotedCharsMatcher,
MultilineStringMatcher,
QuotedStringMatcher,
new RegexTokenMatcher(".".r, BadCharacter, always, identity)
)
def isHoconWhitespace(char: Char): Boolean = char.isWhitespace || SpecialWhitespace.contains(char)
def isCStyleComment(seq: CharSequence, index: Int): Boolean =
seq.subSequence(index, seq.length).startsWith("//")
def continuesUnquotedChars(seq: CharSequence, index: Int): Boolean = index < seq.length && {
val char = seq.charAt(index)
!UnquotedSpecialChars.contains(char) && !ForbiddenChars.contains(char) &&
!isHoconWhitespace(char) && !isCStyleComment(seq, index)
}
object QuotedStringMatcher extends TokenMatcher {
    def matchToken(seq: CharSequence, state: State): Option[TokenMatch] = if (seq.charAt(0) == '"') {
@tailrec
def drain(offset: Int, escaping: Boolean): Int =
if (offset < seq.length) {
seq.charAt(offset) match {
            case '\n' => offset
            case '"' if !escaping => offset + 1
            case '\\' if !escaping => drain(offset + 1, escaping = true)
case _ => drain(offset + 1, escaping = false)
}
} else offset
Some(TokenMatch(QuotedString, drain(1, escaping = false), onContents(state)))
} else None
}
object MultilineStringMatcher extends TokenMatcher {
def matchToken(seq: CharSequence, state: State): Option[TokenMatch] =
      if (seq.startsWith("\"\"\"")) {
val strWithoutOpening = seq.subSequence(3, seq.length)
val length = HoconConstants.MultilineStringEnd.findFirstMatchIn(strWithoutOpening)
.map(m => m.end + 3).getOrElse(seq.length)
Some(TokenMatch(MultilineString, length, onContents(state)))
} else None
}
object UnquotedCharsMatcher extends TokenMatcher {
def matchToken(seq: CharSequence, state: State): Option[TokenMatch] = {
var c = 0
while (continuesUnquotedChars(seq, c)) {
c += 1
}
if (c > 0) Some(TokenMatch(UnquotedChars, c, onContents(state))) else None
}
}
object WhitespaceMatcher extends TokenMatcher {
def matchToken(seq: CharSequence, state: State): Option[TokenMatch] = {
var c = 0
var nl = false
def char = seq.charAt(c)
while (c < seq.length && isHoconWhitespace(char)) {
        nl ||= char == '\n'
c += 1
}
if (c > 0) {
val token = if (nl) LineBreakingWhitespace else InlineWhitespace
Some(TokenMatch(token, c, newState(state, nl)))
} else None
}
def newState(state: State, newLine: Boolean): State = state match {
case _ if newLine => Initial
case SubStarting => Value
case SubStarted => Substitution
case _ => state
}
}
private var input: CharSequence = _
private var endOffset: Int = _
private var stateBefore: State = Initial
private var stateAfter: State = Initial
private var tokenStart: Int = _
private var tokenEnd: Int = _
private var token: IElementType = _
def getBufferEnd: Int = endOffset
def getBufferSequence: CharSequence = input
def advance(): Unit = {
tokenStart = tokenEnd
val seq = input.subSequence(tokenStart, endOffset)
if (seq.length > 0) {
val TokenMatch(newToken, length, newState) =
matchers.iterator.flatMap(_.matchToken(seq, stateAfter)).next()
tokenEnd = tokenStart + length
token = newToken
stateBefore = stateAfter
stateAfter = newState
} else {
stateBefore = Initial
stateAfter = Initial
token = null
}
}
def getTokenEnd: Int = tokenEnd
def getTokenStart: Int = tokenStart
def getTokenType: IElementType = {
if (token == null) {
advance()
}
token
}
def getState: Int = stateBefore.raw
def start(buffer: CharSequence, startOffset: Int, endOffset: Int, initialState: Int): Unit = {
this.token = null
this.input = buffer
this.tokenStart = startOffset
this.tokenEnd = startOffset
this.endOffset = endOffset
this.stateBefore = States(initialState)
}
}
| ghik/intellij-hocon | src/org/jetbrains/plugins/hocon/lexer/HoconLexer.scala | Scala | apache-2.0 | 7,987 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import org.scalatest.mock.MockitoSugar
import org.mockito.Mockito._
import org.scalatest.{Matchers, WordSpec}
import uk.gov.hmrc.ct.{BoxValidationFixture, CATO21}
import uk.gov.hmrc.ct.box.CtValidation
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
class CP89Spec extends WordSpec with Matchers with MockitoSugar with BoxValidationFixture[ComputationsBoxRetriever] {
val boxRetriever = mock[ComputationsBoxRetriever]
"CP89" should {
"be mandatory if CPQ8 is false and CPAux2 + CP78 > CP672" in {
val mockRetriever = setupRetriever()
when(mockRetriever.cpQ8()).thenReturn(CPQ8(Some(false)))
when(mockRetriever.cpAux2()).thenReturn(CPAux2(50))
when(mockRetriever.cp78()).thenReturn(CP78(Some(50)))
when(mockRetriever.cp672()).thenReturn(CP672(Some(50)))
CP89(None).validate(mockRetriever) shouldBe Set(CtValidation(Some("CP89"), "error.CP89.mainPoolAllowanceRequired"))
}
"return no error if CPQ8 is false and CPAux2 + CP78 > CP672 and CP89 has a value of 0" in {
val mockRetriever = setupRetriever()
when(mockRetriever.cpQ8()).thenReturn(CPQ8(Some(false)))
when(mockRetriever.cpAux2()).thenReturn(CPAux2(50))
when(mockRetriever.cp78()).thenReturn(CP78(Some(50)))
when(mockRetriever.cp672()).thenReturn(CP672(Some(50)))
CP89(Some(0)).validate(mockRetriever) shouldBe Set.empty
}
"return a negative number error if CPQ8 is false and CPAux2 + CP78 > CP672 and CP89 has a negative value" in {
val mockRetriever = setupRetriever()
when(mockRetriever.cpQ8()).thenReturn(CPQ8(Some(false)))
when(mockRetriever.cpAux2()).thenReturn(CPAux2(50))
when(mockRetriever.cp78()).thenReturn(CP78(Some(50)))
when(mockRetriever.cp672()).thenReturn(CP672(Some(50)))
CP89(Some(-20)).validate(mockRetriever) shouldBe Set(CtValidation(Some("CP89"), "error.CP89.mustBeZeroOrPositive"))
}
"not be mandatory if CPQ8 is true" in {
val mockRetriever = setupRetriever()
when(mockRetriever.cpQ8()).thenReturn(CPQ8(Some(true)))
when(mockRetriever.cpAux2()).thenReturn(CPAux2(50))
when(mockRetriever.cp78()).thenReturn(CP78(Some(50)))
when(mockRetriever.cp672()).thenReturn(CP672(Some(50)))
CP89(None).validate(mockRetriever) shouldBe empty
}
"not be mandatory if CPQ8 is false and CPAux2 + CP78 is equal to then CP672" in {
val mockRetriever = setupRetriever()
when(mockRetriever.cpQ8()).thenReturn(CPQ8(Some(false)))
when(mockRetriever.cpAux2()).thenReturn(CPAux2(25))
when(mockRetriever.cp78()).thenReturn(CP78(Some(25)))
when(mockRetriever.cp672()).thenReturn(CP672(Some(50)))
CP89(None).validate(mockRetriever) shouldBe empty
}
"not be mandatory if CPQ8 is false and CP672 is greater then CPAux2 + CP78" in {
val mockRetriever = setupRetriever()
when(mockRetriever.cpQ8()).thenReturn(CPQ8(Some(false)))
when(mockRetriever.cpAux2()).thenReturn(CPAux2(25))
when(mockRetriever.cp78()).thenReturn(CP78(Some(25)))
when(mockRetriever.cp672()).thenReturn(CP672(Some(100)))
CP89(None).validate(mockRetriever) shouldBe empty
}
testCannotExistWhen("CP89", CP89.apply) {
      val boxRetriever = setupRetriever()
when(boxRetriever.cato21()).thenReturn(CATO21(10))
when(boxRetriever.cp81()).thenReturn(CP81(1000))
when(boxRetriever.cp88()).thenReturn(CP88(0))
when(boxRetriever.cpAux2()).thenReturn(CPAux2(150))
when(boxRetriever.cp78()).thenReturn(CP78(Some(50)))
when(boxRetriever.cp672()).thenReturn(CP672(Some(50)))
when(boxRetriever.cpQ8()).thenReturn(CPQ8(Some(true))).getMock[ComputationsBoxRetriever]
}
}
private def setupRetriever(): ComputationsBoxRetriever = {
val mockRetriever = mock[ComputationsBoxRetriever]
when(mockRetriever.cp81()).thenReturn(CP81(0))
when(mockRetriever.cp82()).thenReturn(CP82(0))
when(mockRetriever.cp83()).thenReturn(CP83(0))
when(mockRetriever.cp87()).thenReturn(CP87(0))
when(mockRetriever.cp88()).thenReturn(CP88(0))
when(mockRetriever.cpAux1()).thenReturn(CPAux1(0))
when(mockRetriever.cato21()).thenReturn(CATO21(0))
mockRetriever
}
}
| liquidarmour/ct-calculations | src/test/scala/uk/gov/hmrc/ct/computations/CP89Spec.scala | Scala | apache-2.0 | 4,893 |
package org.kale.dkim
import java.security.spec.X509EncodedKeySpec
import java.security.{KeyFactory, PublicKey}
import java.util.Base64
import org.apache.logging.log4j.LogManager
import scala.collection.concurrent.RDCSS_Descriptor
object DkimDnsLookup {
val logger = LogManager.getLogger(getClass)
val DKIM1 = "DKIM1"
val RSA = "rsa"
  def removeWhiteSpace(text: String) = text.replaceAll("""[ \t\n\r"]""", "")
}
class DkimDnsLookup(helper: DnsHelper) {
import DkimDnsLookup._
// Find (and create) the first valid key we find
def getPublicKey(dnsHost: String): PublicKey = {
val records = helper.getDnsRecords(dnsHost, "TXT")
if (records.length == 0)
throw new Exception(s"No TXT records found in DNS entry for : $dnsHost")
val maps = records.map{record: String => DkimSignature.mapFields(removeWhiteSpace(record))}
val mapOption = maps.find(isValid(_))
if (mapOption.isEmpty){
val recordText = records.mkString(",")
throw new Exception(s"No valid TXT record found for $dnsHost, records: $recordText")
}
val fieldMap = mapOption.get
generatePublicKey(fieldMap.get("p").get)
}
private def isValid(map: Map[String, String]): Boolean = {
if (map.getOrElse("v", DKIM1) != DKIM1)
false
else if (!map.contains("p"))
false
else
true
}
private def generatePublicKey(encodedPublicKey: String): PublicKey = {
logger.debug(s"encoded key: $encodedPublicKey")
val decodedKey = Base64.getDecoder().decode(encodedPublicKey)
val keyFactory = KeyFactory.getInstance("RSA")
keyFactory.generatePublic(new X509EncodedKeySpec(decodedKey))
}
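  // Hypothetical usage sketch (the DnsHelper instance and the host name are placeholders):
  //
  //   val lookup = new DkimDnsLookup(dnsHelper)
  //   val key: PublicKey = lookup.getPublicKey("selector1._domainkey.example.com")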
} | OdysseusLevy/kale | dkim/src/main/scala/org/kale/dkim/DkimDnsLookup.scala | Scala | apache-2.0 | 1,667 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.scaladsl.testkit
import java.nio.file.Files
import java.nio.file.Path
import java.nio.file.Paths
import com.lightbend.lagom.scaladsl.api.Descriptor
import com.lightbend.lagom.scaladsl.api.Service
import com.lightbend.lagom.scaladsl.persistence.cassandra.CassandraPersistenceComponents
import com.lightbend.lagom.scaladsl.persistence.jdbc.JdbcPersistenceComponents
import com.lightbend.lagom.scaladsl.persistence.PersistenceComponents
import com.lightbend.lagom.scaladsl.persistence.PersistentEntityRegistry
import com.lightbend.lagom.scaladsl.playjson.EmptyJsonSerializerRegistry
import com.lightbend.lagom.scaladsl.playjson.JsonSerializerRegistry
import com.lightbend.lagom.scaladsl.server._
import org.scalatest.Matchers
import org.scalatest.WordSpec
import play.api.db.HikariCPComponents
import play.api.libs.ws.ahc.AhcWSComponents
import scala.collection.JavaConverters._
import scala.util.Properties
class ServiceTestSpec extends WordSpec with Matchers {
"ServiceTest" when {
"started with Cassandra" should {
"create a temporary directory" in {
val temporaryFileCountBeforeRun = listTemporaryFiles().size
ServiceTest.withServer(ServiceTest.defaultSetup.withCassandra())(new CassandraTestApplication(_)) { _ =>
val temporaryFilesDuringRun = listTemporaryFiles()
temporaryFilesDuringRun should have size (temporaryFileCountBeforeRun + 1)
}
}
}
"stopped after starting" should {
"remove its temporary directory" in {
val temporaryFileCountBeforeRun = listTemporaryFiles().size
ServiceTest.withServer(ServiceTest.defaultSetup.withCassandra())(new CassandraTestApplication(_)) { _ =>
()
}
val temporaryFilesAfterRun = listTemporaryFiles()
temporaryFilesAfterRun should have size temporaryFileCountBeforeRun
}
}
"started with JDBC" should {
"start successfully" in {
ServiceTest.withServer(ServiceTest.defaultSetup.withJdbc())(new JdbcTestApplication(_)) { _ =>
()
}
}
}
}
def listTemporaryFiles(): Iterator[Path] = {
val tmpDir = Paths.get(Properties.tmpDir)
Files
.newDirectoryStream(tmpDir, "ServiceTest_*")
.iterator()
.asScala
}
}
trait TestService extends Service {
import Service._
final override def descriptor: Descriptor = named("test")
}
class TestServiceImpl(persistentEntityRegistry: PersistentEntityRegistry) extends TestService
class TestApplication(context: LagomApplicationContext)
extends LagomApplication(context)
with LocalServiceLocator
with AhcWSComponents { self: PersistenceComponents =>
override lazy val jsonSerializerRegistry: JsonSerializerRegistry = EmptyJsonSerializerRegistry
override lazy val lagomServer: LagomServer = serverFor[TestService](new TestServiceImpl(persistentEntityRegistry))
}
class CassandraTestApplication(context: LagomApplicationContext)
extends TestApplication(context)
with CassandraPersistenceComponents
class JdbcTestApplication(context: LagomApplicationContext)
extends TestApplication(context)
with JdbcPersistenceComponents
with HikariCPComponents {
persistentEntityRegistry
}
| lagom/lagom | testkit/scaladsl/src/test/scala/com/lightbend/lagom/scaladsl/testkit/ServiceTestSpec.scala | Scala | apache-2.0 | 3,307 |
/* ------------------- sse-jmx ------------------- *\
* Licensed under the Apache License, Version 2.0. *
* Author: Spiros Tzavellas *
\* ----------------------------------------------- */
package com.tzavellas.sse.jmx.export
import javax.management.MBeanOperationInfo
import javax.management.modelmbean._
/**
* A hack to remove getters and setters from Model MBean operations.
*
* <p>This is a hack to fix bug: <i>RFE 6339571</i>.</p>
*
* @see http://weblogs.java.net/blog/2007/02/13/removing-getters-model-mbean-operations
*/
private class NoGetterAndSetterMBeanInfo(info: ModelMBeanInfo) extends ModelMBeanInfoSupport(info) {
override def clone() = new NoGetterAndSetterMBeanInfo(this)
private def writeReplace(): AnyRef = {
def isGetterOrSetter(role: String) = "getter".equalsIgnoreCase(role) || "setter".equalsIgnoreCase(role)
def role(info: MBeanOperationInfo) = info.getDescriptor.getFieldValue("role").asInstanceOf[String]
val operationsWithNoGettersOrSetters = getOperations.collect {
case info if !isGetterOrSetter(role(info)) => info.asInstanceOf[ModelMBeanOperationInfo]
}
    new ModelMBeanInfoSupport(
getClassName,
getDescription,
getAttributes.asInstanceOf[Array[ModelMBeanAttributeInfo]],
getConstructors.asInstanceOf[Array[ModelMBeanConstructorInfo]],
operationsWithNoGettersOrSetters,
getNotifications.asInstanceOf[Array[ModelMBeanNotificationInfo]],
getMBeanDescriptor)
}
}
| sptz45/sse-jmx | src/main/scala/com/tzavellas/sse/jmx/export/NoGetterAndSetterMBeanInfo.scala | Scala | apache-2.0 | 1,533 |
package coursier.publish.upload
import coursier.core.Authentication
import coursier.publish.upload.logger.UploadLogger
import coursier.util.Task
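/**
 * A no-op [[Upload]]: `upload` never touches the given `url` and always reports
 * success (`None`). The wrapped `underlying` upload is only carried along for
 * reference, which is presumably what a dry run wants.
 */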
final case class DummyUpload(underlying: Upload) extends Upload {
def upload(
url: String,
authentication: Option[Authentication],
content: Array[Byte],
logger: UploadLogger,
loggingId: Option[Object]
): Task[Option[Upload.Error]] =
Task.point(None)
}
| alexarchambault/coursier | modules/publish/src/main/scala/coursier/publish/upload/DummyUpload.scala | Scala | apache-2.0 | 427 |
package com.twitter.scalding.parquet.thrift
import cascading.scheme.Scheme
import com.twitter.scalding.typed.{PartitionSchemed, PartitionUtil}
import com.twitter.scalding.{FixedPathSource, HadoopSchemeInstance, TupleConverter, TupleSetter}
import scala.reflect.ClassTag
/**
* Scalding source to read or write partitioned Parquet thrift data.
*
* For writing it expects a pair of `(P, T)`, where `P` is the data used for partitioning and `T` is the
* thrift object. `P` must be either a String or a tuple of Strings. Below is an example.
* {{{
* val data: TypedPipe[MyThriftObject] = ???
* data.map{ obj =>
* ( (obj.country, obj.city), obj)
* }.write(PartitionedParquetThriftSource[(String, String), MyThriftObject](path, "%s/%s"))
* }}}
*
* For reading it produces a pair `(P, T)` where `P` is the partition data, `T` is the corresponding thrift
* object. Below is an example.
* {{{
 * val in: TypedPipe[((String, String), MyThriftObject)] =
* TypedPipe.from( PartitionedParquetThriftSource[(String, String), MyThriftObject](path, "%s/%s") )
* }}}
*/
case class PartitionedParquetThriftSource[P, T <: ParquetThrift.ThriftBase](path: String, template: String)(
implicit
val ct: ClassTag[T],
val valueSetter: TupleSetter[T],
val valueConverter: TupleConverter[T],
val partitionSetter: TupleSetter[P],
val partitionConverter: TupleConverter[P]
) extends FixedPathSource(path)
with ParquetThriftBase[T]
with PartitionSchemed[P, T]
with Serializable {
override val fields = PartitionUtil.toFields(0, implicitly[TupleSetter[T]].arity)
assert(
fields.size == valueSetter.arity,
"The number of fields needs to be the same as the arity of the value setter"
)
// Create the underlying scheme and explicitly set the source, sink fields to be only the specified fields
override def hdfsScheme = {
// See docs in Parquet346TBaseScheme
val baseScheme = new Parquet346TBaseScheme[T](this.config)
val scheme = HadoopSchemeInstance(baseScheme.asInstanceOf[Scheme[_, _, _, _, _]])
scheme.setSinkFields(fields)
scheme.setSourceFields(fields)
scheme
}
}
| twitter/scalding | scalding-parquet/src/main/scala/com/twitter/scalding/parquet/thrift/PartitionedParquetThriftSource.scala | Scala | apache-2.0 | 2,137 |
package org.bizzle.pathfinding
import
org.bizzle.tester.cluster.{ TestFuncConstructionBundle, TestFuncFlagBundle, TestFunction }
import
pathingmap.PathingMapString
/**
* Created by IntelliJ IDEA.
* User: Jason
* Date: 1/19/12
* Time: 9:58 PM
*/
class PathingTestFunction(testString: PathingMapString,
analysisFunction: (PathingStatus[StepData], PathingAnalysisFlagBundle) => PathingAnalysisResultBundle,
testNumber: Int,
shouldPass: Boolean,
expectedLength: Int)
extends TestFunction[PathFinder[StepData], PathingMapString, PathingStatus[StepData],
PathingAnalysisFlagBundle, PathingAnalysisResultBundle](testString, analysisFunction, testNumber, shouldPass) {
def apply(pathFinder: PathFinder[StepData], flags: TestFuncFlagBundle) : Boolean = {
val analysisFlags = extractAnalysisFlags(flags)
val bundle = analysisFunc(pathFinder(testSubject), analysisFlags)
bundle.wasSuccess && (bundle.path.length - 1) == expectedLength
}
protected def extractAnalysisFlags(flags: TestFuncFlagBundle) = new PathingAnalysisFlagBundle(flags.toggles)
}
case class PTFConstructionBundle(expectedPathLength: Int) extends TestFuncConstructionBundle
| TheBizzle/PathFindingCore | src/test/org/bizzle/pathfinding/PathingTestFunction.scala | Scala | bsd-3-clause | 1,328 |
/*
* Copyright 2015 Functional Objects, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.funobjects.hue
import java.net.InetAddress
import java.util.concurrent.Executors
import akka.actor._
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.persistence.{RecoveryCompleted, PersistentActor}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl._
import org.funobjects.hue.model.HueJson.HubState
import org.funobjects.r34.ResourceModule
import org.funobjects.seeker.Seeker.Device
import org.funobjects.seeker.{Seeker, UpnpSeeker}
import org.json4s._
import org.json4s.jackson.JsonMethods._
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext
import scala.util.control.NonFatal
import scala.util.{Failure, Success}
/**
* Hue manager
*/
class HueModule(implicit sys: ActorSystem, exec: ExecutionContext, mat: ActorMaterializer) extends ResourceModule {
override val name: String = HueModule.modName
override val routes = None
}
object HueModule {
val modName = "hue"
val seekPeriod = 1.minute
val checkButtonPeriod = 28.seconds
private[hue] case object Seek
private[hue] case class Found(devs: Map[Seeker.Device, InetAddress])
private[hue] case class Hub(dev: Seeker.Device, addr: InetAddress)
private[hue] case class GetHubCredentials(user: String)
private[hue] case class NewHubCredentials(user: String)
// indicates that the credentials actor should try to create a new user (token, really)
private[hue] case object CreateUser
class HueMaster extends Actor {
var addrMap: Map[Seeker.Device, InetAddress] = Map.empty
var refMap: Map[Hub, ActorRef] = Map.empty
var seekerRef: Option[ActorRef] = None
override def preStart(): Unit = {
super.preStart()
seekerRef = Some(context.actorOf(Props(classOf[SeekerActor], self), "seeker"))
}
override def receive: Receive = {
case Found(hubs) => found(hubs)
case _ =>
}
def found(map: Map[Seeker.Device, InetAddress]): Unit = {
// look at each entry for new hubs or changed address;
// right now, nothing is done about hubs that disappear from discovery,
// but at some point they should be expired if they don't come back for a long time
map foreach {
case (dev, addr) =>
addrMap.get(dev) match {
case Some(oldAddr) =>
if (addr != oldAddr) {
addrMap += (dev -> addr)
newAddress(dev, oldAddr, addr)
}
// For right now, take no action for entries that are already present,
// but they should update a timestamp in order to allow eventual expiration
// TODO: update timestamp for hub expiration
case None =>
// new hub
addrMap += (dev -> addr)
newHub(Hub(dev, addr))
}
}
}
def newHub(hub: Hub): Unit = {
refMap += (hub -> newHubRef(hub.dev, hub.addr))
}
def newHubRef(dev: Seeker.Device, addr: InetAddress) = {
context.actorOf(Props(classOf[HubActor], dev, addr), "hub:" + dev.id.id)
}
def newAddress(dev: Seeker.Device, oldAddr: InetAddress, newAddr: InetAddress): Unit = {
val oldHub = Hub(dev, oldAddr)
refMap.get(oldHub).foreach { ref =>
context.stop(ref)
refMap -= oldHub
val newRef = newHubRef(dev, newAddr)
refMap += (Hub(dev, newAddr) -> newRef)
}
}
}
class HubCredentialsActor(hub: Hub, hubActor: ActorRef) extends PersistentActor with ActorLogging with ImplicitMaterializer {
// hue credentials consist solely of a long, hard-to-guess user name (a token, really)
// that can be created only when the button on the hub is pressed
var cred: Option[String] = None
val baseUrl = s"http://${hub.addr}/api"
implicit val exec = context.dispatcher
val http = Http(context.system)
override def persistenceId: String = hub.dev.id.id
override def receiveCommand: Receive = {
case CreateUser =>
if (cred.isEmpty) {
createUser()
}
if (cred.isEmpty) {
context.system.scheduler.scheduleOnce(checkButtonPeriod, self, CreateUser)
}
case _ =>
}
override def receiveRecover: Receive = {
case RecoveryCompleted =>
if (cred.isEmpty) {
// if we don't already have credentials, try to create a new user
self ! CreateUser
}
case _ =>
}
def createUser(): Unit = {
implicit val json4sFormats = org.json4s.DefaultFormats
      val json = """{ "devicetype": "r34" }"""
http.singleRequest(
HttpRequest(method = HttpMethods.POST, uri = baseUrl, entity = HttpEntity(ContentTypes.`application/json`, json)))
.flatMap(_.entity.dataBytes.runWith(Sink.head))
.map { bytes =>
val json = parse(bytes.utf8String)
println("create user response: " + pretty(render(json)))
}
.recover {
case NonFatal(ex) => log.error(ex, "Error creating user: ")
}
}
}
class HubActor(dev: Device, address: InetAddress, cred: String) extends Actor with ActorLogging {
var hubState: Option[HubState] = None
override def preStart(): Unit = {
super.preStart()
log.info(s"hub running for $dev")
}
override def receive: Actor.Receive = {
case _ =>
}
}
/**
* Periodically discover all hue devices and send them to the actor specified in the constructor
*
* @param watcher The actor to notify.
*/
class SeekerActor(watcher: ActorRef, seeker: Seeker) extends Actor with ActorLogging {
var cancel: Option[Cancellable] = None
override def preStart(): Unit = {
super.preStart()
implicit val exec = ExecutionContext.fromExecutor(Executors.newSingleThreadExecutor())
cancel = Some(context.system.scheduler.schedule(0.seconds, seekPeriod, self, Seek))
}
override def postStop(): Unit = {
cancel.foreach(_.cancel())
super.postStop()
}
override def receive: Actor.Receive = {
case Seek =>
        val map = filterHues(seeker.seek(10.seconds))
if (map.nonEmpty)
watcher ! Found(map)
case _ =>
}
// According to the hue docs, hubs are identified by UPnP by looking for a server property
// that starts with "IpBridge".
def filterHues(map: Map[Seeker.Device, InetAddress]): Map[Seeker.Device, InetAddress] =
map.filter { case (dev, addr) => dev.server.startsWith("IpBridge/") }
}
}
| funobjects/hue-and-cry | src/main/scala/org/funobjects/hue/HueModule.scala | Scala | apache-2.0 | 7,098 |
package com.gu.core.models
import org.joda.time.DateTime
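/**
 * Query filters: a required [[Status]], optional `since`/`until` time bounds on the
 * results, and an optional [[OrderBy]] for ordering them.
 */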
case class Filters(
status: Status,
since: Option[DateTime],
until: Option[DateTime],
order: Option[OrderBy]
)
| guardian/discussion-avatar | api/src/main/scala/com/gu/core/models/Filters.scala | Scala | apache-2.0 | 178 |
package promisewell
import org.scalatest.FunSpec
import scala.collection.mutable
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
class CappedSpec extends FunSpec {
describe("Cache#capped") {
it ("should cap items") {
val evictions = new mutable.ArrayBuffer[Int]
val well = Cache.capped[Int, Int](maxCapacity = 2)
.onEviction {
case (k, v) =>
evictions += k
}
well(1, () => Future.successful(1))
well(2, () => Future.successful(2))
well(3, () => Future.successful(3))
assert(well.get(1) === None)
assert(well.get(2).isDefined === true)
assert(well.get(3).isDefined === true)
assert(evictions.toList === List(1))
}
it ("should not cache failed futures") {
val well = Cache.capped[Int, Int]()
well(1, () => Future.failed(new Exception())).onComplete {
case _ => assert(well.get(1).isDefined === false)
}
}
}
}
| softprops/promise-well | src/test/scala/CappedSpec.scala | Scala | mit | 988 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package water.api
import org.apache.spark.SparkContext
import org.apache.spark.h2o._
import org.apache.spark.h2o.util.SparkTestContext
import org.apache.spark.sql.SQLContext
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import water.api.scalaInt._
/**
 * Test suite for the scalaInt end-points
*/
@RunWith(classOf[JUnitRunner])
class ScalaCodeHandlerSuite extends FunSuite with SparkTestContext {
sc = new SparkContext("local[*]", "test-local", conf = defaultSparkConf)
hc = new H2OContext(sc).start()
// Shared h2oContext
val h2oContext = hc
// Shared sqlContext
implicit val sqlContext = new SQLContext(sc)
test("ScalaCodeHandler.initSession() method"){
val scalaCodeHandler = new ScalaCodeHandler(sc,h2oContext)
val req = new ScalaSessionIdV3
val result = scalaCodeHandler.initSession(3,req)
assert(result.session_id == 1,"First id should be equal to 1")
// new interpreter is automatically created, so the last ID used should be equal to 2
assert(scalaCodeHandler.mapIntr.size == 1, "Number of currently used interpreters should be equal to 1")
assert(scalaCodeHandler.mapIntr.get(1).nonEmpty, "The value in the interpreters hashmap with the key 1 should not be empty")
assert(scalaCodeHandler.mapIntr.get(1).get._1.sessionID == 1, "ID attached to the interpreter should be equal to 1")
}
test("ScalaCodeHandler.destroySession() method, destroy existing session"){
val scalaCodeHandler = new ScalaCodeHandler(sc,h2oContext)
// create new session
val reqSession = new ScalaSessionIdV3
scalaCodeHandler.initSession(3,reqSession)
val reqMsg = new ScalaMsgV3
reqMsg.session_id=reqSession.session_id
val result = scalaCodeHandler.destroySession(3,reqMsg)
assert(result.msg.equals("Session closed"),"Message should be equal to \\"Session closed\\"")
assert(scalaCodeHandler.mapIntr.size == 0, "Number of currently used interpreters should be equal to 0")
assert(scalaCodeHandler.mapIntr.get(1).isEmpty, "The value in the interpreters hashmap with the key 1 should be empty")
}
test("ScalaCodeHandler.destroySession() method, destroy non-existing session"){
val scalaCodeHandler = new ScalaCodeHandler(sc,h2oContext)
val reqMsg = new ScalaMsgV3
reqMsg.session_id=3
val result = scalaCodeHandler.destroySession(3,reqMsg)
assert(result.msg.equals("Session does not exist"),"Message should be equal to \\"Session does not exist\\"")
assert(scalaCodeHandler.mapIntr.size == 0, "Number of currently used interpreters should be equal to 0")
assert(scalaCodeHandler.mapIntr.get(3).isEmpty, "The value in the interpreters hashmap with the key 3 should be empty")
}
test("ScalaCodeHandler.getSessions() method"){
val scalaCodeHandler = new ScalaCodeHandler(sc,h2oContext)
// create first interpreter
val reqSession1 = new ScalaSessionIdV3
scalaCodeHandler.initSession(3,reqSession1)
// create second interpreter
val reqSession2 = new ScalaSessionIdV3
scalaCodeHandler.initSession(3,reqSession2)
val req = new ScalaSessionsV3
val result = scalaCodeHandler.getSessions(3,req)
assert(result.sessions.sameElements(Array(1,2)),"Array of active sessions should contain 1 and 2")
assert(scalaCodeHandler.mapIntr.size == 2, "Number of currently used interpreters should be equal to 2")
}
test("ScalaCodeHandler.interpret() method, printing"){
val scalaCodeHandler = new ScalaCodeHandler(sc,h2oContext)
// create interpreter
val reqSession = new ScalaSessionIdV3
scalaCodeHandler.initSession(3,reqSession)
val req = new ScalaCodeV3
req.session_id = reqSession.session_id
req.code = "println(\\"text\\")"
val result = scalaCodeHandler.interpret(3,req)
assert(result.output.equals("text\\n"),"Printed output should equal to text")
assert(result.status.equals("Success"),"Status should be Success")
assert(result.response.equals(""),"Response should be empty")
}
test("ScalaCodeHandler.interpret() method, using unknown function"){
val scalaCodeHandler = new ScalaCodeHandler(sc,h2oContext)
// create interpreter
val reqSession = new ScalaSessionIdV3
scalaCodeHandler.initSession(3,reqSession)
val req = new ScalaCodeV3
req.session_id = reqSession.session_id
req.code = "foo"
val result = scalaCodeHandler.interpret(3,req)
assert(result.output.equals(""),"Printed output should be empty")
assert(result.status.equals("Error"),"Status should be Error")
assert(result.response.equals("<console>:29: error: not found: value foo\\n foo\\n ^\\n"),"Response should not be empty")
}
test("ScalaCodeHandler.interpret() method, using previously defined class"){
val scalaCodeHandler = new ScalaCodeHandler(sc,h2oContext)
// create interpreter
val reqSession = new ScalaSessionIdV3
scalaCodeHandler.initSession(3,reqSession)
val req1 = new ScalaCodeV3
req1.session_id = reqSession.session_id
req1.code = "case class Foo(num: Int)"
val result1 = scalaCodeHandler.interpret(3,req1)
assert(result1.output.equals(""),"Printed output should be empty")
assert(result1.status.equals("Success"),"Status should be Success")
assert(result1.response.equals("defined class Foo\\n"),"Response should not be empty")
val req2= new ScalaCodeV3
req2.session_id = reqSession.session_id
req2.code = "val num = Foo(42)"
val result2 = scalaCodeHandler.interpret(3,req2)
assert(result2.output.equals(""),"Printed output should equal to text")
assert(result2.status.equals("Success"),"Status should be Success")
assert(result2.response.equals("num: Foo = Foo(42)\\n"),"Response should not be empty")
}
test("ScalaCodeHandler.interpret() method, using sqlContext,h2oContext and sparkContext"){
val scalaCodeHandler = new ScalaCodeHandler(sc,h2oContext)
// create interpreter
val reqSession = new ScalaSessionIdV3
scalaCodeHandler.initSession(3,reqSession)
val req1 = new ScalaCodeV3
req1.session_id = reqSession.session_id
req1.code = "val rdd = sc.parallelize(1 to 100).map(v=>v+10);rdd.cache"
val result1 = scalaCodeHandler.interpret(3,req1)
assert(result1.output.equals(""),"Printed output should be empty")
assert(result1.status.equals("Success"),"Status should be Success")
assert(result1.response.equals("rdd: org.apache.spark.rdd.RDD[Int] = MapPartitionsRDD[5] at map at <console>:28\\nres0: rdd.type = MapPartitionsRDD[5] at map at <console>:28\\n"),"Response should not be empty")
val req2 = new ScalaCodeV3
req2.session_id = reqSession.session_id
req2.code = "val h2oFrame = h2oContext.asH2OFrame(rdd)"
val result2 = scalaCodeHandler.interpret(3,req2)
assert(result2.output.equals(""),"Printed output should be empty")
assert(result2.status.equals("Success"),"Status should be Success")
assert(!result2.response.equals(""),"Response should not be empty")
val req3 = new ScalaCodeV3
req3.session_id = reqSession.session_id
// this code is using implicitly sqlContext
req3.code = "val dataframe = h2oContext.asDataFrame(h2oFrame)"
val result3 = scalaCodeHandler.interpret(3,req3)
assert(result3.output.equals(""),"Printed output should be empty")
assert(result3.status.equals("Success"),"Status should be Success")
assert(!result3.response.equals(""),"Response should not be empty")
}
}
| printedheart/sparkling-water | core/src/test/scala/water/api/ScalaCodeHandlerSuite.scala | Scala | apache-2.0 | 8,279 |
/**
* Copyright 2013 Gianluca Amato
*
* This file is part of JANDOM: JVM-based Analyzer for Numerical DOMains
* JANDOM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* JANDOM is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with JANDOM. If not, see <http://www.gnu.org/licenses/>.
*/
package it.unich.jandom.domains.numerical
import it.unich.jandom.domains.{EmptyExistsSuite, SeparatedTopAndBottomSuite}
import breeze.linalg.{DenseMatrix, DenseVector}
/**
* Test suite for the parallelotope domain.
* @author Gianluca Amato <[email protected]>
*/
class ParallelotopeDomainSuite extends NumericalDomainSuite with SeparatedTopAndBottomSuite with EmptyExistsSuite {
lazy val dom = ParallelotopeDomain()
val box = dom(DenseVector(-1, -1), DenseMatrix.eye(2), DenseVector(1, 1))
val diamond = dom(DenseVector(-1, -1), DenseMatrix((1.0, 1.0), (1.0, -1.0)), DenseVector(1, 1))
val empty = dom.bottom(2)
val full = dom.top(2)
describe("constructors") {
they("should only work with compatible sizes of bounds and shapes") {
intercept[IllegalArgumentException] { dom(DenseVector(0, 2), DenseMatrix.eye(2), DenseVector(0, 2, 3)) }
}
}
describe("constructors and extractors for non-trivial parallelotopes") {
assertResult(2) { box.dimension }
assertResult(false) { box.isEmpty }
assertResult(false) { box.isTop }
}
describe("constructors and extractors for full parallelotopes") {
assertResult(2) { full.dimension }
assertResult(false) { full.isEmpty }
assertResult(true) { full.isTop }
}
describe("constructors and extractors for empty parallelotopes") {
assertResult(2) { empty.dimension }
assertResult(true) { empty.isEmpty }
assertResult(false) { empty.isTop }
}
describe("comparison of parallelotopes") {
assert(empty < box)
assert(box < full)
assert(empty < full)
assert(diamond < box)
assert(diamond <= box)
assert(box > diamond)
assert(box >= diamond)
assertResult(Some(1)) { box.tryCompareTo(diamond) }
assertResult(Some(-1)) { diamond.tryCompareTo(box) }
assert(box == box)
assertResult(Some(0)) { box.tryCompareTo(box) }
val box2 = dom(DenseVector(-0.5, -0.5), DenseMatrix.eye(2), DenseVector(0.5, 0.5))
assert(box2 <= box)
assert(box >= box2)
assert(box2 < box)
assert(box > box2)
val box3 = dom(DenseVector(0, 0), DenseMatrix.eye(2), DenseVector(2, 2))
assertResult(None) { box.tryCompareTo(box3) }
}
describe("rotation of shapes") {
val m = DenseMatrix((1.0, 1.0), (-1.0, 1.0))
val protcalc = box.rotate(m)
val protdef = dom(DenseVector(-2, -2), m, DenseVector(2, 2))
assertResult(protdef) { protcalc }
}
describe("linear invertible assignment") {
val li1 = dom(DenseVector(0, -1), DenseMatrix((1.0, -1.0), (0.0, 1.0)), DenseVector(2, 1))
assertResult(li1) { box.linearAssignment(0, LinearForm(1.0, 1, 1)) }
val li2 = dom(DenseVector(1, -1), DenseMatrix((1.0, 0.0), (-1.0, 1.0)), DenseVector(1, 0))
val li3 = dom(DenseVector(2, -2), DenseMatrix((1.0, 0.0), (-1.0, 1.0)), DenseVector(2, -1))
assertResult(li3) { li2.linearAssignment(0, LinearForm(1.0, 1, 0)) }
assertResult(li3) { li2.linearAssignment(0, LinearForm(1.0, 1)) }
val li4 = dom(DenseVector(-1, -2), DenseMatrix((1.0, 0.0), (-1.0, 1.0)), DenseVector(1, 2))
assertResult(li4) { box.linearAssignment(1, LinearForm(0.0, 1, 2)) }
assert(empty.linearAssignment(1, LinearForm(0.0, 1, 1)).isEmpty)
}
describe("non-invertible linear assignment") {
val ln1 = dom(DenseVector(2, -1), DenseMatrix((1.0, -1.0), (0.0, 1.0)), DenseVector(2, 1))
assertResult(ln1) { box.linearAssignment(0, LinearForm(2.0, 0, 1)) }
val ln2 = dom(DenseVector(0, Double.NegativeInfinity), DenseMatrix((-1.0, 1.0), (0.0, 1.0)), DenseVector(0, Double.PositiveInfinity))
val ln3 = dom(DenseVector(Double.NegativeInfinity, 0), DenseMatrix((1.0, -1.0), (0.0, 1.0)), DenseVector(Double.PositiveInfinity, 0))
assertResult(ln2) { ln3.linearAssignment(1, LinearForm(0.0, 1, 0)) }
assertResult(ln2) { ln3.linearAssignment(1, LinearForm(0.0, 1)) }
assert(empty.linearAssignment(1, LinearForm(0.0, 1, 0)).isEmpty)
}
describe("non-deterministic assignment") {
val nd1 = dom(DenseVector(Double.NegativeInfinity, -1), DenseMatrix.eye(2), DenseVector(Double.PositiveInfinity, 1))
assertResult(nd1) { box.nonDeterministicAssignment(0) }
assertResult(nd1) { nd1.nonDeterministicAssignment(0) }
assertResult(nd1) { diamond.nonDeterministicAssignment(0) }
val nd2 = dom(DenseVector(0, 0), DenseMatrix((2.0, 1.0), (2.0, -1.0)), DenseVector(1, 1))
val nd3 = dom(DenseVector(Double.NegativeInfinity, -1), DenseMatrix((2.0, 1.0), (0.0, -2.0)), DenseVector(Double.PositiveInfinity, 1))
assertResult(nd3) { nd2.nonDeterministicAssignment(0) }
val nd4 = dom(DenseVector(Double.NegativeInfinity, 0), DenseMatrix((2.0, 1.0), (4.0, 0.0)), DenseVector(Double.PositiveInfinity, 2))
assertResult(nd4) { nd2.nonDeterministicAssignment(1) }
val nd5 = dom(DenseVector(10, -1), DenseMatrix((1.0, 0.0), (1.0, 1.0)), DenseVector(10, 1))
val nd6 = dom(DenseVector(Double.NegativeInfinity, -11), DenseMatrix.eye(2), DenseVector(Double.PositiveInfinity, -9))
assertResult(nd6) { nd5.nonDeterministicAssignment(0) }
assert(empty.nonDeterministicAssignment(0).isEmpty)
}
describe("linear inequalities") {
val li1 = dom(DenseVector(-1, -1), DenseMatrix((1.0, 1.0), (1.0, -1.0)), DenseVector(0, 0))
assertResult(li1) { diamond.linearInequality(LinearForm(1.0, 2, 0)) }
assertResult(li1) { diamond.linearInequality(LinearForm(1.0, 2)) }
assert(empty.linearInequality(LinearForm(-1.0, 1, 0)).isEmpty)
}
describe("linear disequalities") {
val li1 = dom(DenseVector(-1, 0), DenseMatrix((1.0, 1.0), (1.0, -2.0)), DenseVector(0, 0))
assertResult(li1) { li1.linearDisequality(1.0) }
assertResult(empty) { li1.linearDisequality(0.0) }
assertResult(li1) { li1.linearDisequality(LinearForm(1.0, 0, 1)) }
assertResult(li1) { li1.linearDisequality(LinearForm(0.5, 1, -2)) }
assertResult(empty) { li1.linearDisequality(LinearForm(0.0, 1, -2)) }
}
describe("union") {
val u1 = dom(DenseVector(2, 0), DenseMatrix.eye(2), DenseVector(4, 2))
val u2 = dom(DenseVector(-4, -1), DenseMatrix((-1.0, 3.0), (0.0, 1.0)), DenseVector(4, 2))
assertResult(u2) { box union u1 }
val u3 = dom(DenseVector(-1, -1), DenseMatrix((0.0, 1.0), (1.0, -1.0)), DenseVector(2, 4))
assertResult(u3) { u1 union diamond }
val u4 = dom(DenseVector(-4, 0), DenseMatrix.eye(2), DenseVector(-2, 2))
val u5 = dom(DenseVector(-4, 0), DenseMatrix.eye(2), DenseVector(4, 2))
assertResult(u5) { u4 union u1 }
val u6 = dom(DenseVector(1, Double.NegativeInfinity), DenseMatrix((1.0, 0.0), (1.0, -1.0)), DenseVector(1, 1))
val u7 = dom(DenseVector(0, Double.NegativeInfinity), DenseMatrix((1.0, 0.0), (0.0, -1.0)), DenseVector(0, 0))
val u8 = dom(DenseVector(0, 0), DenseMatrix.eye(2), DenseVector(1, Double.PositiveInfinity))
assertResult(u8) { u6 union u7 }
val u9 = dom(DenseVector(0, 0), DenseMatrix.eye(2), DenseVector(0, Double.PositiveInfinity))
assertResult(u8) { u9 union u8 }
assertResult(u8) { u8 union u9 }
val u10 = dom(DenseVector(2, 0), DenseMatrix.eye(2), DenseVector(2, 0))
val u11 = dom(DenseVector(0, 2), DenseMatrix((0.0, 1.0), (1.0, -2.0)), DenseVector(1, 6))
assertResult(u11) { u10 union u11 }
}
describe("minimization, maximization and frequency") {
val i = dom(DenseVector(-4, -1, 0), DenseMatrix((-1.0, 3.0, 0.0), (0.0, 1.0, 0.0), (-1.0, -1.0, 1.0)), DenseVector(4, 2, 0))
assertResult(12)(i.maximize(LinearForm(0, 1, 1, 0)))
assertResult(-8)(i.minimize(LinearForm(0, 1, 1, 0)))
assertResult(None)(i.frequency(LinearForm(0, 1, 1, 0)))
assertResult(Some(0))(i.frequency(LinearForm(0, -1, -1, 1)))
}
describe("dimensional variation") {
val i = diamond
val j = dom(DenseVector(-1, -1, Double.NegativeInfinity), DenseMatrix((1.0, 1.0, 0.0),
(1.0, -1.0, 0.0), (0.0, 0.0, 1.0)), DenseVector(1, 1, Double.PositiveInfinity))
val h = dom(DenseVector(-1, Double.NegativeInfinity), DenseMatrix((1.0, 0.0),
(0.0, 1.0)), DenseVector(1, Double.PositiveInfinity))
assertResult(j)(i.addVariable())
assertResult(h)(j.delVariable(0))
assertResult(h)(j.delVariable(1))
assertResult(i)(j.delVariable(2))
}
describe("dimensional maps") {
val i = diamond
val h = dom(DenseVector(-1), DenseMatrix((1.0)), DenseVector(1))
assertResult(diamond)(diamond.mapVariables(Seq(1, 0)))
assertResult(diamond)(i.mapVariables(Seq(0, 1)))
assertResult(h)(i.mapVariables(Seq(-1, 0)))
assertResult(diamond)(diamond.addVariable.mapVariables(Seq(1, 0, -1)))
}
describe("string representation") {
assertResult("[ -1.0 <= x+y <= 1.0 , -1.0 <= x-y <= 1.0 ]") { diamond.mkString(Seq("x", "y")) }
assertResult("empty") { empty.toString }
assertResult("[ -Infinity <= v0 <= Infinity , -Infinity <= v1 <= Infinity ]") { full.toString }
}
describe("all parallelotopes are polyhedral") {
forAll(someProperties) { (p) => assert(p.isPolyhedral) }
}
// I am not sure it works for all possible cases due to rounding errors.
describe("all parallelotopes may be rebuilt from constraints") {
forAll(someProperties) { (p) =>
assertResult(p) { p.constraints.foldLeft(p.top) { (prop, lf) => prop.linearInequality(lf) } }
}
}
}
| francescaScozzari/Jandom | core/src/test/scala/it/unich/jandom/domains/numerical/ParallelotopeDomainSuite.scala | Scala | lgpl-3.0 | 9,975 |
package org.jetbrains.plugins.scala
package lang
package psi
package stubs
package elements
import com.intellij.lang.ASTNode
import com.intellij.psi.PsiElement
import com.intellij.psi.stubs.IndexSink
import com.intellij.psi.stubs.StubElement
import com.intellij.psi.stubs.StubInputStream
import com.intellij.psi.stubs.StubOutputStream
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.ScExtendsBlock
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.templates.ScExtendsBlockImpl
import org.jetbrains.plugins.scala.lang.psi.stubs.impl.ScExtendsBlockStubImpl
import org.jetbrains.plugins.scala.lang.psi.stubs.util.ScalaInheritors
import scala.collection.immutable.ArraySeq
/**
* @author ilyas
*/
class ScExtendsBlockElementType extends ScStubElementType[ScExtendsBlockStub, ScExtendsBlock]("extends block") {
override def serialize(stub: ScExtendsBlockStub, dataStream: StubOutputStream): Unit = {
dataStream.writeNames(stub.baseClasses)
}
override def deserialize(dataStream: StubInputStream,
parentStub: StubElement[_ <: PsiElement]) = new ScExtendsBlockStubImpl(
parentStub,
this,
baseClasses = ArraySeq.unsafeWrapArray(dataStream.readNames)
)
override def createStubImpl(block: ScExtendsBlock,
parentStub: StubElement[_ <: PsiElement]) = new ScExtendsBlockStubImpl(
parentStub,
this,
baseClasses = ScalaInheritors.directSupersNames(block)
)
override def indexStub(stub: ScExtendsBlockStub, sink: IndexSink): Unit = {
sink.occurrences(index.ScalaIndexKeys.SUPER_CLASS_NAME_KEY, stub.baseClasses: _*)
}
override def createElement(node: ASTNode) = new ScExtendsBlockImpl(node)
override def createPsi(stub: ScExtendsBlockStub) = new ScExtendsBlockImpl(stub)
} | JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/stubs/elements/ScExtendsBlockElementType.scala | Scala | apache-2.0 | 1,805 |
/*
* MUSIT is a museum database to archive natural and cultural history data.
* Copyright (C) 2016 MUSIT Norway, part of www.uio.no (University of Oslo)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License,
* or any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
package models
/**
* Value class to encode Millimeter precision
*
* @param underlying Double representing the mm value.
*/
case class Millimeters(underlying: Double) extends AnyVal
object Millimeters {
implicit def asDouble(mm: Millimeters): Double = mm.underlying
implicit def fromDouble(d: Double): Millimeters = Millimeters(d)
}
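
// Minimal usage sketch (not part of the original source; the value below is made up).
// It exercises the implicit Double <-> Millimeters conversions defined above.
object MillimetersExample extends App {
  val shelfDepth: Millimeters = 350.0 // Millimeters.fromDouble applies implicitly
  val asDouble: Double = shelfDepth   // Millimeters.asDouble widens it back to Double
  println(s"shelf depth: $asDouble mm")
}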
| kpmeen/musit | service_barcode/app/models/Millimeters.scala | Scala | gpl-2.0 | 1,209 |
import org.scalatra.ScalatraServlet
class ExtremeStartup extends ScalatraServlet {
get("/") {
"The server is running"
}
}
| JoakimMisund/extreme_startup_servers | scala/scalatra/src/main/scala/ExtremeStartup.scala | Scala | bsd-2-clause | 156 |
package breeze.linalg
import breeze.math.Complex
import org.scalatest.funsuite.AnyFunSuite
class kronTest extends AnyFunSuite {
test("kron complex") {
val m = DenseMatrix((Complex(0, 1), Complex(2, 1)), (Complex(3, 3), Complex(4, 4)))
val result = DenseMatrix(
(Complex(-1.0, 0.0), Complex(-1.0, 2.0), Complex(-1.0, 2.0), Complex(3.0, 4.0)),
(Complex(-3.0, 3.0), Complex(-4.0, 4.0), Complex(3.0, 9.0), Complex(4.0, 12.0)),
(Complex(-3.0, 3.0), Complex(3.0, 9.0), Complex(-4.0, 4.0), Complex(4.0, 12.0)),
(Complex(0.0, 18.0), Complex(0.0, 24.0), Complex(0.0, 24.0), Complex(0.0, 32.0))
)
assert(kron(m, m) == result)
}
}
| scalanlp/breeze | math/src/test/scala/breeze/linalg/functions/kronTest.scala | Scala | apache-2.0 | 668 |
package hrscala.validation
object Models {
// INPUT
case class Row(cells: String*)
val people = List(
Row("Ivan", "30", "scala, java, c"),
Row("Mirko", "millenial", "js"),
Row("Josip", "30", "scala, haskell"),
Row("Miro", "400", "java, php, scala")
)
// OUTPUT
case class ScalaDeveloper(name: String, age: Int, languages: Seq[String]) {
assert(name != null && name.nonEmpty, {
"Name cannot be empty"
})
    require(age >= 18, "cannot be a minor")
require(languages.contains("scala"), "The developer must have scala language.")
}
}
object RunIt extends App {
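  // With age = 13 the `require(age >= 18, ...)` check in ScalaDeveloper fails, so this
  // call throws an IllegalArgumentException as soon as the app runs.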
println(Models.ScalaDeveloper("aa", 13, Seq("js", "haskell", "scala")))
}
| HRScala/validations-keep-us-sane | model/src/main/scala/Models.scala | Scala | unlicense | 715 |
package com.typesafe.sbt.packager.windows
import sbt._
import sbt.Keys.{mappings, name, packageBin, sourceDirectory, streams, target, version}
import com.typesafe.sbt.SbtNativePackager.Universal
import com.typesafe.sbt.packager.Keys.{maintainer, packageDescription, packageName, packageSummary}
import com.typesafe.sbt.packager.universal.UniversalPlugin
import com.typesafe.sbt.packager.Compat._
import com.typesafe.sbt.packager.SettingsHelper
/**
* == Windows Plugin ==
*
* This plugin generates ''msi'' packages that can be installed on windows systems.
*
* == Configuration ==
*
* In order to configure this plugin take a look at the available [[com.typesafe.sbt.packager.windows.WindowsKeys]]
*
* == Requirements ==
*
* <ul>
* <li>Windows System</li>
 * <li>Wix Toolset ([[http://wixtoolset.org/]]) installed</li>
* </ul>
*
* @example Enable the plugin in the `build.sbt`
* {{{
* enablePlugins(WindowsPlugin)
* }}}
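 *
 * A slightly fuller configuration sketch; the values below are illustrative
 * placeholders, not plugin defaults:
 * {{{
 *   maintainer in Windows := "Jane Doe <[email protected]>"
 *   packageSummary in Windows := "my-app"
 *   packageDescription in Windows := "Installs my-app as an MSI."
 * }}}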
*/
object WindowsPlugin extends AutoPlugin {
object autoImport extends WindowsKeys {
val Windows: Configuration = config("windows")
}
import autoImport._
override lazy val projectSettings: Seq[Setting[_]] = windowsSettings ++ mapGenericFilesToWindows
override def requires = UniversalPlugin
override def projectConfigurations: Seq[Configuration] = Seq(Windows)
/**
* default windows settings
*/
def windowsSettings: Seq[Setting[_]] =
Seq(
sourceDirectory in Windows := sourceDirectory.value / "windows",
target in Windows := target.value / "windows",
// TODO - Should this use normalized name like the linux guys?
name in Windows := name.value,
packageName in Windows := packageName.value,
// Defaults so that our simplified building works
candleOptions := Seq("-ext", "WixUtilExtension"),
lightOptions := Seq("-ext", "WixUIExtension", "-ext", "WixUtilExtension", "-cultures:en-us"),
wixProductId := WixHelper.makeGUID,
wixProductUpgradeId := WixHelper.makeGUID,
wixMajorVersion := 3,
maintainer in Windows := maintainer.value,
packageSummary in Windows := packageSummary.value,
packageDescription in Windows := packageDescription.value,
wixProductLicense := {
// TODO - document this default.
val default = (sourceDirectory in Windows).value / "License.rtf"
if (default.exists) Some(default)
else None
},
wixPackageInfo := WindowsProductInfo(
id = wixProductId.value,
title = (packageSummary in Windows).value,
version = (version in Windows).value,
maintainer = (maintainer in Windows).value,
description = (packageDescription in Windows).value,
upgradeId = wixProductUpgradeId.value,
comments = "TODO - we need comments." // TODO - allow comments
),
wixFeatures := Seq.empty,
wixProductConfig := WixHelper.makeWixProductConfig(
(name in Windows).value,
wixPackageInfo.value,
wixFeatures.value,
wixProductLicense.value
),
wixConfig := WixHelper.makeWixConfig(
(name in Windows).value,
wixPackageInfo.value,
WixHelper.getNameSpaceDefinitions(wixMajorVersion.value),
wixProductConfig.value
),
wixConfig in Windows := wixConfig.value,
wixProductConfig in Windows := wixProductConfig.value,
wixFile := {
val config = (wixConfig in Windows).value
val wixConfigFile = (target in Windows).value / ((name in Windows).value + ".wxs")
IO.write(wixConfigFile, config.toString)
wixConfigFile
},
wixFiles := Seq(wixFile.value)
) ++ inConfig(Windows)(Seq(packageBin := {
val wsxSources = wixFiles.value
val msi = target.value / (name.value + ".msi")
// First we have to move everything (including the WIX scripts)
// to our target directory.
val targetFlat: Path.FileMap = Path.flat(target.value)
val wsxFiles = wsxSources.map(targetFlat(_).get)
val wsxCopyPairs = wsxSources.zip(wsxFiles).filter {
case (src, dest) => src.getAbsolutePath != dest.getAbsolutePath
}
IO.copy(wsxCopyPairs)
IO.copy(for ((f, to) <- mappings.value) yield (f, target.value / to))
// Now compile WIX
val candleCmd = findWixExecutable("candle") +:
wsxFiles.map(_.getAbsolutePath) ++:
candleOptions.value
val wixobjFiles = wsxFiles.map { wsx =>
wsx.getParentFile / (wsx.base + ".wixobj")
}
streams.value.log.debug(candleCmd mkString " ")
sys.process.Process(candleCmd, Some(target.value)) ! streams.value.log match {
case 0 => ()
case exitCode => sys.error(s"Unable to run WIX compilation to wixobj. Exited with ${exitCode}")
}
// Now create MSI
val lightCmd = List(findWixExecutable("light"), "-out", msi.getAbsolutePath) ++ wixobjFiles
.map(_.getAbsolutePath) ++
lightOptions.value
streams.value.log.debug(lightCmd mkString " ")
sys.process.Process(lightCmd, Some(target.value)) ! streams.value.log match {
case 0 => ()
case exitCode => sys.error(s"Unable to run build msi. Exited with ${exitCode}")
}
msi
}))
/**
* set the `mappings in Windows` and the `wixFeatures`
*/
def mapGenericFilesToWindows: Seq[Setting[_]] =
Seq(
mappings in Windows := (mappings in Universal).value,
wixFeatures := makeWindowsFeatures((packageName in Windows).value, (mappings in Windows).value)
)
/**
* Generates the wix configuration features
*
* @param name - title of the core package
* @param mappings - use to generate different features
* @return windows features
*/
def makeWindowsFeatures(name: String, mappings: Seq[(File, String)]): Seq[WindowsFeature] = {
// TODO select main script! Filter Config links!
val files =
for {
(file, name) <- mappings
if !file.isDirectory
} yield ComponentFile(name, editable = name startsWith "conf")
val corePackage =
WindowsFeature(
id = WixHelper.cleanStringForId(name + "_core").takeRight(38), // Must be no longer
title = name,
desc = "All core files.",
absent = "disallow",
components = files
)
// TODO - Detect bat files to add paths...
val addBinToPath =
// TODO - we may have issues here...
WindowsFeature(
id = "AddBinToPath",
title = "Update Environment Variables",
desc = "Update PATH environment variables (requires restart).",
components = Seq(AddDirectoryToPath("bin"))
)
val configLinks = for {
(file, name) <- mappings
if !file.isDirectory
if name startsWith "conf/"
} yield name.replaceAll("//", "/").stripSuffix("/").stripSuffix("/")
val menuLinks =
WindowsFeature(
id = "AddConfigLinks",
title = "Configuration start menu links",
desc = "Adds start menu shortcuts to edit configuration files.",
components = Seq(AddShortCuts(configLinks))
)
// TODO - Add feature for shortcuts to binary scripts.
Seq(corePackage, addBinToPath, menuLinks)
}
private def findWixExecutable(name: String): String = {
val wixDir = Option(System.getenv("WIX"))
.map(file)
.getOrElse(sys.error("WIX environment not found. Please ensure WIX is installed on this computer."))
val candidates = List(wixDir / (name + ".exe"), wixDir / "bin" / (name + ".exe"))
candidates.find(_.exists).getOrElse(sys.error(s"WIX executable $name.exe was not found in $wixDir")).getAbsolutePath
}
}
object WindowsDeployPlugin extends AutoPlugin {
import WindowsPlugin.autoImport._
override def requires = WindowsPlugin
override def projectSettings: Seq[Setting[_]] =
SettingsHelper.makeDeploymentSettings(Windows, packageBin in Windows, "msi")
}
| kardapoltsev/sbt-native-packager | src/main/scala/com/typesafe/sbt/packager/windows/WindowsPlugin.scala | Scala | bsd-2-clause | 7,939 |
/*
* Copyright 2020 Daniel Spiewak
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shims.conversions
import shims.util.Capture
trait EqConversions {
private[conversions] trait EqShimS2C[A] extends cats.kernel.Eq[A] with Synthetic {
val A: scalaz.Equal[A]
override def eqv(x: A, y: A): Boolean = A.equal(x, y)
}
implicit def equalToCats[A](implicit AC: Capture[scalaz.Equal[A]]): cats.kernel.Eq[A] with Synthetic =
new EqShimS2C[A] { val A = AC.value }
private[conversions] trait EqShimC2S[A] extends scalaz.Equal[A] with Synthetic {
val A: cats.kernel.Eq[A]
override def equal(x: A, y: A): Boolean = A.eqv(x, y)
}
implicit def eqToScalaz[A](implicit AC: Capture[cats.kernel.Eq[A]]): scalaz.Equal[A] with Synthetic =
new EqShimC2S[A] { val A = AC.value }
}
trait OrderConversions extends EqConversions {
private[conversions] trait OrderShimS2C[A] extends cats.kernel.Order[A] with EqShimS2C[A] {
val A: scalaz.Order[A]
override def compare(x: A, y: A): Int = A.order(x, y).toInt
}
implicit def orderToCats[A](implicit AC: Capture[scalaz.Order[A]]): cats.kernel.Order[A] with Synthetic =
new OrderShimS2C[A] { val A = AC.value }
private[conversions] trait OrderShimC2S[A] extends scalaz.Order[A] with EqShimC2S[A] {
val A: cats.kernel.Order[A]
override def order(x: A, y: A): scalaz.Ordering = scalaz.Ordering.fromInt(A.compare(x, y))
}
implicit def orderToScalaz[A](implicit AC: Capture[cats.kernel.Order[A]]): scalaz.Order[A] with Synthetic =
new OrderShimC2S[A] { val A = AC.value }
}
trait SemigroupConversions extends OrderConversions {
private[conversions] trait SemigroupShimS2C[A] extends cats.Semigroup[A] with Synthetic {
val A: scalaz.Semigroup[A]
override def combine(x: A, y: A): A = A.append(x, y)
}
implicit def semigroupToCats[A](implicit FC: Capture[scalaz.Semigroup[A]]): cats.Semigroup[A] with Synthetic =
new SemigroupShimS2C[A] { val A = FC.value }
private[conversions] trait SemigroupShimC2S[A] extends scalaz.Semigroup[A] with Synthetic {
val A: cats.Semigroup[A]
override def append(f1: A, f2: => A): A = A.combine(f1, f2)
}
implicit def semigroupToScalaz[A](implicit FC: Capture[cats.Semigroup[A]]): scalaz.Semigroup[A] with Synthetic =
new SemigroupShimC2S[A] { val A = FC.value }
}
trait MonoidConversions extends SemigroupConversions {
private[conversions] trait MonoidShimS2C[A] extends cats.Monoid[A] with SemigroupShimS2C[A] {
val A: scalaz.Monoid[A]
override def empty: A = A.zero
}
implicit def monoidToCats[A](implicit FC: Capture[scalaz.Monoid[A]]): cats.Monoid[A] with Synthetic =
new MonoidShimS2C[A] { val A = FC.value }
private[conversions] trait MonoidShimC2S[A] extends scalaz.Monoid[A] with SemigroupShimC2S[A] {
val A: cats.Monoid[A]
override def zero: A = A.empty
}
implicit def monoidToScalaz[A](implicit FC: Capture[cats.Monoid[A]]): scalaz.Monoid[A] with Synthetic =
new MonoidShimC2S[A] { val A = FC.value }
}
trait SemigroupKConversions {
private[conversions] trait SemigroupKShimS2C[F[_]] extends cats.SemigroupK[F] with Synthetic {
val F: scalaz.Plus[F]
def combineK[A](x: F[A], y: F[A]): F[A] = F.plus(x, y)
}
implicit def plusToCats[F[_]](implicit FC: Capture[scalaz.Plus[F]]): cats.SemigroupK[F] with Synthetic =
new SemigroupKShimS2C[F] { val F = FC.value }
private[conversions] trait PlusShimC2S[F[_]] extends scalaz.Plus[F] with Synthetic {
val F: cats.SemigroupK[F]
def plus[A](a: F[A], b: => F[A]): F[A] = F.combineK(a, b)
}
implicit def semigroupKToScalaz[F[_]](implicit FC: Capture[cats.SemigroupK[F]]): scalaz.Plus[F] with Synthetic =
new PlusShimC2S[F] { val F = FC.value }
}
trait MonoidKConversions extends SemigroupKConversions {
private[conversions] trait MonoidKShimS2C[F[_]] extends cats.MonoidK[F] with SemigroupKShimS2C[F] {
val F: scalaz.PlusEmpty[F]
def empty[A]: F[A] = F.empty[A]
}
implicit def plusEmptyToCats[F[_]](implicit FC: Capture[scalaz.PlusEmpty[F]]): cats.MonoidK[F] with Synthetic =
new MonoidKShimS2C[F] { val F = FC.value }
private[conversions] trait PlusEmptyShimC2S[F[_]] extends scalaz.PlusEmpty[F] with PlusShimC2S[F] {
val F: cats.MonoidK[F]
def empty[A]: F[A] = F.empty[A]
}
implicit def monoidKToScalaz[F[_]](implicit FC: Capture[cats.MonoidK[F]]): scalaz.PlusEmpty[F] with Synthetic =
new PlusEmptyShimC2S[F] { val F = FC.value }
}
// "kernel" is such an ill-defined thing...
trait ShowConversions {
private[conversions] trait ShowShimS2C[A] extends cats.Show[A] with Synthetic {
val A: scalaz.Show[A]
override def show(a: A): String = A.shows(a)
}
implicit def showToCats[A](implicit FC: Capture[scalaz.Show[A]]): cats.Show[A] with Synthetic =
new ShowShimS2C[A] { val A = FC.value }
private[conversions] trait ShowShimC2S[A] extends scalaz.Show[A] with Synthetic {
val A: cats.Show[A]
override def shows(a: A): String = A.show(a)
}
implicit def showToScalaz[A](implicit FC: Capture[cats.Show[A]]): scalaz.Show[A] with Synthetic =
new ShowShimC2S[A] { val A = FC.value }
}
| djspiewak/shims | core/src/main/scala/shims/conversions/kernel.scala | Scala | apache-2.0 | 5,694 |
package org.etl.server
import org.restlet.resource.ServerResource
import org.restlet.resource.Get
import java.io.BufferedReader
import java.io.FileReader
import org.etl.SparrowStandaloneSetup
import org.etl.parser.antlr.SparrowParser
import org.etl.config.ConfigurationService
import scala.collection.immutable.HashMap
import org.etl.command.TryContext
import com.typesafe.scalalogging.LazyLogging
import org.etl.sparrow.Catch
import org.etl.command.ErrorContext
import org.etl.sparrow.Finally
import org.etl.command.FinallyContext
import org.etl.audit.AuditService
import java.net.InetAddress
import org.restlet.resource.Post
import org.restlet.representation.Representation
import org.restlet.ext.json.JsonRepresentation
class StartProcess extends ServerResource with LazyLogging {
val runMode = "org.etl.process.onethread"
//sample url - http://localhost:8080/process/publish.demandforecast.process#1/start
@Get("application/json")
def represent(): String = {
val inboundValue = getRequest.getAttributes.get("instance");
val alias = getRequest().getAttributes().get("alias");
val instanceName: String = inboundValue.asInstanceOf[String] + "#" + alias.asInstanceOf[String]
logger.info("Starting the process=" + instanceName)
val runtimeContext = ProcessAST.loadProcessAST(instanceName, "{}")
try {
ProcessExecutor.execute(runMode, runtimeContext)
} catch {
case ex: Throwable => {
handleError(ex)
}
} finally {
handleFinally()
}
"{\"start_status\": \"SUCCESS\"}"
}
@Post("application/json")
def acceptAndReturnJson(json: JsonRepresentation): String = {
val inboundValue = getRequest.getAttributes.get("instance");
val alias = getRequest().getAttributes().get("alias");
val instanceName: String = inboundValue.asInstanceOf[String] + "#" + alias.asInstanceOf[String]
logger.info("Starting the process=" + instanceName + ", with incoming json=" + json)
val jsonObject = json.getJsonObject.toString
val runtimeContext = ProcessAST.loadProcessAST(instanceName, jsonObject)
try {
ProcessExecutor.execute(runMode, runtimeContext)
} catch {
case ex: Throwable => {
handleError(ex)
}
} finally {
handleFinally()
}
"{\"start_status\": \"SUCCESS\"}"
}
  def handleError(ex: Throwable) = {
    logger.error("Process execution failed", ex)
  }
def handleFinally() = {
println("Processing the request completed")
}
} | jpvelsamy/sparrow | sparrow-server/src/main/scala/org/etl/server/StartProcess.scala | Scala | apache-2.0 | 2,547 |
// Copyright 2013 Christopher Swenson.
// Author: Christopher Swenson ([email protected])
package com.caswenson.pizza
import com.caswenson.pizza.data.{Pizza, Cities}
import com.simple.simplespec.Spec
import org.junit.Test
object PizzaSpec {
lazy val cities = Cities("census/place")
lazy val orPizza = Pizza(cities, "census/addrfeat", Set("OR"))
}
class PizzaSpec extends Spec {
def isNear(a: LatLon, b: LatLon, tolerance: Double = 0.0001): Boolean = {
((a.lat - b.lat) * (a.lat - b.lat) + (a.lon - b.lon) * (a.lon - b.lon)) <= tolerance
}
class `Pizza tests` {
    @Test def `Check 1005 w burnside st is reasonable`() {
isNear(PizzaSpec.orPizza.geocode(1005, "W Burnside St", "Portland", "OR", Some("97209")).get,
LatLon(45.522973, -122.681172)).must(be(true))
}
    @Test def `Check 1005 w burnside st freeform works`() {
isNear(PizzaSpec.orPizza.geocode("1005 W Burnside St Portland OR 97209").get,
LatLon(45.522973, -122.681172)).must(be(true))
}
    @Test def `Check reverse geocode of 1005 w davis st`() {
PizzaSpec.orPizza.reverseGeocode(LatLon(45.522973,-122.681172)).must(be(
Location(street = Some("1005 W Davis St"),
city = Some("Portland"),
state = Some("OR"),
zip = Some("97209"),
country = Some("USA"),
lat = Some(45.52297339583333),
lon = Some(-122.68117162499999))))
}
}
}
| swenson/pizza | src/test/scala/com/caswenson/pizza/PizzaSpec.scala | Scala | mit | 1,473 |
package amphip.stoch
import spire.implicits._
import org.junit.Assert._
import org.junit.Test
import amphip.dsl._
import amphip.model.ast._
import amphip.data.ModelData._
class TestStochData {
@Test
def testModel1(): Unit = {
val S = set("S")
val T = set("T")
val prob = param("prob", S)
//val link = param("link", S, S, T)
val link = set("link", T)
val p1 = param("p1", T, S)
val p2 = param("p2", T, S)
val p3 = param("p3", T, S)
val model = Model(List(p1, p2, p3)).stochastic(T, S, prob, link)
val model1 = model
.stochDefault(p1, 1)
.stochDefault(p2, 8)
.stochDefault(p3, 0)
val (t1, t2, t3) = (Stage("1"), Stage("2"), Stage("3"))
val model2 = model1.stochStages(t1, t2, t3) // to stablish the order
val BS = BasicScenario
val (low1, med1, high1) = (BS("Low1"), BS("Medium1"), BS("High1"))
val (low2, high2) = (BS("Low2"), BS("High2"))
val (low3, high3) = (BS("Low3"), BS("High3"))
// 3 * 2 * 2 = 12 scenarios
val model3 = model2
.stochBasicScenarios(t1, low1 -> r"1/6", med1 -> r"1/2", high1 -> r"1/3")
.stochBasicScenarios(t2, low2 -> r"2/3", high2 -> r"1/3")
.stochBasicScenarios(t3, low3 -> r"1/3", high3 -> r"2/3")
val model4 = model3
.stochBasicData(p1, t1, low1, 0)
.stochBasicData(p1, t1, high1, 2)
.stochBasicData(p1, t2, high2, 0.3)
.stochBasicData(p2, t2, low2, 0.5)
.stochBasicData(p2, t2, BS("nonexistant"), 0.5)
/*
changes probability of (low1, low2, high3),
adds (low1, low2, new3),
removes (low1, low2, low3)
total = 12 scenarios
*/
val model5 = model4
.stochCustomScenarios(List(low1, low2), high3 -> r"4/5", BS("New3") -> r"1/5")
val model6 = model5
.stochScenarioData(p1, List(low1, low2, high3), 2.5)
.stochScenarioData(p1, List(low1, high2, BS("High3a")), 2.3)
.stochScenarioData(p1, List(low1, high2, BS("High3b")), 2.4)
val stochData = model6.stochData
val TData = stochData.TData
val SData = stochData.SData
println(s"TData: $TData")
println(s"SData: $SData")
val finalScenarios = stochData.finalScenarios
println()
println("finalScenarios:")
println(finalScenarios.mkString("", "\\n", "\\n"))
println("finalProbabilities:")
val finalProbabilities = stochData.finalProbabilities
println(finalProbabilities.mkString("", "\\n", "\\n"))
println()
println("mix:")
val mix = finalScenarios.zip(finalProbabilities).map { case (ss, ps) => ss.zip(ps) }
println(mix.mkString("", "\\n", "\\n"))
val probData = stochData.probabilityData
println(s"probabilityData: $probData")
println(s"probabilityData.sum: ${probData.sum}")
println()
val p1Data = stochData.paramData(p1).toMap
assertEquals(SimpleNum(0) , p1Data(List(1, 1))) // stage 1, "low1"
assertEquals(SimpleNum(1) , p1Data(List(1, 5))) // stage 1, "med1"
assertEquals(SimpleNum(2) , p1Data(List(1, 9))) // stage 1, "high1"
assertEquals(SimpleNum(1) , p1Data(List(2, 1))) // stage 2, "low2"
assertEquals(SimpleNum(0.3), p1Data(List(2, 3))) // stage 2, "high2"
assertEquals(SimpleNum(2.5), p1Data(List(3, 1))) // stage 3, "high3"
assertEquals(SimpleNum(1) , p1Data(List(3, 2))) // stage 3, "new3"
val p1ModelData = StochData.filter(model6.data, p1).params
println("p1ModelData:")
println(amphip.sem.mathprog.genData.getParamData(p1ModelData))
println()
val linkModelData = StochData.filter(model6.data, link).sets
println("linkModelData:")
println(amphip.sem.mathprog.genData.getSetData(linkModelData))
println()
}
//@Test
def testModel2(): Unit = {
val stocks = "stocks"
val bonds = "bonds"
val H = param("H") // horizon of stages
val nS = param("nS")
val S = set("S") default (1 to nS) // scenarios.
val s = dummy("s")
val p = param("p", S)
val T = set("T") default (1 to H) // stages
val t = dummy("t")
val G = param("G") // goal money
val b = param("b") // initial money
val I = set("I") default List(stocks, bonds) // investments.
val i = dummy("i")
val ret = param("ret", S, T, I) // random returns of investment
val x = xvar("x", S, T, I) >= 0 // investment decisions
val q = param("q") // excess utility rate
val r = param("r") // deficit utility rate
val y = xvar("y", S) >= 0 // excess below G
val w = xvar("w", S) >= 0 // deficit over G
val maxUtility = maximize {
sum(s in S)(p(s) * (-r * w(s) + q * y(s)))
}
val initial = st(s in S) { sum(i in I)(x(s, 1, i)) === b }
val balance = st(s in S, t in (2 to H)) {
sum(i in I)(ret(s, t - 1, i) * x(s, t - 1, i)) === sum(i in I)(x(s, t, i))
}
val close = st(s in S) {
sum(i in I)(ret(s, H, i) * x(s, H, i)) + w(s) - y(s) === G
}
val financeMIP = model(maxUtility,
initial,
balance,
close)
//val link = param("link", S, S, T)
val link = set("link", T)
val HVal = 3
val financeStoch = financeMIP.stochastic(T, S, p, link)
.paramData(H, HVal)
.paramData(b, 55000)
.paramData(G, 80000)
.paramData(q, 1)
.paramData(r, 1)
val BS = BasicScenario
val (low, high) = (BS("Low"), BS("High"))
val stages = (1 to HVal).map(i => Stage(i.toString))
val financeStoch1 = financeStoch.stochStages(stages: _*)
val financeStoch2 = stages.foldLeft(financeStoch1) {
(financeStoch1, ti) =>
financeStoch1
.stochBasicScenarios(ti, low -> 0.5, high -> 0.5)
.stochBasicData(ret, ti, low,
List(stocks) -> 1.06,
List(bonds) -> 1.12)
.stochBasicData(ret, ti, high,
List(stocks) -> 1.25,
List(bonds) -> 1.14)
}
val financeStoch3 = financeStoch2.alreadySeparated
val stochData = financeStoch3.stochData
val TData = stochData.TData
val SData = stochData.SData
println(TData)
println(SData)
val finalProbabilities = stochData.finalProbabilities
val probData = stochData.probabilityData
println(probData.sum)
    println(finalProbabilities.mkString("\n"))
    println(probData)
    //val linkData = stochData.linkData
    //println(linkData.mkString("\n"))
    val finalScenarios = stochData.finalScenarios
    println(finalScenarios.mkString("\n"))
    val retData = stochData.paramData(ret)
    println(retData.mkString("\n"))
    // XXX add test for three stages, with two stages in basicScenarios and stage 3 only in customScenarios
val finance = financeStoch3.paramData(nS, SData.size)
println(amphip.sem.mathprog.genModel(finance.mip.model))
println(amphip.sem.mathprog.genData(finance.data))
println(finance.solve)
}
} | gerferra/amphip | core/src/test/scala/amphip/stoch/TestStochData.scala | Scala | mpl-2.0 | 6,890 |
package org.jetbrains.plugins.scala
package editor.enterHandler
import com.intellij.codeInsight.CodeInsightSettings
import com.intellij.codeInsight.editorActions.enter.EnterHandlerDelegate.Result
import com.intellij.codeInsight.editorActions.enter.EnterHandlerDelegateAdapter
import com.intellij.openapi.actionSystem.DataContext
import com.intellij.openapi.editor.Editor
import com.intellij.openapi.editor.actionSystem.EditorActionHandler
import com.intellij.openapi.util.text.StringUtil
import com.intellij.openapi.util.{Ref, TextRange}
import com.intellij.psi.PsiFile
import org.jetbrains.plugins.scala.format.StringConcatenationParser
import org.jetbrains.plugins.scala.lang.formatting.settings.ScalaCodeStyleSettings
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.base.ScLiteral
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.util.MultilineStringSettings
import org.jetbrains.plugins.scala.util.MultilineStringUtil._
/**
* User: Dmitry Naydanov
* Date: 2/27/12
*/
class MultilineStringEnterHandler extends EnterHandlerDelegateAdapter {
private var wasInMultilineString: Boolean = false
private var whiteSpaceAfterCaret: String = ""
override def preprocessEnter(file: PsiFile, editor: Editor, caretOffsetRef: Ref[Integer], caretAdvance: Ref[Integer],
dataContext: DataContext, originalHandler: EditorActionHandler): Result = {
val document = editor.getDocument
val text = document.getText
val caretOffset = caretOffsetRef.get.intValue
if (caretOffset == 0 || caretOffset >= text.length()) return Result.Continue
val element = file findElementAt caretOffset
if (!inMultilineString(element)) return Result.Continue
else wasInMultilineString = true
val ch1 = text.charAt(caretOffset - 1)
val ch2 = text.charAt(caretOffset)
    whiteSpaceAfterCaret = text.substring(caretOffset).takeWhile(c => c == ' ' || c == '\t')
    document.deleteString(caretOffset, caretOffset + whiteSpaceAfterCaret.length)
    if ((ch1 != '(' || ch2 != ')') && (ch1 != '{' || ch2 != '}') || !CodeInsightSettings.getInstance.SMART_INDENT_ON_ENTER)
return Result.Continue
originalHandler.execute(editor, editor.getCaretModel.getCurrentCaret, dataContext)
Result.DefaultForceIndent
}
override def postProcessEnter(file: PsiFile, editor: Editor, dataContext: DataContext): Result = {
if (!file.isInstanceOf[ScalaFile]) return Result.Continue
val caretModel = editor.getCaretModel
val document = editor.getDocument
val offset = caretModel.getOffset
val caretMarker = document.createRangeMarker(offset, offset)
caretMarker.setGreedyToRight(true)
def caretOffset = caretMarker.getEndOffset
val project = file.getProject
val element = file.findElementAt(offset)
if (!wasInMultilineString) return Result.Continue
wasInMultilineString = false
val marginChar = getMarginChar(element)
val settings = new MultilineStringSettings(project)
import settings._
val literal = findParentMLString(element).getOrElse(return Result.Continue)
val literalOffset = literal.getTextRange.getStartOffset
val firstMLQuote = interpolatorPrefix(literal) + multilineQuotes
val firstMLQuoteLength = firstMLQuote.length
if (supportLevel == ScalaCodeStyleSettings.MULTILINE_STRING_NONE || offset - literalOffset < firstMLQuoteLength) return Result.Continue
def getLineByNumber(number: Int): String =
document.getText(new TextRange(document.getLineStartOffset(number), document.getLineEndOffset(number)))
def getSpaces(count: Int) = StringUtil.repeat(" ", count)
def getSmartSpaces(count: Int) = if (useTabs) {
StringUtil.repeat("\\t", count/tabSize) + StringUtil.repeat(" ", count%tabSize)
} else {
StringUtil.repeat(" ", count)
}
def getSmartLength(line: String) = if (useTabs) line.length + line.count(_ == '\\t')*(tabSize - 1) else line.length
def insertNewLine(nlOffset: Int, indent: Int, trimPreviousLine: Boolean) {
document.insertString(nlOffset, "\\n")
forceIndent(nlOffset + 1, indent, None)
if (trimPreviousLine) {
val line = getLineByNumber(document.getLineNumber(nlOffset))
var i = 0
def charToCheck = line.charAt(line.length - 1 - i)
while (i <= line.length - 1 && (charToCheck == ' ' || charToCheck == '\\t')) {
i += 1
}
document.deleteString(nlOffset - i, nlOffset)
}
}
def forceIndent(offset: Int, indent: Int, marginChar: Option[Char]) {
val lineNumber = document.getLineNumber(offset)
val lineStart = document.getLineStartOffset(lineNumber)
val line = getLineByNumber(lineNumber)
      val wsPrefix = line.takeWhile(c => c == ' ' || c == '\t')
document.replaceString(lineStart, lineStart + wsPrefix.length, getSmartSpaces(indent) + marginChar.getOrElse(""))
}
extensions inWriteAction {
val prevLineNumber = document.getLineNumber(offset) - 1
assert(prevLineNumber >= 0)
val prevLine = getLineByNumber(prevLineNumber)
val currentLine = getLineByNumber(prevLineNumber + 1)
val nextLine = if (document.getLineCount > prevLineNumber + 2) getLineByNumber(prevLineNumber + 2) else ""
def prevLinePrefixAfterDelimiter(offsetInLine: Int): Int =
if (prevLine.length > offsetInLine) prevLine.substring(offsetInLine).prefixLength(c => c == ' ' || c == '\\t') else 0
val wasSingleLine = literal.getText.indexOf("\\n") == literal.getText.lastIndexOf("\\n")
val lines = literal.getText.split("\\n")
val marginCharFromSettings = selectBySettings[Option[Char]](None)(Some(marginChar))
val marginCharOpt =
marginCharFromSettings match {
case Some(mChar) if hasMarginChars(element, mChar.toString) ||
(!hasMarginChars(element, mChar.toString) && lines.length > 3) || needAddByType(literal) => marginCharFromSettings
case _ => None
}
if (wasSingleLine || lines.length == 3 &&
(lines(0).endsWith("(") && lines(2).trim.startsWith(")") || lines(0).endsWith("{") && lines(2).trim.startsWith("}"))) {
val trimmedStartLine = getLineByNumber(document.getLineNumber(offset) - 1).trim()
val inConcatenation = literal.getParent match {
//TODO: probably replace
case ScInfixExpr(lit: ScLiteral, op, `literal`) if op.refName.inName == "+" && lit.isString => Option(lit)
//TODO: probably replace
case ScInfixExpr(expr, op, `literal`) if op.refName.inName == "+" && StringConcatenationParser.isString(expr) => Option(expr)
case _ => None
}
val needInsertNLBefore = (!trimmedStartLine.startsWith(firstMLQuote) || inConcatenation.isDefined) && quotesOnNewLine
selectBySettings()(if (needAddByType(literal)) insertStripMargin(document, literal, marginChar))
val prevIndent =
if (inConcatenation.isDefined) inConcatenation.map { expr =>
val exprStart = expr.getTextRange.getStartOffset
val lineStart = document.getLineStartOffset(document.getLineNumber(exprStart))
getSmartLength(document.getText.substring(lineStart, exprStart))
}.get
else prefixLength(prevLine)
val needInsertIndentInt =
if (needInsertNLBefore && inConcatenation.isEmpty) regularIndent
else 0
if (needInsertNLBefore) {
insertNewLine(literalOffset, prevIndent + needInsertIndentInt, trimPreviousLine = true)
}
val indentSize = prevIndent + needInsertIndentInt + interpolatorPrefixLength(literal) + marginIndent
if (literal.getText.substring(offset - literalOffset) == multilineQuotes) {
forceIndent(caretOffset, indentSize, marginCharOpt)
caretMarker.setGreedyToRight(false)
insertNewLine(caretOffset, indentSize - marginIndent, trimPreviousLine = false)
caretMarker.setGreedyToRight(true)
} else {
forceIndent(caretOffset, indentSize, marginCharOpt)
}
if (!wasSingleLine) {
val currentPrefix = getPrefix(getLineByNumber(document.getLineNumber(caretOffset)))
forceIndent(caretOffset + 1, getSmartLength(currentPrefix), marginCharOpt)
}
} else {
val isCurrentLineEmpty = currentLine.trim.length == 0
val currentLineOffset = document.getLineStartOffset(prevLineNumber + 1)
val isPrevLineFirst = prevLine startsWith firstMLQuote
val isPrevLineTrimmedFirst = prevLine.trim startsWith firstMLQuote
val prevLineStartOffset = document getLineStartOffset prevLineNumber
val wsPrefix =
if (isPrevLineFirst) prevLinePrefixAfterDelimiter(firstMLQuoteLength) + firstMLQuoteLength
else prevLine.prefixLength(c => c == ' ' || c == '\\t')
val prefixStriped = prevLine.substring(wsPrefix)
if (supportLevel == ScalaCodeStyleSettings.MULTILINE_STRING_QUOTES_AND_INDENT ||
!prefixStriped.startsWith(Seq(marginChar)) && !prefixStriped.startsWith(firstMLQuote) ||
!lines.map(_.trim).exists(_.startsWith(Seq(marginChar)))) {
if (prevLineStartOffset < literalOffset) {
val beforeQuotes = prevLinePrefixAfterDelimiter(0)
val elementStart = prevLine.indexOf(firstMLQuote) + firstMLQuoteLength
val prevLineWsPrefixAfterQuotes = prevLinePrefixAfterDelimiter(elementStart)
val spacesToInsert =
if (isPrevLineTrimmedFirst) beforeQuotes + firstMLQuoteLength + prevLineWsPrefixAfterQuotes
else (if (isCurrentLineEmpty) elementStart else elementStart - wsPrefix) + prevLineWsPrefixAfterQuotes
forceIndent(currentLineOffset, getSmartLength(getSmartSpaces(spacesToInsert)), None)
}
else if (isCurrentLineEmpty && prevLine.length > 0)
forceIndent(caretOffset, wsPrefix, None)
else if (prevLine.trim.length == 0)
forceIndent(caretOffset, prevLine.length, None)
else if (isPrevLineTrimmedFirst) {
val wsAfterQuotes = prevLinePrefixAfterDelimiter(wsPrefix + firstMLQuoteLength) + firstMLQuoteLength
forceIndent(caretOffset, wsAfterQuotes, None)
}
} else {
val wsAfterMargin =
if (isPrevLineFirst) firstMLQuoteLength else prevLinePrefixAfterDelimiter(wsPrefix + 1)
if (!currentLine.trim.startsWith(Seq(marginChar))) {
val inBraces = prevLine.endsWith("{") && nextLine.trim.startsWith("}") || prevLine.endsWith("(") && nextLine.trim.startsWith(")")
val prefix =
if (inBraces) getPrefix(nextLine)
else if (prevLine.trim.startsWith(Seq(marginChar))) getPrefix(prevLine)
else if (nextLine.trim.startsWith(Seq(marginChar))) getPrefix(nextLine)
else getPrefix(currentLine)
forceIndent(caretOffset, getSmartLength(prefix), marginCharOpt)
document.insertString(caretOffset, getSpaces(wsAfterMargin))
if (inBraces) {
val nextLineOffset = document.getLineStartOffset(prevLineNumber + 2)
forceIndent(nextLineOffset, 0, None)
document.insertString(nextLineOffset, marginChar + getSpaces(wsAfterMargin))
forceIndent(nextLineOffset, getSmartLength(prefix), None)
}
}
}
}
document.insertString(caretOffset, whiteSpaceAfterCaret)
caretModel.moveToOffset(caretOffset)
caretMarker.dispose()
}
Result.Stop
}
} | katejim/intellij-scala | src/org/jetbrains/plugins/scala/editor/enterHandler/MultilineStringEnterHandler.scala | Scala | apache-2.0 | 11,699 |
/**
* © 2019 Refinitiv. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.plugins.impl
import javax.script.{ScriptEngine, ScriptException}
import cmwell.blueprints.jena.{JenaGraph, QueryException}
import cmwell.plugins.spi.SgEngineClient
import org.apache.jena.query.Dataset
import com.tinkerpop.gremlin.groovy.jsr223.GremlinGroovyScriptEngine
import com.tinkerpop.pipes.Pipe
import com.tinkerpop.pipes.util.iterators.SingleIterator
import scala.util.{Failure, Success, Try}
class GremlinParser extends SgEngineClient {
import scala.collection.JavaConverters._
override def eval(ds: Dataset, query: String): String = {
    val graph: com.tinkerpop.blueprints.Graph = new JenaGraph(ds.getDefaultModel) // todo: figure out whether quads can be supported on Gremlin
val engine: ScriptEngine = new GremlinGroovyScriptEngine()
val bindings = engine.createBindings
bindings.put("g", graph)
def eval = engine.eval(query, bindings)
// evil hack:
def extractStartElementFromQuery = {
// should match the first v("URI") OR e("URI") and extract the URI out of it:
"""(?:[v|e]\\(")(.+?)(?:"\\))""".r.findFirstMatchIn(query).map(_.group(1))
// how does it work? glad you asked:
// (?:[v|e]\\(") --> non-capturing group of v or e with a (" afterwards
// (.+?) --> capturing group of non-greedy whatever with one or more chars
// (?:"\\)) --> non-capturing group of ")
// get the first occurrence of that if exists, or return None
//todo one possible improvement is to have [v|e] captured and returned along with the URI,
//todo so the code invoking will know whether to getVertex(URI) or getEdge(URI)
//todo 2 - does g.e("... even legal?!
}
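    // Illustrative only (the URI below is hypothetical, not from this repo): for a query such as
    //   g.v("http://example.org/person/1").out
    // extractStartElementFromQuery yields Some("http://example.org/person/1"), which is then
    // looked up as a vertex (falling back to an edge) when building `firstNode` below.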
def makeTypedPipe[T](starts: T) = {
val pipe = eval // must re-eval per type, otherwise setStarts overrides itself and the universe collapses.
val typedPipe = pipe.asInstanceOf[Pipe[T, String]]
typedPipe.setStarts(new SingleIterator[T](starts))
typedPipe
}
def read(p: Pipe[_, _]) = p.iterator().asScala.mkString("\\n")
val firstNode = extractStartElementFromQuery.map(e => Try(graph.getVertex(e)).getOrElse(graph.getEdge(e)))
Try(eval) match {
case Failure(e) =>
e match {
case e: QueryException => s"[ Error: ${e.getMessage} ]"
case e: ScriptException => "[ Gremlin Syntax Error ]"
}
case Success(r) =>
r match {
case p: Pipe[_, _] => {
Seq(Some(graph), firstNode)
.collect { case Some(x) => x }
.map(makeTypedPipe)
.map(
p =>
Try(read(p)) match {
case Success(r) => Some(r)
case Failure(e) =>
e match {
case e: ClassCastException => None
case _ => Some("[ Unknown Error ]")
}
}
)
.collect { case Some(s) => s }
.mkString
}
case null => "[ Requested element not present in Graph! ]"
case v => v.toString
}
}
}
}
| dudi3001/CM-Well | server/cmwell-plugin-gremlin/src/main/scala/cmwell/plugins/impl/GremlinParser.scala | Scala | apache-2.0 | 3,765 |
package io.eels.component.kudu
import java.util.concurrent.atomic.AtomicBoolean
import com.sksamuel.exts.Logging
import com.sksamuel.exts.io.Using
import io.eels.datastream.{DataStream, Publisher, Subscriber, Subscription}
import io.eels.schema._
import io.eels.{Row, Source}
import org.apache.kudu.client.{KuduClient, KuduScanner, RowResultIterator}
import scala.collection.JavaConverters._
case class KuduSource(tableName: String)(implicit client: KuduClient) extends Source with Logging {
override lazy val schema: StructType = {
val schema = client.openTable(tableName).getSchema
KuduSchemaFns.fromKuduSchema(schema)
}
override def parts(): Seq[Publisher[Seq[Row]]] = Seq(new KuduPublisher(tableName))
class KuduPublisher(tableName: String) extends Publisher[Seq[Row]] with Using {
override def subscribe(subscriber: Subscriber[Seq[Row]]): Unit = {
val projectColumns = schema.fieldNames()
val table = client.openTable(tableName)
val scanner = client.newScannerBuilder(table)
.setProjectedColumnNames(projectColumns.asJava)
.build
try {
val iterator = new ScannerIterator(scanner, schema)
val running = new AtomicBoolean(true)
subscriber.subscribed(Subscription.fromRunning(running))
iterator.takeWhile(_ => running.get).grouped(DataStream.DefaultBatchSize).foreach(subscriber.next)
subscriber.completed()
} catch {
case t: Throwable => subscriber.error(t)
} finally {
scanner.close()
}
}
}
}
object ResultsIterator {
def apply(schema: StructType, iter: RowResultIterator) = new Iterator[Row] {
private val zipped = schema.fields.zipWithIndex
override def hasNext: Boolean = iter.hasNext
override def next(): Row = {
val next = iter.next()
val values = zipped.map { case (field, index) =>
field.dataType match {
case BinaryType => BinaryValueReader.read(next, index)
case BooleanType => BooleanValueReader.read(next, index)
case _: ByteType => ByteValueReader.read(next, index)
case DoubleType => DoubleValueReader.read(next, index)
case FloatType => FloatValueReader.read(next, index)
case _: IntType => IntValueReader.read(next, index)
case _: LongType => LongValueReader.read(next, index)
case _: ShortType => ShortValueReader.read(next, index)
case StringType => StringValueReader.read(next, index)
case TimeMicrosType => LongValueReader.read(next, index)
case TimeMillisType => LongValueReader.read(next, index)
case TimestampMillisType => LongValueReader.read(next, index)
}
}
Row(schema, values)
}
}
}
class ScannerIterator(scanner: KuduScanner, schema: StructType) extends Iterator[Row] {
var iter: Iterator[Row] = Iterator.empty
override def hasNext: Boolean = iter.hasNext || {
if (scanner.hasMoreRows) {
iter = ResultsIterator(schema, scanner.nextRows)
iter.hasNext
} else {
false
}
}
override def next(): Row = iter.next()
}
object KuduSource {
def apply(master: String, table: String): KuduSource = {
implicit val client = new KuduClient.KuduClientBuilder(master).build()
KuduSource(table)
}
}
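// Sketch of intended usage (master address, table name and subscriber are assumptions,
// not taken from this file):
//   val source = KuduSource("kudu-master:7051", "events")
//   source.parts().foreach(_.subscribe(mySubscriber)) // mySubscriber: Subscriber[Seq[Row]]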
| sksamuel/eel-sdk | eel-kudu/src/main/scala/io/eels/component/kudu/KuduSource.scala | Scala | apache-2.0 | 3,297 |
package ElevenToTwenty
/**
* Created by Farrell on 5/20/15.
*/
object P17 {
}
| Spinlocks/99Problems | src/ElevenToTwenty/P17.scala | Scala | apache-2.0 | 82 |
package com.adamkunicki.kiji.twitter
import org.slf4j.LoggerFactory
import akka.actor.{ActorRef, Props, Actor, ActorSystem}
import com.adamkunicki.akka._
import com.adamkunicki.avro._
import akka.routing.RoundRobinRouter
import org.kiji.schema._
import java.io.IOException
import twitter4j._
import scala.collection.JavaConverters._
import org.kiji.schema.util.ToJson
import org.elasticsearch.client.transport.TransportClient
import org.elasticsearch.common.transport.InetSocketTransportAddress
object TweetIngester extends App {
lazy val log = LoggerFactory.getLogger(getClass.getName)
val kijiUri = KijiURI.newBuilder(args(0)).build()
val tweetTableName = "tweet"
val COL_F = "info"
val COL_Q = "tweet"
val esIndexName = "twitter"
val esDocType = "tweet"
val esHostAddress = args(1)
val initialTweets = 1000
// "amandabynes", "rihanna", "katyperry", "jtimberlake", "ActuallyNPH", "wibidata"
val usersToFollow = Array[Long](243442402, 79293791, 21447363, 26565946, 90420314, 377018652)
val numWriters = 4
val numIndexers = 4
// Try to open a Kiji instance.
val kiji = Kiji.Factory.open(kijiUri)
val tablePool = KijiTablePool.newBuilder(kiji)
.withIdleTimeout(10)
.withIdlePollPeriod(1)
.build()
val system = ActorSystem("TweetIngestSystem")
val listener = system.actorOf(Props[Listener], name = "listener")
val master = system.actorOf(Props(new Master(numWriters, numIndexers, listener)))
master ! Start
class KijiTweetWriter extends Actor {
var tweetCount: Int = _
val table = tablePool.get(tweetTableName)
val writer = table.openTableWriter()
def receive = {
case TweetReceived(status) =>
try {
makeTweet(status) match {
case Some(tweet) =>
writer.put(table.getEntityId(status.getUser.getId: java.lang.Long), COL_F, COL_Q, tweet.getCreatedAt, tweet)
tweetCount += 1
sender ! AvroTweet(tweet)
if (tweetCount % 500 == 0) {
writer.flush()
sender ! Progress(500)
}
case None => log.warn("Unable to parse Tweet")
}
} catch {
case e: IOException =>
log.error("Error while writing tweet", e)
}
}
override def postStop(): Unit = {
writer.close()
table.release()
}
}
class Indexer extends Actor {
val client = new TransportClient()
.addTransportAddress(new InetSocketTransportAddress(esHostAddress, 9300))
val bulkRequest = client.prepareBulk()
var tweetCount: Int = _
var errorCount: Int = _
def receive = {
case AvroTweet(tweet) =>
bulkRequest.add(client.prepareIndex(esIndexName, esDocType, tweet.getId.toString)
.setSource(ToJson.toJsonString(tweet, tweet.getSchema)))
tweetCount += 1
if (tweetCount % 500 == 0) {
val response = bulkRequest.execute().actionGet()
if (response.hasFailures) {
log.error(response.buildFailureMessage())
errorCount += 1
}
}
case _ => log.warn("Indexer received unsupported message type.")
}
override def postStop(): Unit = {
client.close()
}
}
class Listener extends Actor {
def receive = {
case Finish =>
log.info("Shutting down.")
context.system.shutdown()
tablePool.close()
kiji.release()
}
}
class Master(numWriters: Int, numIndexers: Int, listener: ActorRef) extends Actor {
val writerRouter = context.actorOf(Props[KijiTweetWriter].withRouter(RoundRobinRouter(numWriters)), name = "writerRouter")
val indexerRouter = context.actorOf(Props[Indexer].withRouter(RoundRobinRouter(numIndexers)), name = "indexerRouter")
var totalTweets: Int = _
def receive = {
case Start =>
// Connect to the Twitter API and start sending out tweets
val tweetStream = new TwitterStreamFactory().getInstance()
val statusListener = new StatusListener {
def onStallWarning(warning: StallWarning) {
log.info("Got stall warning: " + warning)
}
def onException(e: Exception) {
log.error("Exception occurred", e)
}
def onDeletionNotice(deletionNotice: StatusDeletionNotice) {}
def onScrubGeo(userId: Long, upToStatusId: Long) {}
def onStatus(status: Status) {
writerRouter ! TweetReceived(status)
}
def onTrackLimitationNotice(numLimitedStatuses: Int) {}
}
tweetStream.addListener(statusListener)
tweetStream.filter(new FilterQuery(initialTweets, usersToFollow))
case Progress(numTweets) =>
totalTweets += numTweets
log.info(f"Wrote $totalTweets%d tweets.")
case AvroTweet(tweet) =>
indexerRouter ! AvroTweet(tweet)
case _ => log.warn("Dropping unrecognized message.")
}
}
private def makeTweet(status: Status): Option[Tweet] = {
if (status != null) {
Some(Tweet.newBuilder()
.setCreatedAt(status.getCreatedAt.getTime)
.setId(status.getId)
.setText(status.getText)
.setSource(status.getSource)
.setIsTruncated(status.isTruncated)
.setInReplyToStatusId(status.getInReplyToStatusId)
.setInReplyToUserId(status.getInReplyToUserId)
.setInReplyToScreenName(status.getInReplyToScreenName)
.setGeolocation(makeGeolocation(status.getGeoLocation).getOrElse(null))
.setPlace(makePlace(status.getPlace).getOrElse(null))
.setIsFavorited(status.isFavorited)
.setIsRetweeted(status.isRetweeted)
.setFavoriteCount(status.getFavoriteCount)
.setUser(makeUser(status.getUser))
.setRetweetedStatus(makeTweet(status.getRetweetedStatus).getOrElse(null))
.setContributors(status.getContributors.toList.map(Long.box).asJava)
.setRetweetCount(status.getRetweetCount)
.setIsPossiblySensitive(status.isPossiblySensitive)
.setIsoLanguageCode(status.getIsoLanguageCode)
.build())
} else {
None
}
}
private def makeUser(u: twitter4j.User): com.adamkunicki.avro.User = {
com.adamkunicki.avro.User.newBuilder()
.setId(u.getId)
.setName(u.getName)
.setScreenName(u.getScreenName)
.setLocation(u.getLocation)
.setDescription(u.getDescription)
.setFollowersCount(u.getFollowersCount)
.setStatusesCount(u.getStatusesCount)
.build()
}
private def makeGeolocation(geo: GeoLocation): Option[Geolocation] = {
if (geo != null) {
Some(Geolocation.newBuilder()
.setLongitude(geo.getLongitude)
.setLatitude(geo.getLatitude)
.build())
} else {
None
}
}
private def makePlace(p: twitter4j.Place): Option[com.adamkunicki.avro.Place] = {
if (p != null) {
val place = com.adamkunicki.avro.Place.newBuilder()
.setName(p.getName)
.setStreetAddress(p.getStreetAddress)
.setCountry(p.getCountry)
.setCountryCode(p.getCountryCode)
.setId(p.getId)
.setPlaceType(p.getPlaceType)
.setUrl(p.getURL)
.setFullName(p.getFullName)
.setBoundingBoxType(p.getBoundingBoxType)
.setGeometryType(p.getGeometryType)
.build()
try {
place.setBoundingBoxCoordinates(p.getBoundingBoxCoordinates
.map(_.map(makeGeolocation(_).getOrElse(null)).toList.asJava).toList.asJava)
} catch {
case e: NullPointerException => place.setBoundingBoxCoordinates(null)
}
try {
place.setGeometryCoordinates(p.getGeometryCoordinates
.map(_.map(makeGeolocation(_).getOrElse(null)).toList.asJava).toList.asJava)
} catch {
case e: NullPointerException => place.setGeometryCoordinates(null)
}
try {
place.setContainedWithin(p.getContainedWithIn.toList
.map(makePlace(_).getOrElse(null)).asJava)
} catch {
case e: NullPointerException => place.setContainedWithin(null)
}
Some(place)
} else {
None
}
}
}
| kunickiaj/kiji-tweet-stream | src/main/scala/com/adamkunicki/kiji/twitter/TweetIngester.scala | Scala | apache-2.0 | 8,142 |
package com.classcat.ccnsm2
import org.apache.spark.SparkContext
class DataBasic {
protected val sc : SparkContext = GData.sc
protected val bro_logs : String = MyConfig.bro_logs
// protected val bro_logs : String = "%s/logs".format(bro_home)
protected var is_error : Boolean = false
protected var msg_error : String = ""
  def getError: (Boolean, String) = (is_error, msg_error)
}
| classcat/cc-nsm2-ui | src/main/scala/com/classcat/ccnsm2/DataBasic.scala | Scala | gpl-3.0 | 435 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.execution.internal.collection.queues
import monix.execution.ChannelType
import monix.execution.ChannelType.{SingleConsumer, SingleProducer}
import monix.execution.internal.atomic.UnsafeAccess
import monix.execution.internal.collection.LowLevelConcurrentQueue
import monix.execution.internal.jctools.queues.MessagePassingQueue
import sun.misc.Unsafe
import scala.collection.mutable
private[internal] abstract class FromCircularQueue[A](queue: MessagePassingQueue[A])
extends LowLevelConcurrentQueue[A] {
def fenceOffer(): Unit
def fencePoll(): Unit
final def isEmpty: Boolean =
queue.isEmpty
final def offer(elem: A): Int =
if (queue.offer(elem)) 0 else 1
final def poll(): A =
queue.poll()
final def clear(): Unit =
queue.clear()
final def drainToBuffer(buffer: mutable.Buffer[A], limit: Int): Int = {
val consume = new QueueDrain[A](buffer)
queue.drain(consume, limit)
consume.count
}
}
private[internal] object FromCircularQueue {
/**
* Builds a [[FromCircularQueue]] instance.
*/
def apply[A](queue: MessagePassingQueue[A], ct: ChannelType): FromCircularQueue[A] =
ct match {
case ChannelType.MPMC =>
new MPMC[A](queue)
case ChannelType.MPSC =>
if (UnsafeAccess.HAS_JAVA8_INTRINSICS) new Java8MPSC[A](queue)
else new Java7[A](queue, ct)
case ChannelType.SPMC =>
if (UnsafeAccess.HAS_JAVA8_INTRINSICS) new Java8SPMC[A](queue)
else new Java7[A](queue, ct)
case ChannelType.SPSC =>
if (UnsafeAccess.HAS_JAVA8_INTRINSICS) new Java8SPSC[A](queue)
else new Java7[A](queue, ct)
}
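  // For example (descriptive note only, not an additional code path): apply(queue, ChannelType.MPSC)
  // on a JVM with Java 8 intrinsics yields Java8MPSC, whose fencePoll() issues a full fence
  // (single consumer side) while fenceOffer() is a no-op (multiple producers).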
private final class MPMC[A](queue: MessagePassingQueue[A]) extends FromCircularQueue[A](queue) {
def fenceOffer(): Unit = ()
def fencePoll(): Unit = ()
}
private final class Java8SPMC[A](queue: MessagePassingQueue[A]) extends FromCircularQueue[A](queue) {
private[this] val UNSAFE =
UnsafeAccess.getInstance().asInstanceOf[Unsafe]
def fenceOffer(): Unit = UNSAFE.fullFence()
def fencePoll(): Unit = ()
}
private final class Java8MPSC[A](queue: MessagePassingQueue[A]) extends FromCircularQueue[A](queue) {
private[this] val UNSAFE =
UnsafeAccess.getInstance().asInstanceOf[Unsafe]
def fenceOffer(): Unit = ()
def fencePoll(): Unit = UNSAFE.fullFence()
}
private final class Java8SPSC[A](queue: MessagePassingQueue[A]) extends FromCircularQueue[A](queue) {
private[this] val UNSAFE =
UnsafeAccess.getInstance().asInstanceOf[Unsafe]
def fenceOffer(): Unit = UNSAFE.fullFence()
def fencePoll(): Unit = UNSAFE.fullFence()
}
private final class Java7[A](queue: MessagePassingQueue[A], ct: ChannelType)
extends FromCircularQueue[A](queue) {
def fenceOffer(): Unit =
if (ct.producerType == SingleProducer) {
raise()
}
def fencePoll(): Unit =
if (ct.consumerType == SingleConsumer) {
raise()
}
private def raise(): Unit = {
throw new IllegalAccessException("Unsafe.fullFence not supported on this platform! (please report bug)")
}
}
}
| monifu/monifu | monix-execution/jvm/src/main/scala/monix/execution/internal/collection/queues/FromCircularQueue.scala | Scala | apache-2.0 | 3,810 |
package com.lkroll.ep.mapviewer.datamodel
import java.util.UUID;
import squants._
import squants.motion._
object Designation {
def apply(p: Planet, ordinal: Int): (String, String) =
("Designation" -> s"${p.name} ${RomanNumerals.toRomanNumerals(ordinal)}")
}
class Moon(_name: String, _id: UUID, _mass: Mass, val radius: Length, val planet: Planet, val ordinal: Int)
extends AstronomicalBody(_name, _id, _mass)
with SingleViewable {
override def `type`: String = "Moon";
def surfaceGravity: Acceleration = {
val g = Constants.G * mass.toKilograms / Math.pow(radius.toMeters, 2);
MetersPerSecondSquared(g)
}
}
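// Rough sanity check for surfaceGravity (approximate figures; `earth`, the UUID and the squants
// unit constructors below are assumptions, not part of this file):
//   val luna = new Moon("Luna", UUID.randomUUID(), Kilograms(7.342e22), Kilometers(1737.1), earth, 1)
//   luna.surfaceGravity // ~1.62 m/s², the familiar lunar value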
| Bathtor/ep-explorer | src/main/scala/com/lkroll/ep/mapviewer/datamodel/Moon.scala | Scala | mit | 639 |
package mesosphere.marathon
import javax.inject.{ Inject, Named }
import akka.actor.{ ActorRef, ActorSystem }
import akka.event.EventStream
import com.fasterxml.jackson.databind.ObjectMapper
import mesosphere.marathon.MarathonSchedulerActor.ScaleApp
import mesosphere.marathon.Protos.MarathonTask
import mesosphere.marathon.event._
import mesosphere.marathon.health.HealthCheckManager
import mesosphere.marathon.state.{ AppDefinition, AppRepository, PathId, Timestamp }
import mesosphere.marathon.tasks.TaskQueue.QueuedTask
import mesosphere.marathon.tasks._
import mesosphere.mesos.util.FrameworkIdUtil
import mesosphere.mesos.{ TaskBuilder, protos }
import org.apache.log4j.Logger
import org.apache.mesos.Protos._
import org.apache.mesos.{ Scheduler, SchedulerDriver }
import scala.collection.JavaConverters._
import scala.collection.immutable.Seq
import scala.concurrent.{ Await, Future }
import scala.util.{ Failure, Success }
trait SchedulerCallbacks {
def disconnected(): Unit
}
class MarathonScheduler @Inject() (
@Named(EventModule.busName) eventBus: EventStream,
offerMatcher: OfferMatcher,
@Named("schedulerActor") schedulerActor: ActorRef,
appRepo: AppRepository,
healthCheckManager: HealthCheckManager,
taskTracker: TaskTracker,
taskQueue: TaskQueue,
frameworkIdUtil: FrameworkIdUtil,
taskIdUtil: TaskIdUtil,
system: ActorSystem,
config: MarathonConf,
schedulerCallbacks: SchedulerCallbacks) extends Scheduler {
private[this] val log = Logger.getLogger(getClass.getName)
import mesosphere.mesos.protos.Implicits._
import mesosphere.util.ThreadPoolContext.context
implicit val zkTimeout = config.zkFutureTimeout
override def registered(
driver: SchedulerDriver,
frameworkId: FrameworkID,
master: MasterInfo): Unit = {
log.info(s"Registered as ${frameworkId.getValue} to master '${master.getId}'")
frameworkIdUtil.store(frameworkId)
eventBus.publish(SchedulerRegisteredEvent(frameworkId.getValue, master.getHostname))
}
override def reregistered(driver: SchedulerDriver, master: MasterInfo): Unit = {
log.info("Re-registered to %s".format(master))
eventBus.publish(SchedulerReregisteredEvent(master.getHostname))
}
override def resourceOffers(driver: SchedulerDriver, offers: java.util.List[Offer]): Unit = {
// Check for any tasks which were started but never entered TASK_RUNNING
// TODO resourceOffers() doesn't feel like the right place to run this
val toKill = taskTracker.checkStagedTasks
if (toKill.nonEmpty) {
log.warn(s"There are ${toKill.size} tasks stuck in staging for more " +
s"than ${config.taskLaunchTimeout()}ms which will be killed")
log.info(s"About to kill these tasks: $toKill")
for (task <- toKill)
driver.killTask(protos.TaskID(task.getId))
}
// remove queued tasks with stale (non-current) app definition versions
val appVersions: Map[PathId, Timestamp] =
Await.result(appRepo.currentAppVersions(), config.zkTimeoutDuration)
taskQueue.retain {
case QueuedTask(app, _) =>
appVersions.get(app.id) contains app.version
}
offerMatcher.processResourceOffers(driver, offers.asScala)
}
override def offerRescinded(driver: SchedulerDriver, offer: OfferID): Unit = {
log.info("Offer %s rescinded".format(offer))
}
override def statusUpdate(driver: SchedulerDriver, status: TaskStatus): Unit = {
log.info("Received status update for task %s: %s (%s)"
.format(status.getTaskId.getValue, status.getState, status.getMessage))
val appId = taskIdUtil.appId(status.getTaskId)
// forward health changes to the health check manager
val maybeTask = taskTracker.fetchTask(appId, status.getTaskId.getValue)
for (marathonTask <- maybeTask)
healthCheckManager.update(status, Timestamp(marathonTask.getVersion))
import org.apache.mesos.Protos.TaskState._
val killedForFailingHealthChecks =
status.getState == TASK_KILLED && status.hasHealthy && !status.getHealthy
if (status.getState == TASK_ERROR || status.getState == TASK_FAILED || killedForFailingHealthChecks)
appRepo.currentVersion(appId).foreach {
_.foreach(taskQueue.rateLimiter.addDelay)
}
status.getState match {
case TASK_ERROR | TASK_FAILED | TASK_FINISHED | TASK_KILLED | TASK_LOST =>
// Remove from our internal list
taskTracker.terminated(appId, status).foreach { taskOption =>
taskOption match {
case Some(task) => postEvent(status, task)
case None => log.warn(s"Couldn't post event for ${status.getTaskId}")
}
schedulerActor ! ScaleApp(appId)
}
case TASK_RUNNING if !maybeTask.exists(_.hasStartedAt) => // staged, not running
taskTracker.running(appId, status).onComplete {
case Success(task) =>
appRepo.app(appId, Timestamp(task.getVersion)).onSuccess {
case maybeApp => maybeApp.foreach(taskQueue.rateLimiter.resetDelay)
}
postEvent(status, task)
case Failure(t) =>
log.warn(s"Couldn't post event for ${status.getTaskId}", t)
log.warn(s"Killing task ${status.getTaskId}")
driver.killTask(status.getTaskId)
}
case TASK_STAGING if !taskTracker.contains(appId) =>
log.warn(s"Received status update for unknown app $appId")
log.warn(s"Killing task ${status.getTaskId}")
driver.killTask(status.getTaskId)
case _ =>
taskTracker.statusUpdate(appId, status).onSuccess {
case None =>
log.warn(s"Killing task ${status.getTaskId}")
driver.killTask(status.getTaskId)
}
}
}
override def frameworkMessage(
driver: SchedulerDriver,
executor: ExecutorID,
slave: SlaveID,
message: Array[Byte]): Unit = {
log.info("Received framework message %s %s %s ".format(executor, slave, message))
eventBus.publish(MesosFrameworkMessageEvent(executor.getValue, slave.getValue, message))
}
override def disconnected(driver: SchedulerDriver) {
log.warn("Disconnected")
eventBus.publish(SchedulerDisconnectedEvent())
// Disconnection from the Mesos master has occurred.
// Thus, call the scheduler callbacks.
schedulerCallbacks.disconnected()
}
override def slaveLost(driver: SchedulerDriver, slave: SlaveID) {
log.info(s"Lost slave $slave")
}
override def executorLost(
driver: SchedulerDriver,
executor: ExecutorID,
slave: SlaveID,
p4: Int) {
log.info(s"Lost executor $executor slave $p4")
}
override def error(driver: SchedulerDriver, message: String) {
log.warn("Error: %s".format(message))
suicide()
}
private def suicide(): Unit = {
log.fatal("Committing suicide")
// Asynchronously call sys.exit() to avoid deadlock due to the JVM shutdown hooks
Future {
sys.exit(9)
} onFailure {
case t: Throwable => log.fatal("Exception while committing suicide", t)
}
}
private def postEvent(status: TaskStatus, task: MarathonTask): Unit = {
log.info("Sending event notification.")
eventBus.publish(
MesosStatusUpdateEvent(
status.getSlaveId.getValue,
status.getTaskId.getValue,
status.getState.name,
if (status.hasMessage) status.getMessage else "",
taskIdUtil.appId(status.getTaskId),
task.getHost,
task.getPortsList.asScala,
task.getVersion
)
)
}
}
| quamilek/marathon | src/main/scala/mesosphere/marathon/MarathonScheduler.scala | Scala | apache-2.0 | 7,547 |
/*
* =========================================================================================
* Copyright © 2015 the khronus project <https://github.com/hotels-tech/khronus>
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
* =========================================================================================
*/
package com.searchlight.khronus.influx.finder
import com.searchlight.khronus.influx.service.Dashboard
import com.searchlight.khronus.influx.store.CassandraDashboards
import org.apache.commons.codec.binary.Base64
import org.scalatest.{FunSuite, Matchers}
class InfluxDashboardResolverSpec extends FunSuite with Matchers with BaseInfluxIntegrationSpec {
override def tableNames: Seq[String] = Seq("dashboard")
test("Store dashboard saves dashboard ok") {
val plainName = "dashboardName"
val dashboard = getDashboard(plainName)
val futureStore = CassandraDashboards.influxDashboardResolver.store(dashboard)
val result = await(futureStore)
result should be(plainName)
val dashboards = await(CassandraDashboards.influxDashboardResolver.lookup(plainName))
dashboards.size should be(1)
dashboards(0).name should be(dashboard.name)
dashboards(0).columns should be(dashboard.columns)
dashboards(0).points should be(dashboard.points)
}
test("list dashboards returns all dashboards that matches criteria") {
val dashboardTest1 = getDashboard("test1")
await(CassandraDashboards.influxDashboardResolver.store(dashboardTest1))
val otroDashboard = getDashboard("otroDashboard")
await(CassandraDashboards.influxDashboardResolver.store(otroDashboard))
val dashboardTest2 = getDashboard("test2")
await(CassandraDashboards.influxDashboardResolver.store(dashboardTest2))
val criteria = "test"
val listDashboardsGrafanaExpression = s"select * from /grafana.dashboard_.*/ where title =~ /.*$criteria.*/i&time_precision=s"
val futureDashboards = CassandraDashboards.influxDashboardResolver.dashboardOperation(listDashboardsGrafanaExpression)
val results = await(futureDashboards)
results.size should be(2)
results(0).name should be(dashboardTest1.name)
results(1).name should be(dashboardTest2.name)
}
test("Get dashboard returns the dashboard ok") {
val dashboardTest = getDashboard("test")
await(CassandraDashboards.influxDashboardResolver.store(dashboardTest))
val encodedName = dashboardTest.name
val getDashboardGrafanaExpression = s"""select dashboard from \\"grafana.dashboard_$encodedName\\"&time_precision=s"""
val futureDashboard = CassandraDashboards.influxDashboardResolver.dashboardOperation(getDashboardGrafanaExpression)
val result = await(futureDashboard)
result.size should be(1)
result(0).name should be(dashboardTest.name)
}
test("Drop dashboard deletes dashboard ok") {
val plainName = "dashboardName"
val dashboard = getDashboard(plainName)
await(CassandraDashboards.influxDashboardResolver.store(dashboard))
await(CassandraDashboards.influxDashboardResolver.lookup(plainName)).size should be(1)
// Drop
val encodedName = dashboard.name
val dropDashboardGrafanaExpression = s"""drop series \\"grafana.dashboard_$encodedName\\""""
await(CassandraDashboards.influxDashboardResolver.dashboardOperation(dropDashboardGrafanaExpression))
await(CassandraDashboards.influxDashboardResolver.lookup(plainName)).size should be(0)
}
test("Unknown grafana expression throws exception") {
val unknownGrafanaExpression = "Unknown grafana expression"
intercept[UnsupportedOperationException] {
CassandraDashboards.influxDashboardResolver.dashboardOperation(unknownGrafanaExpression)
}
}
private def getDashboard(dashboardName: String): Dashboard = {
val timestamp = System.currentTimeMillis().toString
val columns = Vector("time", "sequence_number", "title", "tags", "dashboard", "id")
val points = Vector(Vector(timestamp, "123", "Title", "", "{}", "dashboard1"))
Dashboard(Base64.encodeBase64String(dashboardName.getBytes()), columns, points)
}
} | despegar/khronus | khronus-influx-api/src/it/scala/com.searchlight.khronus.influx/finder/InfluxDashboardResolverSpec.scala | Scala | apache-2.0 | 4,597 |
/*
* Copyright 2018 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package domain
import model.Pence
import play.api.libs.json.{JsPath, Reads, Json}
import play.api.libs.functional.syntax._
case class Order(id: String,
netAmountInPence: Pence,
commissionAmountInPence: Pence,
grossAmountInPence: Pence,
rate: BigDecimal) {
val hasAmountInfo = grossAmountInPence.value != 0L
}
object Order {
implicit val orderReads: Reads[Order] = (
(JsPath \\ "id").read[String] and
(JsPath \\ "netAmountInPence").read[Pence] and
(JsPath \\ "commissionAmountInPence").read[Pence] and
(JsPath \\ "totalAmountInPence").read[Pence] and
(JsPath \\ "rate").read[BigDecimal]
)(Order.apply _)
}
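// Illustrative only: assuming `Pence` reads from a plain JSON number, a payload shaped like
//   {"id": "order-1", "netAmountInPence": 100, "commissionAmountInPence": 5,
//    "totalAmountInPence": 105, "rate": 1.05}
// would deserialize through `orderReads` via Json.parse(...).as[Order] (all values hypothetical).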
| hmrc/worldpay-downloader | app/domain/Order.scala | Scala | apache-2.0 | 1,310 |
package lore.compiler.transformation
import lore.compiler.core.Position
import lore.compiler.feedback.{Feedback, Reporter, StructFeedback}
import lore.compiler.resolution.TypeExpressionEvaluator
import lore.compiler.semantics.NamePath
import lore.compiler.semantics.expressions.Expression
import lore.compiler.semantics.scopes.{BindingScope, StructConstructorBinding, StructObjectBinding, TypeScope}
import lore.compiler.semantics.structures.{StructDefinition, StructPropertyDefinition}
import lore.compiler.syntax.TypeExprNode
object StructTransformation {
/**
* Gets the constructor binding corresponding to the given struct `name`.
*/
def getConstructorBinding(
name: NamePath,
position: Position,
)(implicit bindingScope: BindingScope, reporter: Reporter): Option[StructConstructorBinding] = {
bindingScope.resolveStatic(name, position).flatMap {
case binding: StructConstructorBinding => Some(binding)
case _: StructObjectBinding =>
reporter.error(StructFeedback.Object.NoConstructor(name, position))
None
case _ =>
reporter.error(StructFeedback.ConstructorExpected(name, position))
None
}
}
/**
* Instantiates the given struct constructor binding with the given type arguments.
*/
def getConstructorValue(
binding: StructConstructorBinding,
typeArgumentNodes: Vector[TypeExprNode],
position: Position,
)(implicit bindingScope: BindingScope, typeScope: TypeScope, reporter: Reporter): Expression.ConstructorValue = {
val typeArguments = typeArgumentNodes.map(TypeExpressionEvaluator.evaluate)
val structType = binding.instantiateStructType(typeArguments, position)
Expression.ConstructorValue(binding, structType, position)
}
case class DuplicateProperty(name: String, override val position: Position) extends Feedback.Error(position) {
override def message: String = s"The property $name occurs more than once in the instantiation. Properties must be unique here."
}
case class MissingProperty(name: String, override val position: Position) extends Feedback.Error(position) {
override def message: String = s"This map-style instantiation is missing a property $name."
}
case class IllegalProperty(name: String, override val position: Position) extends Feedback.Error(position) {
override def message: String = s"The struct to be instantiated does not have a property $name."
}
case class IllegallyTypedProperty(property: StructPropertyDefinition, expression: Expression) extends Feedback.Error(expression) {
override def message: String =
s"The property ${property.name} is supposed to be assigned a value of type ${expression.tpe}. However," +
s" the property itself has the type ${property.tpe}, which is not a subtype of ${expression.tpe}."
}
/**
* Transforms the name/expression pairs in `entries` to an ordered list of arguments with which the struct's
* constructor may be invoked.
*/
def entriesToArguments(struct: StructDefinition, entries: Vector[(String, Expression)], position: Position)(implicit reporter: Reporter): Vector[Expression] = {
verifyNamesUnique(entries, position)
correlateEntries(struct, entries.toMap, position)
}
private def verifyNamesUnique(entries: Vector[(String, Expression)], position: Position)(implicit reporter: Reporter): Unit = {
entries.map(_._1).groupBy(identity).foreach {
case (_, Vector(_)) =>
case (name, _) => reporter.error(DuplicateProperty(name, position))
}
}
/**
* Assigns entries to properties, potentially filling missing properties with their default values. Missing
* properties without a default value and illegal properties are reported as errors. The result is a list of struct
* constructor arguments in the correct order.
*/
private def correlateEntries(
struct: StructDefinition,
entries: Map[String, Expression],
position: Position,
)(implicit reporter: Reporter): Vector[Expression] = {
var arguments = Vector.empty[Expression]
var missing = Vector.empty[String]
val illegal = entries.keys.toVector.diff(struct.properties.map(_.name))
struct.properties.foreach { property =>
entries.get(property.name) match {
case Some(expression) =>
arguments = arguments :+ expression
case None =>
property.defaultValue match {
case Some(defaultValue) =>
// TODO (assembly): This needs to be changed to a PropertyDefaultValue expression, because this being a
// call is an assembly detail.
val expression = Expression.Call(defaultValue.callTarget, Vector.empty, defaultValue.tpe, position)
arguments = arguments :+ expression
case None => missing = missing :+ property.name
}
}
}
reporter.error(missing.map(MissingProperty(_, position)))
reporter.error(illegal.map(IllegalProperty(_, position)))
arguments
}
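  // Worked illustration (hypothetical struct, not from this codebase): for a struct declared as
  //   struct Person { name: String, age: Int = 18 }
  // and entries Map("name" -> <expr>), correlateEntries yields Vector(<expr>, <call to age's default>);
  // with an extra key "height" it would additionally report IllegalProperty("height", position).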
}
| marcopennekamp/lore | compiler/src/lore/compiler/transformation/StructTransformation.scala | Scala | mit | 5,025 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.frontend.v2_3.ast
import org.neo4j.cypher.internal.frontend.v2_3.SemanticCheck
import org.neo4j.cypher.internal.frontend.v2_3.ast.Expression.SemanticContext
import org.neo4j.cypher.internal.frontend.v2_3.helpers.NonEmptyList
case class AndedPropertyInequalities(identifier: Identifier, property: Property, inequalities: NonEmptyList[InequalityExpression]) extends Expression {
def position = identifier.position
override def semanticCheck(ctx: SemanticContext): SemanticCheck =
inequalities.map(_.semanticCheck(ctx)).reduceLeft(_ chain _)
}
| HuangLS/neo4j | community/cypher/frontend-2.3/src/main/scala/org/neo4j/cypher/internal/frontend/v2_3/ast/AndedPropertyInequalities.scala | Scala | apache-2.0 | 1,382 |
package hello
import org.springframework.boot.SpringApplication
//import HelloConfig.
/**
 * This object bootstraps the Spring Boot web application.
* Via Gradle: gradle bootRun
*
* @author saung
* @since 1.0
*/
object HelloWebApplication {
def main(args: Array[String]) {
SpringApplication.run(classOf[HelloConfig])
}
}
| GauravBuche/hello-world | src/main/scala/hello/HelloWebApplication.scala | Scala | mit | 365 |
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo.model.effect
import com.typesafe.scalalogging.LazyLogging
import io.truthencode.ddo.enhancement.BonusType
import org.scalatest.TryValues.convertTryToSuccessOrFailure
import org.scalatest.funspec.AnyFunSpec
import org.scalatest.matchers.should.Matchers
class EffectParameterBuilderTest extends AnyFunSpec with Matchers with LazyLogging {
describe("Effect Parameter Builder") {
it("Should be able to be built with just required values") {
def builder: EffectParameterList =
EffectParameterBuilder()
.toggleOnValue(TriggerEvent.Passive)
.toggleOffValue(TriggerEvent.Never)
.addBonusType(BonusType.Alchemical)
.build
noException shouldBe thrownBy(builder)
builder.modifiers.foreach { p => (p.parameter should be).a(Symbol("success")) }
}
it("Should function with All optional values") {
def builder: EffectParameterList = EffectParameterBuilder()
.toggleOnValue(TriggerEvent.Passive)
.toggleOffValue(TriggerEvent.Never)
.addBonusType(BonusType.Feat)
.addMagnitude()
.addDifficultyCheck()
.build
noException shouldBe thrownBy(builder)
builder.modifiers.foreach { p => (p.parameter should be).a(Symbol("success")) }
}
it("Should ensure no duplicate values") {
// set the toggleOffValue multiple times
def builder: EffectParameterList =
EffectParameterBuilder()
.toggleOnValue(TriggerEvent.Passive)
.toggleOffValue(TriggerEvent.OnRest)
.addBonusType(BonusType.Alchemical)
.toggleOffValue(TriggerEvent.OnToggle)
.toggleOffValue(TriggerEvent.Never)
.build
val list =
for { m <- builder.modifiers } yield (
m.parameter.success.value.entryName,
m.parameter.success.value)
list.foreach { v =>
logger.info(v._1)
}
}
it("Should allow optional values") {
"""
def makePizzaWithOptional: EffectParameterList =
EffectParameterBuilder()
.toggleOnValue(TriggerEvent.OnRest)
.addBonusType(BonusType.Festive)
.addMagnitude()
.toggleOffValue(TriggerEvent.OnStance)
.build
""".stripMargin should compile
}
it("should not allow omitting required values") {
// Missing required BonusType
"""
| def makePizzaWithOptional: EffectParameterList =
| EffectParameterBuilder()
| .toggleOnValue(TriggerEvent.OnRest)
| .addMagnitude()
| .toggleOffValue(TriggerEvent.OnStance)
| .build""".stripMargin shouldNot compile
}
}
}
| adarro/ddo-calc | subprojects/common/ddo-core/src/test/scala/io/truthencode/ddo/model/effect/EffectParameterBuilderTest.scala | Scala | apache-2.0 | 3,405 |
package de.hsaugsburg.ego.planetsim
import de.hsaugsburg.smas.startup.{BasicSystemBuilder, XmlSystemBuilder}
import de.hsaugsburg.smas.util.HolonUtil
object StartGui
{
val configFile = "/config/planetsim/gui.xml"
def main(args: Array[String])
{
XmlSystemBuilder.runOverXmlFileAndBuildSystem(configFile)
}
}
object StartWorkers
{
val configFile = "/config/planetsim/workers.xml"
def main(args: Array[String])
{
val nodes = XmlSystemBuilder.runOverXmlFileAndBuildSystem(configFile)
val workerManager = nodes.head
val guiManager = HolonUtil.getHolonAddressesFromXml(StartGui.configFile).head
BasicSystemBuilder.introduceTwoHolonsToEachOther(workerManager, guiManager)
}
} | kitingChris/PlanetSim | src/main/scala/de/hsaugsburg/ego/planetsim/Launcher.scala | Scala | gpl-2.0 | 711 |
package com.ambrosoft
/**
* Created by jacek on 7/8/16.
*
* Approach:
*
 * Imagine all ways canonicalized by sorting coins from largest to smallest.
 * Thus, if n >= 25, at least one family of ways starts with 25,
 * followed by all the ways (n - 25) can be expressed, recursively (which therefore includes
 * ways starting with a second quarter, etc.).
*
* Then we need to give a chance to starting with 10 (but not going back to 25!)
*
*/
object CoinWays extends App {
// use diminishing coin values
def countWays(n: Int) = count(List(25, 10, 5, 1), n)
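  // Small worked examples, easy to verify by hand:
  //   countWays(6)  == 2   // 5+1 and six pennies
  //   countWays(10) == 4   // 10, 5+5, 5+1+1+1+1+1, ten pennies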
private def test(n: Int) = {
val res = countWays(n)
println(s"$n -> $res")
}
private def count(denominations: List[Int], amount: Int): Int = {
println(s"$amount\t\t$denominations")
if (amount == 0)
1 // successful recursion bottom: count this change
else
denominations match {
case highest :: smaller =>
val smallerCoinChangeCount = if (smaller.nonEmpty) count(smaller, amount) else 0
if (amount >= highest)
count(denominations, amount - highest) + smallerCoinChangeCount
else
smallerCoinChangeCount
}
}
test(20)
(1 to 30).foreach(amount => test(amount))
test(3000)
}
| JacekAmbroziak/Ambrosoft | src/main/scala/com/ambrosoft/CoinWays.scala | Scala | apache-2.0 | 1,274 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs102.boxes
import uk.gov.hmrc.ct.accounts.frs102.retriever.Frs102AccountsBoxRetriever
import uk.gov.hmrc.ct.box._
case class AC119A(value: Option[Int]) extends CtBoxIdentifier(name = "Intangible assets - Goodwill - Amortisation - Charge for year")
with CtOptionalInteger
with Input
with ValidatableBox[Frs102AccountsBoxRetriever]
with Validators{
override def validate(boxRetriever: Frs102AccountsBoxRetriever): Set[CtValidation] = {
collectErrors(
validateMoney(value, min = 0)
)
}
}
| hmrc/ct-calculations | src/main/scala/uk/gov/hmrc/ct/accounts/frs102/boxes/AC119A.scala | Scala | apache-2.0 | 1,154 |
package com.softwaremill.codebrag.dao.repositorystatus
import com.softwaremill.codebrag.domain.RepositoryStatus
trait RepositoryStatusDAO {
def updateRepoStatus(newStatus: RepositoryStatus)
def getRepoStatus(repoName: String): Option[RepositoryStatus]
} | softwaremill/codebrag | codebrag-dao/src/main/scala/com/softwaremill/codebrag/dao/repositorystatus/RepositoryStatusDAO.scala | Scala | agpl-3.0 | 259 |
package io.github.reggert.reb4s.test
import org.scalacheck.{Arbitrary, Gen}
import Arbitrary.arbitrary
import io.github.reggert.reb4s.Literal
import Literal.{CharLiteral, StringLiteral}
trait LiteralGenerators {
implicit val arbCharLiteral: Arbitrary[CharLiteral] = Arbitrary(genCharLiteral)
implicit val arbStringLiteral: Arbitrary[StringLiteral] =
Arbitrary(Gen.sized {size => if (size < 2) Gen.fail else Gen.choose(2, size) flatMap genStringLiteral })
implicit val arbLiteral: Arbitrary[Literal] =
Arbitrary(Gen.sized {size => if (size < 1) Gen.fail else Gen.choose(1, size) flatMap genLiteral })
def genCharLiteral : Gen[CharLiteral] =
for {c <- arbitrary[Char]} yield CharLiteral(c)
def genStringLiteral(size : Int) : Gen[StringLiteral] = {
require(size > 0, s"size=$size <= 0")
for {s <- Gen.listOfN(size, arbitrary[Char])} yield StringLiteral(s.mkString)
}
def genLiteral(size : Int) : Gen[Literal] = size match {
case 1 => genCharLiteral
case _ => genStringLiteral(size)
}
} | reggert/reb4s | src/test/scala/io/github/reggert/reb4s/test/LiteralGenerators.scala | Scala | lgpl-3.0 | 1,019 |
package com.twitter.inject.app
import com.google.inject.{Module, Stage}
import com.twitter.app.Flag
import com.twitter.inject.app.internal.InstalledModules
import com.twitter.inject.{Injector, TwitterModule}
object TestInjector {
/* Public */
def apply(modules: Module*): Injector = {
apply(modules = modules)
}
def apply(
clientFlags: Map[String, String] = Map(),
modules: Seq[Module],
overrideModules: Seq[Module] = Seq(),
stage: Stage = Stage.DEVELOPMENT): Injector = {
val moduleFlags = InstalledModules.findModuleFlags(modules ++ overrideModules)
parseClientFlags(
clientFlags,
moduleFlags)
InstalledModules.create(
flags = moduleFlags,
modules = modules,
overrideModules = overrideModules,
stage = stage).injector
}
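  /*
   * Illustrative usage sketch (module and flag names below are hypothetical):
   *
   *   val injector = TestInjector(
   *     clientFlags = Map("some.module.flag" -> "value"),  // parsed into a matching module flag if one exists,
   *     modules = Seq(MyModule1, MyModule2))                // otherwise set as a system property (see below)
   */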
/* Private */
/*
   * First we try to parse module flags with the client-provided flags. If no matching
   * module flag is found for a client flag, we set a system property instead, which
   * allows setting GlobalFlags (e.g. resolverMap) that aren't defined in modules.
* Note: We originally tried classpath scanning for the GlobalFlags using the Flags class,
* but this added many seconds to each test and also regularly ran out of perm gen...
*/
private def parseClientFlags(clientFlags: Map[String, String], moduleFlags: Seq[Flag[_]]) {
val moduleFlagsMap = moduleFlags groupBy {_.name} mapValues {_.head}
/* Parse module flags with client supplied flag values */
for (moduleFlag <- moduleFlags) {
clientFlags.get(moduleFlag.name) match {
case Some(clientFlagValue) => moduleFlag.parse(clientFlagValue)
case _ => moduleFlag.parse()
}
}
/* Set system property for clientFlags not found in moduleFlags */
for {
(clientFlagName, clientFlagValue) <- clientFlags
if !moduleFlagsMap.contains(clientFlagName)
} {
System.setProperty(clientFlagName, clientFlagValue)
}
}
}
| nkhuyu/finatra | inject/inject-app/src/test/scala/com/twitter/inject/app/TestInjector.scala | Scala | apache-2.0 | 1,938 |
package dk.bayes.clustergraph.infer
import org.junit._
import org.junit.Assert._
import dk.bayes.clustergraph.testutil._
import dk.bayes.clustergraph.testutil.AssertUtil._
import dk.bayes.clustergraph.testutil.StudentBN._
import dk.bayes.clustergraph.factor.Var
import dk.bayes.clustergraph.factor.Factor
import dk.bayes.clustergraph.infer.LoopyBP
class LoopyBPStudentTest {
val studentGraph = createStudentGraph()
def progress(iterNum: Int) = println("Loopy BP iter= " + iterNum)
val loopyBP = LoopyBP(studentGraph)
/**
* Tests for marginal() method
*/
@Test def marginal:Unit = {
loopyBP.calibrate(progress)
val difficultyMarginal = loopyBP.marginal(difficultyVar.id)
val intelliMarginal = loopyBP.marginal(intelliVar.id)
val gradeMarginal = loopyBP.marginal(gradeVar.id)
val satMarginal = loopyBP.marginal(satVar.id)
val letterMarginal = loopyBP.marginal(letterVar.id)
assertFactor(Factor(Var(1, 2), Array(0.6, 0.4)), difficultyMarginal, 0.0001)
assertFactor(Factor(Var(2, 2), Array(0.7, 0.3)), intelliMarginal, 0.0001)
assertFactor(Factor(Var(3, 3), Array(0.3620, 0.2884, 0.3496)), gradeMarginal, 0.0001)
assertFactor(Factor(Var(4, 2), Array(0.725, 0.275)), satMarginal, 0.0001)
assertFactor(Factor(Var(5, 2), Array(0.4976, 0.5023)), letterMarginal, 0.0001)
}
@Test def marginal_given_sat_is_high:Unit = {
loopyBP.setEvidence(satVar.id, 0)
loopyBP.calibrate(progress)
val difficultyMarginal = loopyBP.marginal(difficultyVar.id)
val intelliMarginal = loopyBP.marginal(intelliVar.id)
val gradeMarginal = loopyBP.marginal(gradeVar.id)
val satMarginal = loopyBP.marginal(satVar.id)
val letterMarginal = loopyBP.marginal(letterVar.id)
assertFactor(Factor(Var(1, 2), Array(0.6, 0.4)), difficultyMarginal, 0.0001)
assertFactor(Factor(Var(2, 2), Array(0.9172, 0.0827)), intelliMarginal, 0.0001)
assertFactor(Factor(Var(3, 3), Array(0.2446, 0.3257, 0.4295)), gradeMarginal, 0.0001)
assertFactor(Factor(Var(4, 2), Array(1d, 0)), satMarginal, 0.0001)
assertFactor(Factor(Var(5, 2), Array(0.58, 0.4199)), letterMarginal, 0.0001)
}
/**
* Tests for clusterBelief() method
*/
@Test def cluster_belief:Unit = {
loopyBP.calibrate(progress)
val difficultyClusterBelief = loopyBP.clusterBelief(1)
val intelliClusterBelief = loopyBP.clusterBelief(2)
val gradeClusterBelief = loopyBP.clusterBelief(3)
val satClusterBelief = loopyBP.clusterBelief(4)
val letterClusterBelief = loopyBP.clusterBelief(5)
assertFactor(Factor(Var(1, 2), Array(0.6, 0.4)), difficultyClusterBelief, 0.0001)
assertFactor(Factor(Var(2, 2), Array(0.7, 0.3)), intelliClusterBelief, 0.0001)
assertFactor(Factor(Var(2, 2), Var(1, 2), Var(3, 3),
Array(0.1260, 0.1680, 0.1260, 0.0140, 0.0700, 0.1960, 0.1620, 0.0144, 0.0036, 0.0600, 0.0360, 0.0240)), gradeClusterBelief, 0.0001)
assertFactor(Factor(Var(2, 2), Var(4, 2), Array(0.6650, 0.0350, 0.0600, 0.2400)), satClusterBelief, 0.0001)
assertFactor(Factor(Var(3, 3), Var(5, 2), Array(0.0362, 0.3258, 0.1154, 0.1730, 0.3461, 0.0035)), letterClusterBelief, 0.0001)
}
@Test def cluster_belief_given_sat_is_high:Unit = {
loopyBP.setEvidence(satVar.id, 0)
loopyBP.calibrate(progress)
val difficultyClusterBelief = loopyBP.clusterBelief(1)
val intelliClusterBelief = loopyBP.clusterBelief(2)
val gradeClusterBelief = loopyBP.clusterBelief(3)
val satClusterBelief = loopyBP.clusterBelief(4)
val letterClusterBelief = loopyBP.clusterBelief(5)
assertFactor(Factor(Var(1, 2), Array(0.6, 0.4)), difficultyClusterBelief, 0.0001)
assertFactor(Factor(Var(2, 2), Array(0.9172, 0.0828)), intelliClusterBelief, 0.0001)
assertFactor(Factor(Var(2, 2), Var(1, 2), Var(3, 3),
Array(0.1651, 0.2201, 0.1651, 0.0183, 0.0917, 0.2568, 0.0447, 0.0040, 0.0010, 0.0166, 0.0099, 0.0066)), gradeClusterBelief, 0.0001)
assertFactor(Factor(Var(2, 2), Var(4, 2), Array(0.9172, 0.0000, 0.0828, 0.0000)), satClusterBelief, 0.0001)
assertFactor(Factor(Var(3, 3), Var(5, 2), Array(0.0245, 0.2202, 0.1303, 0.1955, 0.4252, 0.0043)), letterClusterBelief, 0.0001)
}
@Test def cluster_belief_given_full_evidence:Unit = {
loopyBP.setEvidence(difficultyVar.id, 0)
loopyBP.setEvidence(intelliVar.id, 1)
loopyBP.setEvidence(gradeVar.id, 0)
loopyBP.setEvidence(satVar.id, 0)
loopyBP.setEvidence(letterVar.id, 1)
loopyBP.calibrate(progress)
val difficultyClusterBelief = loopyBP.clusterBelief(1)
val intelliClusterBelief = loopyBP.clusterBelief(2)
val gradeClusterBelief = loopyBP.clusterBelief(3)
val satClusterBelief = loopyBP.clusterBelief(4)
val letterClusterBelief = loopyBP.clusterBelief(5)
assertFactor(Factor(Var(1, 2), Array(1d, 0)), difficultyClusterBelief, 0.0001)
assertFactor(Factor(Var(2, 2), Array(0d, 1)), intelliClusterBelief, 0.0001)
assertFactor(Factor(Var(2, 2), Var(1, 2), Var(3, 3),
Array(0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0)), gradeClusterBelief, 0.0001)
assertFactor(Factor(Var(2, 2), Var(4, 2), Array(0, 0, 1, 0.0000)), satClusterBelief, 0.0001)
assertFactor(Factor(Var(3, 3), Var(5, 2), Array(0, 1, 0, 0, 0, 0)), letterClusterBelief, 0.0001)
}
/**
* Tests for logLikelihood() method
*/
@Test(expected = classOf[IllegalArgumentException]) def logLikelihood_empty_assignment:Unit = {
loopyBP.logLikelihood(Array())
}
@Test(expected = classOf[IllegalArgumentException]) def logLikelihood_partial_assignment:Unit = {
val assignment = Array((1, 0), (2, 0))
loopyBP.logLikelihood(assignment)
}
@Test(expected = classOf[IllegalArgumentException]) def logLikelihood_assignment_not_unique:Unit = {
val assignment = Array((1, 0), (2, 0), (1, 0), (3, 0))
loopyBP.logLikelihood(assignment)
}
@Test def logLikelihood:Unit = {
val assignment = Array((1, 0), (2, 1), (3, 1), (4, 0), (5, 1))
val llh = loopyBP.logLikelihood(assignment)
assertEquals(-6.3607, llh, 0.0001)
}
/**
* Tests for logLikelihood of evidence
*
*/
@Test def logLikelihood_of_sat_is_high:Unit = {
val logLikelihood = loopyBP.calibrateWithEvidence(List((satVar.id, 0)), progress)
assertEquals(-0.32158, logLikelihood, 0.0001)
val difficultyClusterBelief = loopyBP.clusterBelief(1)
val intelliClusterBelief = loopyBP.clusterBelief(2)
val gradeClusterBelief = loopyBP.clusterBelief(3)
val satClusterBelief = loopyBP.clusterBelief(4)
val letterClusterBelief = loopyBP.clusterBelief(5)
assertFactor(Factor(Var(1, 2), Array(0.6, 0.4)), difficultyClusterBelief, 0.0001)
assertFactor(Factor(Var(2, 2), Array(0.9172, 0.0828)), intelliClusterBelief, 0.0001)
assertFactor(Factor(Var(2, 2), Var(1, 2), Var(3, 3),
Array(0.1651, 0.2201, 0.1651, 0.0183, 0.0917, 0.2568, 0.0447, 0.0040, 0.0010, 0.0166, 0.0099, 0.0066)), gradeClusterBelief, 0.0001)
assertFactor(Factor(Var(2, 2), Var(4, 2), Array(0.9172, 0.0000, 0.0828, 0.0000)), satClusterBelief, 0.0001)
assertFactor(Factor(Var(3, 3), Var(5, 2), Array(0.0245, 0.2202, 0.1303, 0.1955, 0.4252, 0.0043)), letterClusterBelief, 0.0001)
}
@Test def logLikelihood_of_full_evidence:Unit = {
val evidence = List((difficultyVar.id, 0), (intelliVar.id, 1), (gradeVar.id, 0), (satVar.id, 0), (letterVar.id, 1))
val logLikelihood = loopyBP.calibrateWithEvidence(evidence, progress)
assertEquals(-3.5349, logLikelihood, 0.0001)
}
} | danielkorzekwa/bayes-scala | src/test/scala/dk/bayes/clustergraph/infer/LoopyBPStudentTest.scala | Scala | bsd-2-clause | 7,491 |
package com.twitter.concurrent
import com.twitter.util.{Future, Promise}
import java.util.concurrent.atomic.AtomicReference
import scala.annotation.tailrec
import scala.collection.immutable.Queue
object AsyncQueue {
private sealed trait State[+T]
private case object Idle extends State[Nothing]
private case class Offering[T](q: Queue[T]) extends State[T]
private case class Polling[T](q: Queue[Promise[T]]) extends State[T]
private case class Excepting(exc: Throwable) extends State[Nothing]
}
/**
* An asynchronous FIFO queue. In addition to providing {{offer()}}
* and {{poll()}}, the queue can be "failed", flushing current
* pollers.
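 *
 * A minimal usage sketch (illustrative only):
 * {{{
 * val q = new AsyncQueue[Int]
 * val f = q.poll()      // returns a Future that is not yet satisfied
 * q.offer(1)            // satisfies the pending poll with 1
 * q.fail(new Exception) // current and subsequent pollers are completed with the exception
 * }}}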
*/
class AsyncQueue[T] {
import AsyncQueue._
private[this] val state = new AtomicReference[State[T]](Idle)
def size: Int = state.get match {
case Offering(q) => q.size
case _ => 0
}
/**
* Retrieves and removes the head of the queue, completing the
* returned future when the element is available.
*/
@tailrec
final def poll(): Future[T] = state.get match {
case s@Idle =>
val p = new Promise[T]
if (state.compareAndSet(s, Polling(Queue(p)))) p else poll()
case s@Polling(q) =>
val p = new Promise[T]
if (state.compareAndSet(s, Polling(q.enqueue(p)))) p else poll()
case s@Offering(q) =>
val (elem, nextq) = q.dequeue
val nextState = if (nextq.nonEmpty) Offering(nextq) else Idle
if (state.compareAndSet(s, nextState)) Future.value(elem) else poll()
case Excepting(exc) =>
Future.exception(exc)
}
/**
* Insert the given element at the tail of the queue.
*/
@tailrec
final def offer(elem: T): Unit = state.get match {
case s@Idle =>
if (!state.compareAndSet(s, Offering(Queue(elem))))
offer(elem)
case s@Offering(q) =>
if (!state.compareAndSet(s, Offering(q.enqueue(elem))))
offer(elem)
case s@Polling(q) =>
val (waiter, nextq) = q.dequeue
val nextState = if (nextq.nonEmpty) Polling(nextq) else Idle
if (state.compareAndSet(s, nextState))
waiter.setValue(elem)
else
offer(elem)
case Excepting(_) =>
// Drop.
}
/**
* Fail the queue: current and subsequent pollers will be completed
* with the given exception.
*/
@tailrec
final def fail(exc: Throwable): Unit = state.get match {
case s@Idle =>
if (!state.compareAndSet(s, Excepting(exc)))
fail(exc)
case s@Polling(q) =>
if (!state.compareAndSet(s, Excepting(exc))) fail(exc) else
q foreach(_.setException(exc))
case s@Offering(_) =>
if (!state.compareAndSet(s, Excepting(exc))) fail(exc)
case Excepting(_) => // Just take the first one.
}
override def toString = "AsyncQueue<%s>".format(state.get)
}
| mosesn/util | util-core/src/main/scala/com/twitter/concurrent/AsyncQueue.scala | Scala | apache-2.0 | 2,793 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.kudu.tools.data
import com.beust.jcommander.Parameters
import org.locationtech.geomesa.kudu.data.KuduDataStore
import org.locationtech.geomesa.kudu.tools.KuduDataStoreCommand
import org.locationtech.geomesa.kudu.tools.KuduDataStoreCommand.KuduParams
import org.locationtech.geomesa.kudu.tools.data.KuduDeleteCatalogCommand.KuduDeleteCatalogParams
import org.locationtech.geomesa.tools.data.{DeleteCatalogCommand, DeleteCatalogParams}
class KuduDeleteCatalogCommand extends DeleteCatalogCommand[KuduDataStore] with KuduDataStoreCommand {
override val params = new KuduDeleteCatalogParams
}
object KuduDeleteCatalogCommand {
@Parameters(commandDescription = "Delete a GeoMesa catalog completely (and all features in it)")
class KuduDeleteCatalogParams extends DeleteCatalogParams with KuduParams
}
| ddseapy/geomesa | geomesa-kudu/geomesa-kudu-tools/src/main/scala/org/locationtech/geomesa/kudu/tools/data/KuduDeleteCatalogCommand.scala | Scala | apache-2.0 | 1,303 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import events.Event
import org.scalatest.events.TestFailed
import java.io.PrintStream
import Reporter.propagateDispose
private[scalatest] class StopOnFailureReporter(dispatch: Reporter, stopper: Stopper, val out: PrintStream) extends CatchReporter {
def doApply(event: Event): Unit = {
event match {
case testFailed: TestFailed => stopper.requestStop()
case _ =>
}
dispatch(event)
}
def doDispose(): Unit = {
propagateDispose(dispatch)
}
}
| dotty-staging/scalatest | scalatest/src/main/scala/org/scalatest/StopOnFailureReporter.scala | Scala | apache-2.0 | 1,110 |
import sbt._
import Keys._
object BuildSettings {
val paradiseVersion = "2.0.0"
val buildSettings = Defaults.defaultSettings ++ Seq(
organization := "be.angelcorp.scala-glsl",
version := "1.0.0-SNAPSHOT",
scalacOptions ++= Seq(),
scalaVersion := "2.11.1",
crossScalaVersions := Seq("2.11.0", "2.11.1"),
resolvers += Resolver.sonatypeRepo("snapshots"),
resolvers += Resolver.sonatypeRepo("releases"),
libraryDependencies ++= Seq(
"org.scalatest" % "scalatest_2.11" % "2.2.0" % Test,
"ch.qos.logback" % "logback-classic" % "1.1.2"
),
addCompilerPlugin("org.scalamacros" % "paradise" % paradiseVersion cross CrossVersion.full),
publishMavenStyle := true,
publishArtifact in Test := false,
publishTo := {
val nexus = "http://jetty.angelcorp.be/nexus/"
if (isSnapshot.value)
Some("snapshots" at nexus + "content/repositories/snapshots")
else
Some("releases" at nexus + "content/repositories/releases")
}
)
}
object MyBuild extends Build {
import BuildSettings._
lazy val root: Project = Project(
"root",
file("."),
settings = buildSettings ++ Seq(
run <<= run in Compile in core
)
) aggregate(macros, core)
lazy val macros: Project = Project(
"macros",
file("macros"),
settings = buildSettings ++ Seq(
libraryDependencies <+= (scalaVersion)("org.scala-lang" % "scala-reflect" % _),
libraryDependencies ++= (
if (scalaVersion.value.startsWith("2.10")) List("org.scalamacros" %% "quasiquotes" % paradiseVersion)
else Nil
)
)
)
lazy val core: Project = Project(
"core",
file("core"),
settings = buildSettings
) dependsOn(macros)
}
| AODtorusan/scala-glsl | project/Build.scala | Scala | mit | 1,737 |
package org.mozartoz.bootcompiler
package transform
import oz._
import ast._
import bytecode._
import symtab._
object CodeGen extends Transformer with TreeDSL {
def code = abstraction.codeArea
private implicit def symbol2reg(symbol: Symbol) =
code.registerFor(symbol)
private implicit def varorconst2reg(expr: VarOrConst) =
code.registerFor(expr)
private implicit def reg2ops[A <: Register](self: A) = new {
def := (source: Register)(implicit ev: A <:< XOrYReg) {
code += OpMove(source, self)
}
def === (rhs: Register) {
code += OpUnify(self, rhs)
}
}
private implicit def symbol2ops2(self: Symbol) = new {
def toReg = symbol2reg(self)
}
private implicit def value2ops(self: OzValue) = new {
def toReg = code.registerFor(self)
}
def initArrayWith(values: List[Expression]) {
for (value <- values) {
varorconst2reg(value.asInstanceOf[VarOrConst]) match {
case v:XReg => code += SubOpArrayFillX(v)
case v:YReg => code += SubOpArrayFillY(v)
case v:GReg => code += SubOpArrayFillG(v)
case v:KReg => code += SubOpArrayFillK(v)
}
}
}
override def applyToAbstraction() {
// Allocate local variables
val localCount = abstraction.formals.size + abstraction.locals.size
if (localCount != 0)
code += OpAllocateY(localCount)
// Save formals in local variables
for ((formal, index) <- abstraction.formals.zipWithIndex)
code += OpMove(XReg(index), formal.toReg.asInstanceOf[XOrYReg])
// Create new variables for the other locals
for (local <- abstraction.locals)
code += OpCreateVarY(local.toReg.asInstanceOf[YReg])
// Actual codegen
generate(abstraction.body)
// Return
code += OpReturn()
}
def generate(statement: Statement) {
statement match {
case SkipStatement() =>
// skip
case CompoundStatement(statements) =>
for (stat <- statements)
generate(stat)
case Variable(lhs) === Variable(rhs) =>
lhs.toReg === rhs.toReg
case Variable(lhs) === Constant(rhs) =>
lhs.toReg === rhs.toReg
case Variable(lhs) === (rhs @ Record(_, fields)) if rhs.isCons =>
val List(RecordField(_, head:VarOrConst),
RecordField(_, tail:VarOrConst)) = fields
code += OpCreateConsUnify(lhs)
initArrayWith(List(head, tail))
case Variable(lhs) === (rhs @ Record(Constant(label), fields))
if rhs.isTuple =>
code += OpCreateTupleUnify(label.toReg, fields.size, lhs)
initArrayWith(fields map (_.value))
case Variable(lhs) === (rhs @ Record(_, fields))
if rhs.hasConstantArity =>
code += OpCreateRecordUnify(rhs.getConstantArity.toReg,
fields.size, lhs)
initArrayWith(fields map (_.value))
case Variable(lhs) === (rhs @ CreateAbstraction(
Constant(body), globals)) =>
code += OpCreateAbstractionUnify(body.toReg, globals.size, lhs)
initArrayWith(globals)
case IfStatement(cond:Variable, trueStat, falseStat) =>
XReg(0) := cond.symbol
val condBranchHole = code.addHole()
var branchHoleInFalseBranch: CodeArea#Hole = null
var branchHoleInTrueBranch: CodeArea#Hole = null
val trueBranchSize = code.counting {
generate(trueStat)
branchHoleInTrueBranch = code.addHole(2)
}
val falseBranchSize = code.counting {
generate(falseStat)
branchHoleInFalseBranch = code.addHole(2)
}
val errorSize = code.counting {
// TODO generate proper error code
code += OpMove(code.registerFor(OzAtom("condBranchError")), XReg(0))
code += OpCallBuiltin(
code.registerFor(OzBuiltin(program.builtins.raiseError)),
1, List(XReg(0)))
}
condBranchHole fillWith OpCondBranch(XReg(0),
trueBranchSize, trueBranchSize + falseBranchSize)
branchHoleInTrueBranch fillWith OpBranch(falseBranchSize + errorSize)
branchHoleInFalseBranch fillWith OpBranch(errorSize)
case MatchStatement(Variable(value), clauses, elseStat) =>
val matchHole = code.addHole()
val clauseCount = clauses.size
val patterns = new Array[OzValue](clauseCount)
val branchToAfterHoles = new Array[CodeArea#Hole](clauseCount+1)
val jumpOffsets = new Array[Int](clauseCount+1)
jumpOffsets(0) = code.counting {
generate(elseStat)
if (clauseCount > 0)
branchToAfterHoles(0) = code.addHole(2)
}
for ((clause, index) <- clauses.zipWithIndex) {
// Pattern, which must be constant at this point
val Constant(pattern) = clause.pattern
patterns(index) = OzSharp(List(
pattern, OzInt(jumpOffsets(index))))
// The guard must be empty at this point
assert(clause.guard.isEmpty)
// Body
jumpOffsets(index+1) = jumpOffsets(index) + code.counting {
// Captures
var captureIndex = 0
def walk(value: OzValue): Unit = value match {
case OzPatMatCapture(symbol) =>
captureIndex += 1
symbol.captureIndex = captureIndex
val reg = code.registerFor(symbol).asInstanceOf[YReg]
reg := XReg(captureIndex)
case OzRecord(label, fields) =>
for (OzRecordField(_, fieldValue) <- fields)
walk(fieldValue)
case OzPatMatOpenRecord(label, fields) =>
for (OzRecordField(_, fieldValue) <- fields)
walk(fieldValue)
case OzPatMatConjunction(parts) =>
parts foreach walk
case _ => ()
}
walk(pattern)
// Actual body
generate(clause.body)
if (index+1 < clauseCount)
branchToAfterHoles(index+1) = code.addHole(2)
}
}
val totalSize = jumpOffsets(clauseCount)
val patternsInfo = OzSharp(patterns.toList)
matchHole fillWith OpPatternMatch(value, patternsInfo.toReg)
for (index <- 0 until clauseCount) {
branchToAfterHoles(index) fillWith OpBranch(
totalSize - jumpOffsets(index))
}
case TryStatement(body, Variable(exceptionVar), catchBody) =>
val setupHandlerHole = code.addHole()
var branchHole: CodeArea#Hole = null
val catchSize = code.counting {
exceptionVar.toReg.asInstanceOf[YReg] := XReg(0)
generate(catchBody)
branchHole = code.addHole(2)
}
val bodySize = code.counting {
generate(body)
code += OpPopExceptionHandler()
}
setupHandlerHole fillWith OpSetupExceptionHandler(catchSize)
branchHole fillWith OpBranch(bodySize)
case CallStatement(Constant(callable @ OzBuiltin(builtin)), args) =>
val argCount = args.size
if (argCount != builtin.arity) {
program.reportError(
"Wrong arity for builtin application of " + builtin +
" (%d expected but %d found)".format(builtin.arity, argCount),
statement.pos)
} else {
val paramKinds = builtin.paramKinds
val argsWithKindAndIndex = args.zip(paramKinds).zipWithIndex
for {
((arg:VarOrConst, kind), index) <- argsWithKindAndIndex
if kind == Builtin.ParamKind.In
} {
XReg(index) := arg
}
val argsRegs = (0 until argCount).toList map XReg
if (builtin.inlineable)
code += OpCallBuiltinInline(builtin.inlineOpCode, argsRegs)
else {
val builtinReg = code.registerFor(callable)
code += OpCallBuiltin(builtinReg, argCount, argsRegs)
}
for {
((arg:VarOrConst, kind), index) <- argsWithKindAndIndex
if kind == Builtin.ParamKind.Out
} {
XReg(index) === arg
}
}
case CallStatement(Variable(target), args) =>
for ((arg:VarOrConst, index) <- args.zipWithIndex)
XReg(index) := arg
code += OpCall(target, args.size)
}
}
}
| layus/mozart2 | bootcompiler/src/main/scala/org/mozartoz/bootcompiler/transform/CodeGen.scala | Scala | bsd-2-clause | 8,343 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller, Michael Cotterell
* @version 1.2
* @date Wed Nov 2 22:32:00 EDT 2011
* @see LICENSE (MIT style license file).
*/
package scalation.math
import scala.collection.mutable.{IndexedSeq, ArrayBuffer}
import scala.math.{floor, sqrt}
import scala.util.control.Breaks.{breakable, break}
import scala.util.Random
import scalation.util.Swap.swap
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `Primes` object provides an array of 1000 prime numbers as well as methods
* to generate prime numbers within a given range.
*/
object Primes
{
/** Scala's random number generator
*/
private val rn = new Random ()
/** Precomputed and randomized array of 1000 4-digit prime numbers
*/
val prime = Array [Int] (
4919, 8689, 3089, 1447, 2371, 9923, 1811, 2671, 9433, 6581,
4339, 4397, 7879, 4549, 9907, 2609, 4751, 8731, 2381, 4567,
5573, 3137, 9851, 4957, 2713, 2633, 8693, 1153, 3923, 2857,
9619, 5557, 6229, 3217, 6917, 2447, 7417, 1831, 5119, 1429,
1409, 6703, 1171, 1663, 1399, 2731, 8237, 9859, 5519, 5099,
3433, 8941, 6211, 8243, 3343, 9371, 8443, 4937, 1217, 1423,
1301, 9011, 3221, 9461, 5881, 3163, 8219, 2011, 6469, 7823,
8311, 5521, 6163, 7027, 3701, 6277, 1721, 3889, 6967, 3167,
9013, 4481, 7001, 4159, 9929, 9467, 3463, 3911, 7907, 2879,
6547, 2621, 3041, 1901, 5653, 7673, 5813, 4987, 1297, 3797,
5021, 5839, 9091, 9677, 4801, 6203, 5393, 6067, 3853, 4253,
5023, 5531, 9377, 1109, 3779, 9281, 3761, 7919, 9067, 2971,
8741, 2143, 9967, 5087, 5417, 8999, 6337, 3467, 4523, 6563,
5147, 7577, 7039, 4289, 6449, 6991, 2473, 2591, 8191, 8581,
5441, 1709, 2549, 1039, 8573, 5591, 3559, 3739, 3557, 6653,
4073, 4229, 9257, 7283, 8963, 2551, 9109, 6073, 2243, 2141,
3299, 4363, 3257, 2851, 8269, 5051, 1543, 1289, 5669, 8293,
6779, 1777, 1699, 7369, 7297, 8893, 7621, 3581, 6263, 6737,
6857, 9161, 3947, 7213, 8761, 5449, 1307, 1279, 4517, 6257,
9473, 5077, 4871, 8861, 4463, 7949, 7841, 2843, 3727, 4679,
4111, 4561, 5407, 4493, 4597, 9719, 4451, 5779, 2719, 3517,
2399, 4273, 5039, 2423, 7643, 6553, 5903, 8887, 6679, 7459,
1327, 7013, 6247, 2777, 3659, 2903, 7489, 3203, 4211, 8011,
9643, 1483, 4391, 4591, 1453, 1907, 6481, 8263, 2647, 9769,
4051, 2467, 9887, 6823, 7757, 4787, 7817, 1879, 3307, 5939,
8629, 8599, 8863, 8111, 3119, 3907, 1609, 4057, 4373, 3677,
5851, 9431, 3733, 1783, 1061, 3457, 2281, 9437, 2087, 3209,
3643, 6101, 4703, 6689, 1621, 5507, 9697, 9511, 3331, 2749,
6793, 9029, 7589, 2309, 3881, 9181, 1553, 9901, 6151, 5737,
7703, 1607, 6869, 2383, 4639, 5153, 5443, 7561, 4657, 8681,
2663, 7687, 1187, 2837, 5623, 6571, 8467, 9323, 2029, 1193,
3769, 3637, 4093, 4139, 3001, 7331, 7937, 3989, 4951, 7681,
3967, 2251, 9791, 5827, 1471, 6397, 5701, 2293, 2269, 5711,
2999, 3229, 6883, 6719, 2347, 8539, 3823, 1033, 7669, 6701,
1319, 1877, 1213, 5309, 8737, 5323, 4621, 5059, 4877, 6983,
8377, 9811, 6661, 7433, 5261, 4019, 4519, 3461, 1511, 3613,
1367, 9941, 7541, 6451, 7517, 6217, 9137, 8291, 3083, 6949,
2003, 1049, 2707, 2819, 2791, 9157, 8923, 1693, 1861, 7901,
3079, 6709, 1093, 8513, 2357, 4421, 5927, 4243, 4261, 9049,
9857, 6871, 6007, 7109, 8719, 2617, 8009, 6673, 7219, 6047,
5011, 4099, 2221, 4231, 2953, 7607, 3109, 7309, 2437, 5651,
1579, 7753, 6911, 8053, 4817, 6599, 5501, 4507, 1451, 3851,
9679, 3469, 2521, 6299, 6343, 7789, 4007, 3347, 9767, 4447,
4973, 2711, 6803, 7573, 9103, 9403, 6607, 7193, 2179, 7927,
9241, 6173, 7537, 1151, 1847, 6761, 7559, 7951, 2833, 9293,
8419, 1667, 2083, 2213, 7321, 1973, 4637, 8783, 7549, 5843,
7393, 2339, 6133, 2311, 2417, 2153, 6691, 6367, 4933, 6863,
7741, 6113, 6763, 9391, 3391, 8363, 1873, 9277, 1433, 7043,
3533, 2377, 9203, 2203, 1759, 2741, 7411, 1361, 5563, 3583,
7759, 3709, 3491, 8837, 8369, 3539, 4013, 6271, 8803, 9781,
2111, 2687, 7547, 5857, 7867, 8521, 4663, 2017, 8663, 3389,
1627, 5743, 2129, 8929, 6373, 6961, 3931, 5527, 2579, 6907,
6833, 3863, 1087, 6011, 4177, 5233, 5167, 8171, 7933, 9421,
5749, 3019, 7307, 8087, 5741, 2963, 7717, 2341, 4603, 4337,
6121, 4003, 3011, 5801, 1013, 8849, 4831, 8447, 4441, 1933,
2411, 5683, 1949, 1459, 1069, 5003, 4969, 2069, 9533, 3673,
7079, 1303, 6959, 6079, 8753, 7333, 5861, 2677, 4157, 5479,
8329, 3571, 5791, 9479, 8429, 5641, 1657, 2099, 9661, 6427,
1381, 4931, 6269, 2753, 6971, 9739, 7829, 6131, 5101, 4129,
1129, 5783, 6529, 7247, 7019, 4513, 7351, 7207, 5399, 6037,
9133, 3617, 1091, 2237, 1753, 9787, 8209, 3691, 7103, 5647,
3319, 4783, 2861, 7477, 7793, 3943, 1697, 7853, 8831, 1871,
7057, 6221, 6781, 1163, 3449, 2027, 9341, 1487, 5231, 3301,
4733, 7649, 5279, 6029, 9833, 3511, 1117, 4813, 2543, 9629,
5387, 5381, 1993, 8623, 4861, 2729, 6091, 4651, 1229, 4457,
4219, 7603, 2267, 7451, 2089, 9227, 5303, 3329, 9871, 4217,
4723, 7129, 6491, 5693, 5657, 8273, 1571, 5981, 1259, 6827,
3833, 7069, 6791, 5471, 8221, 6421, 3547, 2797, 3271, 1427,
7523, 6899, 8069, 3803, 8161, 5179, 1733, 6043, 9721, 8089,
3929, 9187, 9343, 8101, 2503, 9613, 1439, 8147, 8081, 6089,
1741, 3631, 8543, 1867, 1231, 9127, 9283, 9319, 3767, 3037,
7349, 4283, 1669, 9649, 1481, 2039, 7127, 3371, 3407, 4789,
1223, 8933, 1583, 4271, 1999, 8167, 9817, 9059, 4357, 5639,
4889, 9001, 4079, 5281, 3719, 7963, 4799, 2239, 8287, 9397,
1249, 1889, 6317, 3541, 6947, 1549, 8093, 7487, 7727, 8627,
5351, 8527, 4583, 7457, 9311, 7151, 4297, 5879, 8839, 6569,
7723, 9601, 3023, 7237, 8389, 6521, 3529, 9973, 1321, 6287,
7159, 8819, 1201, 9337, 6619, 5923, 9151, 1747, 4943, 2909,
3361, 8669, 9587, 9419, 7877, 1123, 5197, 1277, 6197, 1637,
8233, 4909, 4649, 2113, 2459, 5297, 5431, 6659, 9043, 9539,
2137, 4027, 4127, 6551, 1987, 7243, 2801, 6301, 3623, 1567,
2389, 1597, 7529, 5419, 2081, 7591, 8353, 2789, 4409, 3697,
1373, 3191, 5009, 9497, 5483, 8563, 6733, 7177, 2273, 4153,
1531, 2287, 9803, 7499, 2887, 2393, 1913, 8677, 1051, 5689,
3187, 6977, 8867, 9413, 3917, 5477, 6829, 8059, 1031, 7993,
8609, 4729, 9743, 9439, 8821, 4349, 9491, 5867, 1723, 4241,
9883, 8461, 8297, 1619, 1291, 2161, 6199, 4483, 7211, 2803,
2767, 6637, 3169, 8431, 3821, 8807, 8597, 9521, 4903, 2957,
4673, 8317, 5273, 9551, 9221, 9007, 1523, 7507, 2683, 1237,
7699, 6353, 6389, 2131, 9949, 8387, 8641, 7253, 9463, 4759,
3049, 4021, 4133, 4423, 1613, 5081, 1097, 1789, 8117, 8647,
2693, 5717, 1063, 4793, 3413, 2557, 8779, 4643, 7187, 3067,
3607, 3181, 9829, 2351, 2539, 8423, 2531, 3253, 3313, 2897,
5807, 1103, 1493, 6379, 4091, 8501, 2333, 4259, 1559, 7873,
4001, 5107, 7229, 6323, 2441, 8969, 9749, 8039, 4999, 8971,
6361, 6577, 5953, 1997, 8951, 3593, 1019, 2659, 5437, 1601,
5171, 3061, 5659, 6359, 6053, 9209, 4721, 5113, 4049, 1283,
9199, 8707, 3259, 9349, 8231, 3877, 3671, 1181, 7121, 6473,
1931, 3323, 1489, 5581, 8537, 4993, 7691, 8747, 7481, 1499,
9547, 9689, 5869, 5897, 9931, 3527, 5333, 2477, 5821, 2939)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Make an array of prime numbers for storing within a program.
*/
def makePrimeList ()
{
val prime = genPrimesSoA ()
shuffle (prime)
println ("makePrimeList: " + prime.size + " prime numbers")
println (" val prime = Array (")
printAll (prime)
} // makePrimeList
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Shuffle the elements in the array buffer to randomize the prime numbers.
*/
def shuffle (a: ArrayBuffer [Int])
{
for (i <- 0 until a.size) swap (a, i, rn.nextInt (a.size))
} // shuffle
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Generate, based on the Sieve of Atkin (SoA), prime numbers between
* integers 'lb' and 'ub'.
* @see http://en.wikipedia.org/wiki/Sieve_of_Atkin
* @param lb the lower bound
* @param ub the upper bound
*/
def genPrimesSoA (lb: Int = 1000, ub: Int = 10000): ArrayBuffer [Int] =
{
val sieve = IndexedSeq.fill [Boolean] (ub + 1)(false) // the sieve
val primes = ArrayBuffer [Int] () // array of prime numbers
val limit = sqrt (ub).toInt // square root of upper bound
var n = 1 // candidate numbers
var xx = 0 // x^2
var yy = 0 // y^2
var nn = 0 // n^2
var knn = 0 // k * n^2
// put in candidate primes:
// integers having odd number of representations by certain quadratic forms
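        // e.g. 13 % 12 == 1 and 13 = 4*1^2 + 3^2 is its only representation of the form
        // 4x^2 + y^2, so sieve(13) is toggled an odd number of times and survives as a
        // candidate (illustrative check, not part of the original source)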
for (x <- 1 to limit; y <- 1 to limit) {
xx = x * x; yy = y * y
n = 4*xx + yy
if (n <= ub && (n % 12 == 1 || n % 12 == 5)) sieve(n) = ! sieve(n)
n -= xx
if (n <= ub && n % 12 == 7) sieve(n) = ! sieve(n)
n = 3*xx - yy
if (x > y && n <= ub && n % 12 == 11) sieve(n) = ! sieve(n)
} // for
// eliminate composite numbers by sieving:
// if n is prime, omit multiples of its square
for (n <- 5 to limit if sieve(n)) {
nn = n * n
knn = nn
breakable { for (k <- 1 to ub) {
if (knn > ub) break
sieve(knn) = false
knn += nn
}} // for
} // for
for (n <- 2 to 3) sieve(n) = true // initialize the first two primes
for (n <- lb to ub if sieve(n)) primes += n // create the prime number array
primes // return the array of primes
} // genPrimesSoA
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Generate, based on the Sieve of Eratosthenes (SoE), prime numbers between
     *  integers 'lb' and 'ub'. This generator is simpler, but less efficient.
* @see http://en.wikipedia.org/wiki/Formula_for_primes
* @param lb the lower bound
* @param ub the upper bound
*/
def genPrimesSoE (lb: Int = 1000, ub: Int = 10000): ArrayBuffer [Int] =
{
val primes = ArrayBuffer [Int] () // array to hold prime numbers
var jroot = 0.0 // floor of square root of j
var sum = 0.0 // sum of terms
for (j <- lb to ub) { // if integer j is prime, add to array
jroot = floor (sqrt (j))
sum = 0.0
for (s <- 2.0 to jroot by 1.0) sum += floor (j / s) - floor ((j-1) / s)
if ((floor (-1.0 * sum / j)).toInt == 0) primes += j
} // for
primes // return the array of primes
} // genPrimesSoE
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Print an array buffer of prime numbers.
* @param primes the prime numbers to print
*/
def printAll (primes: ArrayBuffer [Int])
{
for (i <- 0 until primes.size) {
print ("%6d, ".format (primes(i)))
if (i % 10 == 9) println ()
} // for
println ()
} // printAll
} // Primes object
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `PrimesTest` object is used to perform timing tests on the `Primes` object.
*/
object PrimesTest extends App
{
import Primes._
/** Timings for generating primes using SoA
*/
val timeGenSoA = for (trial <- 1 to 10) yield time ("genPrimesSoA") { genPrimesSoA () }
/** Timings for generating primes using SoE
*/
val timeGenSoE = for (trial <- 1 to 10) yield time ("genPrimesSoE") { genPrimesSoE () }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Time the execution of method/function f.
* @param title display indicator
* @param f the method/function to time
*/
def time (title: String) (f: => Unit): String =
{
val start = System.currentTimeMillis
f
val stop = System.currentTimeMillis
"TIMING: %s = %dms".format (title, (stop - start))
} // time
for (s <- timeGenSoA) println (s)
for (s <- timeGenSoE) println (s)
makePrimeList ()
} // PrimesTest
| NBKlepp/fda | scalation_1.2/src/main/scala/scalation/math/Primes.scala | Scala | mit | 15,031 |
package com.alanjz.microstrike.gear
class Kevlar {
}
| spacenut/microstrike | src/com/alanjz/microstrike/gear/Kevlar.scala | Scala | gpl-2.0 | 55 |
package cross
class DummyClass extends Serializable {
val var1 = "đĐe"
val var2 = 123
val var3 = 0.999
val var4 = null
val var5 = new {
val x = Map(0 -> 123.4567, "k1" -> "value1", "k2" -> "Tiếng Việt")
}
val var6 = "a dynamic var"
}
object Data {
val dataFile = "php-ser.data"
val dummy = new DummyClass
val data = Array(
123456789012345D,
123456789012345D,
123.456,
123.4567F,
"Tiếng Việt",
Array("Cường", "Đỗ", "Đức", "Gia", "Bảo"),
Map(0 -> "Cường", 1 -> "Đỗ", 2 -> "Đức", 3 -> "Gia", 4 -> "Bảo"),
Array(123, "abc"),
Map(0 -> 123, 1 -> "abc"),
Map("k1" -> "value1", "k2" -> "Tiếng Việt"),
Map("k1" -> 123, "k2" ->
"aAàÀảẢãÃáÁạẠăĂằẰẳẲẵẴắẮặẶâÂầẦẩẨẫẪấẤậẬbBcCdDđĐeEèÈẻẺẽẼéÉẹẸêÊềỀểỂễỄếẾệỆfFgGhHiIìÌỉỈĩĨíÍịỊjJkKlLmMnNoOòÒỏỎõÕóÓọỌôÔồỒổỔỗỖốỐộỘơƠờỜởỞỡỠớỚợỢpPqQrRsStTuUùÙủỦũŨúÚụỤưƯừỪửỬữỮứỨựỰvVwWxXyYỳỲỷỶỹỸýÝỵỴzZ")
)
val dummyExpected = "cross\\\\DummyClass" -> Map(
"var1" -> dummy.var1,
"var2" -> dummy.var2,
"var3" -> dummy.var3,
"var4" -> dummy.var4,
"var5" -> ("stdClass", Map("x" -> Map("0" -> 123.4567, "k1" -> "value1", "k2" -> "Tiếng Việt"))),
"var6" -> dummy.var6
)
val expectedData = Map(
0 -> data(0),
1 -> data(1),
2 -> data(2),
3 -> 123.4567, //java Float ser then unser => Double
4 -> data(4),
5 -> Map(0 -> "Cường", 1 -> "Đỗ", 2 -> "Đức", 3 -> "Gia", 4 -> "Bảo"), //Array ser then unser => Map[String,_]
6 -> Map(0 -> "Cường", 1 -> "Đỗ", 2 -> "Đức", 3 -> "Gia", 4 -> "Bảo"), //Map[Int,_] --> Map[String,_]
7 -> Map(0 -> 123, 1 -> "abc"),
8 -> Map(0 -> 123, 1 -> "abc"),
9 -> data(9),
10 -> data(10)
)
}
| giabao/php-utils | src/test/scala/cross/Data.scala | Scala | apache-2.0 | 1,953 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2
import org.apache.hadoop.mapreduce.Job
import org.apache.spark.internal.Logging
import org.apache.spark.internal.io.FileCommitProtocol
import org.apache.spark.sql.connector.write.{BatchWrite, DataWriterFactory, WriterCommitMessage}
import org.apache.spark.sql.execution.datasources.{WriteJobDescription, WriteTaskResult}
import org.apache.spark.sql.execution.datasources.FileFormatWriter.processStats
class FileBatchWrite(
job: Job,
description: WriteJobDescription,
committer: FileCommitProtocol)
extends BatchWrite with Logging {
override def commit(messages: Array[WriterCommitMessage]): Unit = {
val results = messages.map(_.asInstanceOf[WriteTaskResult])
committer.commitJob(job, results.map(_.commitMsg))
logInfo(s"Write Job ${description.uuid} committed.")
processStats(description.statsTrackers, results.map(_.summary.stats))
logInfo(s"Finished processing stats for write job ${description.uuid}.")
}
override def useCommitCoordinator(): Boolean = false
override def abort(messages: Array[WriterCommitMessage]): Unit = {
committer.abortJob(job)
}
override def createBatchWriterFactory(): DataWriterFactory = {
FileWriterFactory(description, committer)
}
}
| caneGuy/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/FileBatchWrite.scala | Scala | apache-2.0 | 2,083 |
package distrimon.master
import akka.actor.ActorRef
import akka.io.Tcp.{ConnectionClosed, Write, Close, Received}
import akka.util.ByteString
import distrimon.{Connection, Envolope, State}
import Manager._
import MinionTunnel._
class MinionHandler(val manager: ActorRef, val conn: ActorRef) extends Connection {
val initial = Unidentified
register(conn)
case object Unidentified extends State { is {
case _: ConnectionClosed => context stop self
case Received(data) =>
val minionId = data.utf8String.toInt
log.info(s"identifying minion: $minionId")
manager ! GetMinion(minionId)
become(Identifying)
}}
case object Identifying extends State { is {
case _: ConnectionClosed => context stop self
case NoSuchMinion =>
log.error("identifying failed")
conn ! Write(ByteString("FAIL"))
conn ! Close
context stop self
case minion: Manager.Minion =>
val tunnel = minion.tunnel
log.error("succesfully identified")
conn ! Write(ByteString("OK"))
tunnel ! ConnUp
become(Identified(tunnel, conn))
}}
case class Identified(minionTunnel: ActorRef, conn: ActorRef) extends Connected with Delivery {
def handleClose(): Unit = {
log.error("connection closed unexpectedly")
minionTunnel ! ConnDown
become(Disconnected(minionTunnel))
}
}
case class Disconnected(minionTunnel: ActorRef) extends super.Disconnected with Delivery { is {
case e: Envolope => retry(e)
}}
trait Delivery {
val minionTunnel: ActorRef
def deliver(e: Envolope): Unit = minionTunnel ! Recv(e)
def retry(e: Envolope): Unit = minionTunnel ! e
}
} | ConnorDillon/distrimon | src/main/scala/distrimon/master/MinionHandler.scala | Scala | gpl-3.0 | 1,670 |
/*
* Copyright (C) 2010 Romain Reuillon
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openmole.plugin.sampling.lhs
import org.openmole.core.dsl._
import org.openmole.core.dsl.extension._
object LHS {
implicit def isSampling: IsSampling[LHS] = lhs ⇒ {
def apply = FromContext { p ⇒
import p._
val s = lhs.sample.from(context)
val vectorSize = lhs.factor.map(_.size(context)).sum
def values = LHS.lhsValues(vectorSize, s, random())
values.map(v ⇒ ScalarOrSequenceOfDouble.unflatten(lhs.factor, v).from(context)).iterator
}
Sampling(
apply,
lhs.factor.map(_.prototype),
lhs.factor.flatMap(_.inputs),
lhs.sample.validate
)
}
def lhsValues(dimensions: Int, samples: Int, rng: scala.util.Random) = Array.fill(dimensions) {
org.openmole.tool.random.shuffled(0 until samples)(rng).map { i ⇒ (i + rng.nextDouble) / samples }.toArray
}.transpose
}
case class LHS(sample: FromContext[Int], factor: Seq[ScalarOrSequenceOfDouble])
| openmole/openmole | openmole/plugins/org.openmole.plugin.sampling.lhs/src/main/scala/org/openmole/plugin/sampling/lhs/LHS.scala | Scala | agpl-3.0 | 1,658 |
package com.twitter.finagle.zipkin.thrift
import com.twitter.finagle.NoStacktrace
import com.twitter.finagle.stats.{DefaultStatsReceiver, NullStatsReceiver, StatsReceiver}
import com.twitter.finagle.tracing.{TraceId, Record, Tracer, Annotation, Trace}
import com.twitter.finagle.zipkin.{host => Host, initialSampleRate => sampleRateFlag}
import com.twitter.io.Buf
import com.twitter.util.events.{Event, Sink}
import com.twitter.util.{Time, Return, Throw, Try}
private object Json {
import com.fasterxml.jackson.annotation.{JsonTypeInfo, JsonInclude}
import com.fasterxml.jackson.core.`type`.TypeReference
import com.fasterxml.jackson.databind.{ObjectMapper, JavaType, JsonNode}
import com.fasterxml.jackson.databind.annotation.JsonDeserialize
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import java.lang.reflect.{Type, ParameterizedType}
  // Note: This type is just a convenience for deserialization in other
  // Event.Type constructions, but we actually require it for Trace
// because we're using Jackson's default typing mechanism for Annotation.
// If we use a Map, somewhere in Jackson's type resolution the type of
// Annotation is forgotten, and it is passed into the type resolver as an
// Object. Defining this Envelope preserves the type information.
@JsonInclude(JsonInclude.Include.NON_NULL)
case class Envelope(
id: String,
when: Long,
// We require an annotation here, because for small numbers, this gets
// deserialized with a runtime type of int.
// See: https://github.com/FasterXML/jackson-module-scala/issues/106.
@JsonDeserialize(contentAs=classOf[java.lang.Long]) traceId: Option[Long],
@JsonDeserialize(contentAs=classOf[java.lang.Long]) spanId: Option[Long],
data: Annotation)
val mapper = new ObjectMapper()
mapper.registerModule(DefaultScalaModule)
// Configures the mapper to include class information for Annotation.
object TypeResolverBuilder
extends ObjectMapper.DefaultTypeResolverBuilder(ObjectMapper.DefaultTyping.NON_FINAL) {
override def useForType(typ: JavaType) =
// Note: getRawClass would be an Object if not for `Envelope`.
typ.getRawClass == classOf[Annotation]
}
mapper.setDefaultTyping(
TypeResolverBuilder
.init(JsonTypeInfo.Id.CLASS, null)
.inclusion(JsonTypeInfo.As.WRAPPER_ARRAY))
def serialize(o: AnyRef): String = mapper.writeValueAsString(o)
def deserialize[T: Manifest](value: String): T =
mapper.readValue(value, typeReference[T])
def deserialize[T: Manifest](node: JsonNode): T =
mapper.readValue(node.traverse, typeReference[T])
private[this] def typeReference[T: Manifest] = new TypeReference[T] {
override def getType = typeFromManifest(manifest[T])
}
private[this] def typeFromManifest(m: Manifest[_]): Type =
if (m.typeArguments.isEmpty) m.runtimeClass else new ParameterizedType {
def getRawType = m.runtimeClass
def getActualTypeArguments = m.typeArguments.map(typeFromManifest).toArray
def getOwnerType = null
}
}
object ZipkinTracer {
lazy val default: Tracer = mk()
/**
* The [[com.twitter.util.events.Event.Type Event.Type]] for trace events.
*/
val Trace: Event.Type = {
new Event.Type {
val id = "Trace"
def serialize(event: Event) = event match {
case Event(etype, _, _, _: Annotation.BinaryAnnotation, _, _, _) if etype eq this =>
Throw(new IllegalArgumentException("unsupported format: " + event) with NoStacktrace)
case Event(etype, when, _, ann: Annotation, _, tid, sid) if etype eq this =>
val (t, s) = serializeTrace(tid, sid)
val data = Json.Envelope(id, when.inMilliseconds, t, s, ann)
Try(Buf.Utf8(Json.serialize(data)))
case _ =>
Throw(new IllegalArgumentException("unknown format: " + event))
}
def deserialize(buf: Buf) = for {
env <- Buf.Utf8.unapply(buf) match {
case None => Throw(new IllegalArgumentException("unknown format"))
case Some(str) => Try(Json.deserialize[Json.Envelope](str))
}
if env.id == id
} yield {
val when = Time.fromMilliseconds(env.when)
// This line fails without the JsonDeserialize annotation in Envelope.
val tid = env.traceId.getOrElse(Event.NoTraceId)
val sid = env.spanId.getOrElse(Event.NoSpanId)
Event(this, when, objectVal = env.data, traceIdVal = tid, spanIdVal = sid)
}
}
}
/**
* @param scribeHost Host to send trace data to
* @param scribePort Port to send trace data to
* @param statsReceiver Where to log information about tracing success/failures
* @param sampleRate How much data to collect. Default sample rate 0.1%. Max is 1, min 0.
*/
@deprecated("Use mk() instead", "6.1.0")
def apply(
scribeHost: String = Host().getHostName,
scribePort: Int = Host().getPort,
statsReceiver: StatsReceiver = NullStatsReceiver,
sampleRate: Float = Sampler.DefaultSampleRate
): Tracer.Factory = () => mk(scribeHost, scribePort, statsReceiver, sampleRate)
/**
* @param host Host to send trace data to
* @param port Port to send trace data to
* @param statsReceiver Where to log information about tracing success/failures
* @param sampleRate How much data to collect. Default sample rate 0.1%. Max is 1, min 0.
*/
def mk(
host: String = Host().getHostName,
port: Int = Host().getPort,
statsReceiver: StatsReceiver = NullStatsReceiver,
sampleRate: Float = Sampler.DefaultSampleRate
): Tracer =
new ZipkinTracer(
RawZipkinTracer(host, port, statsReceiver),
sampleRate)
/**
* Util method since named parameters can't be called from Java
* @param sr stats receiver to send successes/failures to
*/
@deprecated("Use mk() instead", "6.1.0")
def apply(sr: StatsReceiver): Tracer.Factory = () =>
mk(Host().getHostName, Host().getPort, sr, Sampler.DefaultSampleRate)
/**
* Util method since named parameters can't be called from Java
* @param statsReceiver stats receiver to send successes/failures to
*/
def mk(statsReceiver: StatsReceiver): Tracer =
mk(Host().getHostName, Host().getPort, statsReceiver, Sampler.DefaultSampleRate)
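  // Illustrative usage sketch (values are assumptions, not part of the original source):
  //   val tracer = ZipkinTracer.mk(host = "localhost", port = 9410,
  //                                statsReceiver = DefaultStatsReceiver.scope("zipkin"),
  //                                sampleRate = 0.001f)
  //   The returned tracer can then be supplied to a client/server builder's tracer configuration.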
}
/**
* Tracer that supports sampling. Will pass through a subset of the records.
* @param underlyingTracer Underlying tracer that accumulates the traces and sends off
* to the collector.
* @param initialSampleRate Start off with this sample rate. Can be changed later.
* @param sink where to send sampled trace events to.
*/
class SamplingTracer(
underlyingTracer: Tracer,
initialSampleRate: Float,
sink: Sink)
extends Tracer
{
/**
* Tracer that supports sampling. Will pass through a subset of the records.
* @param underlyingTracer Underlying tracer that accumulates the traces and sends off
* to the collector.
* @param initialSampleRate Start off with this sample rate. Can be changed later.
*/
def this(underlyingTracer: Tracer, initialSampleRate: Float) =
this(underlyingTracer, initialSampleRate, Sink.default)
/**
* Tracer that supports sampling. Will pass through a subset of the records.
*/
def this() = this(
RawZipkinTracer(Host().getHostName, Host().getPort, DefaultStatsReceiver.scope("zipkin")),
sampleRateFlag())
private[this] val sampler = new Sampler
setSampleRate(initialSampleRate)
def sampleTrace(traceId: TraceId): Option[Boolean] = sampler.sampleTrace(traceId)
def setSampleRate(sampleRate: Float): Unit = sampler.setSampleRate(sampleRate)
def getSampleRate: Float = sampler.sampleRate
def record(record: Record) {
if (sampler.sampleRecord(record)) {
underlyingTracer.record(record)
if (Trace.hasId) {
sink.event(ZipkinTracer.Trace, objectVal = record.annotation,
traceIdVal = Trace.id.traceId.self, spanIdVal = Trace.id.spanId.self)
} else {
sink.event(ZipkinTracer.Trace, objectVal = record.annotation)
}
}
}
}
class ZipkinTracer(tracer: RawZipkinTracer, initialRate: Float)
extends SamplingTracer(tracer, initialRate)
| travisbrown/finagle | finagle-zipkin/src/main/scala/com/twitter/finagle/zipkin/thrift/ZipkinTracer.scala | Scala | apache-2.0 | 8,199 |
package scalacookbook.chapter03
/**
* Created by liguodong on 2016/6/20.
*/
object LoopWithForAndForeach extends App{
val a = Array("apple", "banana", "orange")
for (e <- a) println(e)
println("=================")
//Returning values from a for loop
for (e <- a) {
// imagine this requires multiple lines
val s = e.toUpperCase
println(s)
}
println("=================")
val newArray = for (e <- a) yield e.toUpperCase
println("newArray(0) : "+newArray(0))
for(i<-newArray) println(i)
val newArray2 = for (e <- a) yield {
// imagine this requires multiple lines
val s = e.toUpperCase
s
}
newArray2.map(_+" ").foreach(print)
println
//for loop counters
for (i <- 0 until a.length) {
println(s"$i is ${a(i)}")
}
for ((e, count) <- a.zipWithIndex) {
println(s"$count is $e")
}
//Generators and guards
for (i <- 1 to 3) println(i)
println(1 to 3)
for (i <- 1 to 10 if i % 4==0) println(i)
//Looping over a Map
val names = Map("fname" -> "Robert",
"lname" -> "Goren")
for ((k,v) <- names) println(s"key: $k, value: $v")
//Discussion
a.foreach(println)
println("------before------")
//before
a.foreach { e =>
val s = e.toUpperCase
println(s)
}
println("------after------")
//after
a.foreach(e => println(e.toUpperCase))
println("循环遍历集合转化成foreach遍历集合")
// original List code
val nums = List(1,2,3)
for (i <- nums) println(i)
// translation performed by the compiler
nums.foreach(((i) => println(i)))
// #1 - input (my code)
for (i <- 1 to 10) println(i)
// #1 - compiler output
1.to(10).foreach(((i) => println(i)))
println("~~~~~~~~~~~~~")
// #2 - input code
for {
i <- 1 to 10
if i % 2 == 0
} println(i)
// #2 - translated output
1.to(10).withFilter(i=>i%2==0).foreach(((i) =>
println(i)))
// #3 - input code
for {
i <- 1 to 10
if i != 2
if i % 2 == 0
} println(i)
// #3 - translated output
1.to(10).withFilter(((i) => i!=2))
.withFilter(((i) => i % 2 == 0)).foreach(((i) => println(i)))
// #4 - input code
for { i <- 1 to 10 } yield print(i)
println
// #4 - output
1.to(10).map(((i) => print(i)))
println
// #5 - input code (for loop, guard, and yield)
for {
i <- 1 to 10
if i % 2 == 0
} yield print(i)
println
// #5 - translated code
1.to(10).withFilter(((i) => i % 2 == 0)).map(((i) => print(i)))
println
}
| liguodongIOT/java-scala-mix-sbt | src/main/scala/scalacookbook/chapter03/LoopWithForAndForeach.scala | Scala | apache-2.0 | 2,488 |
package com.github.nearbydelta.deepspark.data
import java.io.{ByteArrayInputStream, ByteArrayOutputStream}
import breeze.linalg.{DenseMatrix, DenseVector}
import com.esotericsoftware.kryo._
import com.esotericsoftware.kryo.io.{Input, Output}
import com.github.nearbydelta.deepspark.layer._
import com.github.nearbydelta.deepspark.network.{GeneralNetwork, SimpleNetwork}
import com.github.nearbydelta.deepspark.word._
import com.github.nearbydelta.deepspark.word.layer._
import com.twitter.chill.{Kryo, ScalaKryoInstantiator}
import org.apache.hadoop.io.{BytesWritable, NullWritable}
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import scala.collection.mutable.ArrayBuffer
import scala.reflect.ClassTag
object KryoWrap{
val customRegister = ArrayBuffer[Kryo ⇒ Unit]()
def addRegisterFunction(fn: Kryo ⇒ Unit): Unit = {
customRegister += fn
}
def get = new KryoWrap(customRegister)
/**
* Register all classes in DeepSpark library into given Kryo object.
* @param kryo Kryo object to register classes in DeepSpark library.
*/
def init(kryo: Kryo): Unit = {
kryo.setClassLoader(this.getClass.getClassLoader)
// Please, preserve this ordering.
kryo.register(classOf[LedgerWords])
kryo.register(classOf[LedgerModel])
// To register specialized types in scala, instantiate things
kryo.register(DenseVector(0.0).getClass, DataVectorSerializer)
kryo.register(DenseMatrix(0.0).getClass, MatrixSerializer)
kryo.register(classOf[DenseVector[Double]], DataVectorSerializer)
kryo.register(classOf[DenseMatrix[Double]], MatrixSerializer)
kryo.register(classOf[String])
kryo.register(classOf[Weight[_]])
kryo.register(classOf[GeneralNetwork[_, _]])
kryo.register(classOf[SimpleNetwork[_]])
kryo.register(classOf[AverageLedger])
kryo.register(classOf[BasicLayer])
kryo.register(classOf[ConcatLedger])
kryo.register(classOf[FullTensorLayer])
kryo.register(classOf[LinearTransformLayer])
kryo.register(classOf[RBFLayer])
kryo.register(classOf[RNNLedger])
kryo.register(classOf[SplitTensorLayer])
kryo.register(HardSigmoid.getClass)
kryo.register(HardTanh.getClass)
kryo.register(HyperbolicTangent.getClass)
kryo.register(Linear.getClass)
kryo.register(Rectifier.getClass)
kryo.register(Sigmoid.getClass)
kryo.register(Softmax.getClass)
kryo.register(Softplus.getClass)
kryo.register(GaussianRBF.getClass)
kryo.register(InverseQuadRBF.getClass)
kryo.register(HardGaussianRBF.getClass)
kryo.register(LeakyReLU.getClass)
kryo.register(classOf[LedgerGroupLayer])
kryo.register(classOf[AdaDelta])
kryo.register(classOf[AdaGrad])
kryo.register(classOf[StochasticGradientDescent])
kryo.register(classOf[LedgerAdaDelta])
kryo.register(classOf[LedgerAdaGrad])
kryo.register(classOf[LedgerSGD])
kryo.register(SoftmaxCEE.getClass)
kryo.register(classOf[FixedAverageLedger])
kryo.register(classOf[VectorRBFLayer])
}
}
/**
* Kryo wrapper, which all classes in DeepSpark library are registered.
*/
class KryoWrap(val customRegister: ArrayBuffer[Kryo ⇒ Unit] = ArrayBuffer()) extends Serializable{
def kryo = {
val k = new ScalaKryoInstantiator().newKryo()
KryoWrap.init(k)
customRegister.foreach(fn ⇒ fn(k))
k
}
def readKryoFile[X](sc: SparkContext, path: String)(implicit evidence$1: ClassTag[X]): RDD[X] = {
sc.sequenceFile(path, classOf[NullWritable], classOf[BytesWritable], 2)
.mapPartitions{
lazy val k = kryo
_.flatMap{
case (_, bytes) ⇒
val bis = new ByteArrayInputStream(bytes.getBytes)
val input = new Input(bis)
k.readClassAndObject(input).asInstanceOf[Array[X]]
}
}
}
def saveAsKryoFile[X](rdd: RDD[X], path: String)
(implicit evidence$1: ClassTag[X], evidence$2: ClassTag[Array[X]]) = {
rdd.mapPartitions{
lazy val k = kryo
iter ⇒
iter.grouped[X](10).map{ x ⇒
val arr = x.toArray[X]
val bos = new ByteArrayOutputStream()
val out = new Output(bos, 1024*1024)
k.writeClassAndObject(out, arr)
out.flush()
(NullWritable.get(), new BytesWritable(bos.toByteArray))
}
}.saveAsSequenceFile(path)
}
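  // Illustrative usage sketch (variable names and path are hypothetical):
  //   val wrap = KryoWrap.get
  //   wrap.saveAsKryoFile(vectorRDD, "hdfs:///tmp/vectors.kryo")
  //   val restored = wrap.readKryoFile[DataVec](sc, "hdfs:///tmp/vectors.kryo")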
}
/**
* Customized serializer for Vector type.
*/
object DataVectorSerializer extends Serializer[DataVec] {
override def read(kryo: Kryo, input: Input, aClass: Class[DataVec]): DataVec = {
val dim = input.readInt()
val array = (0 until dim).map(_ ⇒ input.readDouble()).toArray
DenseVector(array)
}
override def write(kryo: Kryo, output: Output, t: DataVec): Unit = {
output.writeInt(t.length)
t.data.foreach(output.writeDouble)
}
}
/**
* Customized serializer for Matrix type.
*/
object MatrixSerializer extends Serializer[Matrix] {
override def read(kryo: Kryo, input: Input, aClass: Class[Matrix]): Matrix = {
val rows = input.readInt()
val cols = input.readInt()
val array = (0 until (rows * cols)).map(_ ⇒ input.readDouble()).toArray
DenseMatrix.create(rows, cols, array)
}
override def write(kryo: Kryo, output: Output, t: Matrix): Unit = {
output.writeInt(t.rows)
output.writeInt(t.cols)
t.data.foreach(output.writeDouble)
}
} | nearbydelta/deepspark | src/main/scala/com/github/nearbydelta/deepspark/data/KryoWrap.scala | Scala | gpl-2.0 | 5,356 |
package models
import org.specs2.mutable._
import org.specs2.runner._
import org.junit.runner._
import play.api.test._
import play.api.test.Helpers._
import controllers.Screener.ScreenParams
@RunWith(classOf[JUnitRunner])
class TradeSpec extends Specification {
val itmTrade = {
running(FakeApplication()) {
controllers.Screener.screen(ScreenParams(Strategy.BullCalls, Seq("all"), Some("itm"))).head
}
}
val otmTrade = {
running(FakeApplication()) {
controllers.Screener.screen(ScreenParams(Strategy.BullCalls, Seq("all"), Some("otm"))).head
}
}
"Trade" should {
"calculate the number of days until expiration" in {
val expireUnix = itmTrade.expires
val nowUnix = System.currentTimeMillis / 1000
val secondsInDay = 60 * 60 * 24
val secondsToExpire = expireUnix - nowUnix
itmTrade.daysToExpire must be_==(secondsToExpire / secondsInDay)
}
"determine if legs are calls or puts" in {
running(FakeApplication()) {
val callTrade = controllers.Screener.screen(ScreenParams(Strategy.BullCalls)).head
val putTrade = controllers.Screener.screen(ScreenParams(Strategy.BullPuts)).head
callTrade.callOrPut must equalTo("C")
putTrade.callOrPut must equalTo("P")
}
}
"calculate the max profit percent" in {
itmTrade.profitPercent must equalTo(itmTrade.twoDigit(itmTrade.maxProfitAmount / itmTrade.maxLossAmount * 100))
otmTrade.profitPercent must equalTo(otmTrade.twoDigit(otmTrade.maxProfitAmount / otmTrade.maxLossAmount * 100))
}
"calculate the max profit percent per day" in {
val itmProfit = itmTrade.maxProfitAmount / itmTrade.maxLossAmount * 100
itmTrade.profitPercentPerDay must equalTo(itmTrade.twoDigit(itmProfit / itmTrade.daysToExpire))
val otmProfit = otmTrade.maxProfitAmount / otmTrade.maxLossAmount * 100
otmTrade.profitPercentPerDay must equalTo(otmTrade.twoDigit(otmProfit / otmTrade.daysToExpire))
}
"calculate percent per day to max profit" in {
itmTrade.percentPerDayToMaxProfit must equalTo(itmTrade.twoDigit(itmTrade.percentToMaxProfit / itmTrade.daysToExpire))
otmTrade.percentPerDayToMaxProfit must equalTo(otmTrade.twoDigit(otmTrade.percentToMaxProfit / otmTrade.daysToExpire))
}
"calculate percent per day to max loss" in {
itmTrade.percentPerDayToMaxLoss must equalTo(itmTrade.twoDigit(itmTrade.percentToMaxLoss / itmTrade.daysToExpire))
otmTrade.percentPerDayToMaxLoss must equalTo(otmTrade.twoDigit(otmTrade.percentToMaxLoss / otmTrade.daysToExpire))
}
"calculate percent per day to breakeven" in {
itmTrade.percentPerDayToBreakeven must equalTo(itmTrade.twoDigit(itmTrade.percentToBreakeven / itmTrade.daysToExpire))
otmTrade.percentPerDayToBreakeven must equalTo(otmTrade.twoDigit(otmTrade.percentToBreakeven / otmTrade.daysToExpire))
}
}
} | Exupery/optionometer | test/models/TradeSpec.scala | Scala | mit | 2,939 |
package org.bitcoins.core.util
import org.bitcoins.core.crypto._
import org.bitcoins.core.number.UInt32
import org.bitcoins.core.protocol.CompactSizeUInt
import org.bitcoins.core.protocol.script.{CLTVScriptPubKey, CSVScriptPubKey, EmptyScriptPubKey, _}
import org.bitcoins.core.protocol.transaction.{Transaction, WitnessTransaction}
import org.bitcoins.core.script.ScriptProgram.PreExecutionScriptProgramImpl
import org.bitcoins.core.script.constant._
import org.bitcoins.core.script.crypto.{OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY, OP_CHECKSIG, OP_CHECKSIGVERIFY}
import org.bitcoins.core.script.flag.{ScriptFlag, ScriptFlagUtil}
import org.bitcoins.core.script.result.{ScriptError, ScriptErrorPubKeyType, ScriptErrorWitnessPubKeyType}
import org.bitcoins.core.script.{ExecutionInProgressScriptProgram, ScriptOperation, ScriptProgram, ScriptSettings}
import scala.annotation.tailrec
import scala.util.Try
/**
* Created by chris on 3/2/16.
*/
trait BitcoinScriptUtil extends BitcoinSLogger {
/** Takes in a sequence of script tokens and converts them to their hexadecimal value */
def asmToHex(asm : Seq[ScriptToken]) : String = {
val hex = asm.map(_.hex).mkString
hex
}
/** Converts a sequence of script tokens to them to their byte values */
def asmToBytes(asm : Seq[ScriptToken]) : Seq[Byte] = BitcoinSUtil.decodeHex(asmToHex(asm))
/**
* Filters out push operations in our sequence of script tokens
* this removes OP_PUSHDATA1, OP_PUSHDATA2, OP_PUSHDATA4 and all ByteToPushOntoStack tokens */
def filterPushOps(asm : Seq[ScriptToken]) : Seq[ScriptToken] = {
    //TODO: This does not remove the following script number after an OP_PUSHDATA
asm.filterNot(op => op.isInstanceOf[BytesToPushOntoStack]
|| op == OP_PUSHDATA1
|| op == OP_PUSHDATA2
|| op == OP_PUSHDATA4)
}
/**
* Returns true if the given script token counts towards our max script operations in a script
* See https://github.com/bitcoin/bitcoin/blob/master/src/script/interpreter.cpp#L269-L271
* which is how bitcoin core handles this */
def countsTowardsScriptOpLimit(token : ScriptToken) : Boolean = token match {
case scriptOp : ScriptOperation if (scriptOp.opCode > OP_16.opCode) => true
case _ : ScriptToken => false
}
/**
* Counts the amount of sigops in a script
* https://github.com/bitcoin/bitcoin/blob/master/src/script/script.cpp#L156-L202
* @param script the script whose sigops are being counted
* @return the number of signature operations in the script
*/
def countSigOps(script : Seq[ScriptToken]) : Long = {
val checkSigCount = script.count(token => token == OP_CHECKSIG || token == OP_CHECKSIGVERIFY)
val multiSigOps = Seq(OP_CHECKMULTISIG,OP_CHECKMULTISIGVERIFY)
val multiSigCount : Long = script.zipWithIndex.map { case (token, index) =>
if (multiSigOps.contains(token) && index != 0) {
script(index-1) match {
case scriptNum : ScriptNumber => scriptNum.underlying
case scriptConstant : ScriptConstant => ScriptNumberUtil.toLong(scriptConstant.hex)
case _ : ScriptToken => ScriptSettings.maxPublicKeysPerMultiSig
}
} else 0
}.sum
checkSigCount + multiSigCount
}
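  // Editor's note (not in the original source): a pay-to-pubkey script (<pubkey> OP_CHECKSIG)
  // counts as 1 sigop; when OP_CHECKMULTISIG is preceded by ScriptNumber(3) the count adds 3,
  // and any other preceding token falls back to ScriptSettings.maxPublicKeysPerMultiSig,
  // mirroring bitcoin core's "inaccurate" sigop count.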
/**
* Parses the number of signatures on the stack
* This can only be called when an OP_CHECKMULTISIG operation is about to be executed
* on the stack
* For instance if this was a 2/3 multisignature script, it would return the number 3
*/
def numPossibleSignaturesOnStack(program : ScriptProgram) : ScriptNumber = {
    require(program.script.headOption == Some(OP_CHECKMULTISIG) || program.script.headOption == Some(OP_CHECKMULTISIGVERIFY),
      "We can only parse the number of signatures on the stack when we are executing an OP_CHECKMULTISIG or OP_CHECKMULTISIGVERIFY op")
val nPossibleSignatures : ScriptNumber = program.stack.head match {
case s : ScriptNumber => s
case s : ScriptConstant => ScriptNumber(s.bytes)
case _ : ScriptToken => throw new RuntimeException("n must be a script number or script constant for OP_CHECKMULTISIG")
}
nPossibleSignatures
}
/**
* Returns the number of required signatures on the stack, for instance if this was a
* 2/3 multisignature script, it would return the number 2
*/
def numRequiredSignaturesOnStack(program : ScriptProgram) : ScriptNumber = {
    require(program.script.headOption == Some(OP_CHECKMULTISIG) || program.script.headOption == Some(OP_CHECKMULTISIGVERIFY),
      "We can only parse the number of signatures on the stack when we are executing an OP_CHECKMULTISIG or OP_CHECKMULTISIGVERIFY op")
val nPossibleSignatures = numPossibleSignaturesOnStack(program)
val stackWithoutPubKeys = program.stack.tail.slice(nPossibleSignatures.toInt,program.stack.tail.size)
val mRequiredSignatures : ScriptNumber = stackWithoutPubKeys.head match {
case s: ScriptNumber => s
case s : ScriptConstant => ScriptNumber(s.bytes)
case _ : ScriptToken => throw new RuntimeException("m must be a script number or script constant for OP_CHECKMULTISIG")
}
mRequiredSignatures
}
/**
   * Determines if a script contains only push operations
* This is equivalent to
* [[https://github.com/bitcoin/bitcoin/blob/master/src/script/script.cpp#L213]]
*/
def isPushOnly(script : Seq[ScriptToken]) : Boolean = {
@tailrec
def loop(tokens: Seq[ScriptToken], accum: List[Boolean]): Seq[Boolean] = tokens match {
case h :: t => h match {
case scriptOp : ScriptOperation => loop(t, (scriptOp.opCode < OP_16.opCode) :: accum)
case _ : ScriptToken => loop(t, true :: accum)
}
case Nil => accum
}
!loop(script, List()).exists(_ == false)
}
/**
* Determines if the token being pushed onto the stack is being pushed by the SMALLEST push operation possible
* This is equivalent to
* [[https://github.com/bitcoin/bitcoin/blob/master/src/script/interpreter.cpp#L209]]
* @param pushOp the operation that is pushing the data onto the stack
* @param token the token that is being pushed onto the stack by the pushOp
* @return
*/
def isMinimalPush(pushOp : ScriptToken, token : ScriptToken) : Boolean = token match {
case scriptNumOp : ScriptNumberOperation =>
scriptNumOp == pushOp
case ScriptConstant.zero | ScriptConstant.negativeZero =>
//weird case where OP_0 pushes an empty byte vector on the stack, NOT "00" or "81"
//so we can push the constant "00" or "81" onto the stack with a BytesToPushOntoStack pushop
pushOp == BytesToPushOntoStack(1)
case _ : ScriptToken if ( token.bytes.size == 1 && ScriptNumberOperation.fromNumber(token.toLong.toInt).isDefined) =>
//could have used the ScriptNumberOperation to push the number onto the stack
false
case token : ScriptToken => token.bytes.size match {
case size if (size == 0) => pushOp == OP_0
case size if (size == 1 && token.bytes.head == OP_1NEGATE.opCode) =>
pushOp == OP_1NEGATE
case size if (size <= 75) => token.bytes.size == pushOp.toLong
case size if (size <= 255) => pushOp == OP_PUSHDATA1
case size if (size <= 65535) => pushOp == OP_PUSHDATA2
case size =>
        //default case is true because we have to use the largest push op possible, which is OP_PUSHDATA4
true
}
}
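  // Editor's note (not in the original source): a 20-byte constant is minimal only when paired
  // with BytesToPushOntoStack(20); pairing the same constant with OP_PUSHDATA1 hits the
  // size <= 75 branch above and returns false because 20 does not equal the push op's value.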
/** Calculates the push operation for the given [[ScriptToken]] */
def calculatePushOp(scriptToken : ScriptToken) : Seq[ScriptToken] = {
//push ops following an OP_PUSHDATA operation are interpreted as unsigned numbers
val scriptTokenSize = UInt32(scriptToken.bytes.size)
val bytes = scriptTokenSize.bytes
if (scriptTokenSize <= UInt32(75)) Seq(BytesToPushOntoStack(scriptToken.bytes.size))
else if (scriptTokenSize <= UInt32(OP_PUSHDATA1.max)) {
//we need the push op to be only 1 byte in size
val pushConstant = ScriptConstant(BitcoinSUtil.flipEndianness(bytes.slice(bytes.length-1,bytes.length)))
Seq(OP_PUSHDATA1, pushConstant)
}
else if (scriptTokenSize <= UInt32(OP_PUSHDATA2.max)) {
//we need the push op to be only 2 bytes in size
val pushConstant = ScriptConstant(BitcoinSUtil.flipEndianness(bytes.slice(bytes.length-2,bytes.length)))
Seq(OP_PUSHDATA2, pushConstant)
}
else if (scriptTokenSize <= UInt32(OP_PUSHDATA4.max)) {
val pushConstant = ScriptConstant(BitcoinSUtil.flipEndianness(bytes))
Seq(OP_PUSHDATA4, pushConstant)
}
    else throw new IllegalArgumentException("ScriptToken is too large for pushops, size: " + scriptTokenSize)
}
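  // Editor's note (not in the original source): a 20-byte constant yields
  // Seq(BytesToPushOntoStack(20)); a 100-byte constant yields OP_PUSHDATA1 followed by a
  // one-byte length constant (0x64), and sizes above 255 and 65535 bytes move on to
  // OP_PUSHDATA2 and OP_PUSHDATA4 respectively.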
def calculatePushOp(bytes: Seq[Byte]): Seq[ScriptToken] = calculatePushOp(ScriptConstant(bytes))
/**
   * Whenever a [[ScriptConstant]] is interpreted as a number, BIP62 could enforce that number to be encoded
* in the smallest encoding possible
* [[https://github.com/bitcoin/bitcoin/blob/a6a860796a44a2805a58391a009ba22752f64e32/src/script/script.h#L220-L237]] */
def isShortestEncoding(constant : ScriptConstant) : Boolean = isShortestEncoding(constant.bytes)
def isShortestEncoding(bytes : Seq[Byte]) : Boolean = {
// If the most-significant-byte - excluding the sign bit - is zero
// then we're not minimal. Note how this test also rejects the
// negative-zero encoding, 0x80.
if ((bytes.size > 0 && (bytes.last & 0x7f) == 0)) {
// One exception: if there's more than one byte and the most
// significant bit of the second-most-significant-byte is set
// it would conflict with the sign bit. An example of this case
// is +-255, which encode to 0xff00 and 0xff80 respectively.
// (big-endian).
if (bytes.size <= 1 || (bytes(bytes.size - 2) & 0x80) == 0) {
false
} else true
} else true
}
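  // Editor's worked example (not in the original source), using the little-endian script number
  // encoding described above: the two-byte sequence 0x01 0x00 is NOT shortest (the trailing 0x00
  // is redundant for the value 1), while 0xff 0x00 IS shortest, because dropping the 0x00 would
  // flip the sign bit of 255.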
/**
   * Whenever a script constant is interpreted as a number, BIP62 should enforce that number to be encoded
* in the smallest encoding possible
* https://github.com/bitcoin/bitcoin/blob/a6a860796a44a2805a58391a009ba22752f64e32/src/script/script.h#L220-L237
*/
def isShortestEncoding(hex : String) : Boolean = isShortestEncoding(BitcoinSUtil.decodeHex(hex))
/**
* Checks the [[ECPublicKey]] encoding according to bitcoin core's function:
* [[https://github.com/bitcoin/bitcoin/blob/master/src/script/interpreter.cpp#L202]]. */
def checkPubKeyEncoding(key : ECPublicKey, program : ScriptProgram) : Boolean = checkPubKeyEncoding(key,program.flags)
def checkPubKeyEncoding(key : ECPublicKey, flags : Seq[ScriptFlag]) : Boolean = {
if (ScriptFlagUtil.requireStrictEncoding(flags) &&
!isCompressedOrUncompressedPubKey(key)) false else true
}
/** Returns true if the key is compressed or uncompressed, false otherwise
* https://github.com/bitcoin/bitcoin/blob/master/src/script/interpreter.cpp#L66
* @param key the public key that is being checked
* @return true if the key is compressed/uncompressed otherwise false
*/
def isCompressedOrUncompressedPubKey(key : ECPublicKey) : Boolean = {
if (key.bytes.size < 33) {
// Non-canonical public key: too short
return false
}
if (key.bytes.head == 0x04) {
if (key.bytes.size != 65) {
// Non-canonical public key: invalid length for uncompressed key
return false
}
} else if (isCompressedPubKey(key)) {
return true
} else {
// Non-canonical public key: neither compressed nor uncompressed
return false
}
true
}
/** Checks if the given public key is a compressed public key */
def isCompressedPubKey(key: ECPublicKey): Boolean = {
(key.bytes.size == 33) && (key.bytes.head == 0x02 || key.bytes.head == 0x03)
}
def minimalScriptNumberRepresentation(num : ScriptNumber) : ScriptNumber = {
val op = ScriptNumberOperation.fromNumber(num.toInt)
if (op.isDefined) op.get else num
}
/**
* Determines if the given pubkey is valid in accordance to the given [[ScriptFlag]]s
* Mimics this function inside of Bitcoin Core
* [[https://github.com/bitcoin/bitcoin/blob/528472111b4965b1a99c4bcf08ac5ec93d87f10f/src/script/interpreter.cpp#L214-L223]]
*/
def isValidPubKeyEncoding(pubKey: ECPublicKey, flags: Seq[ScriptFlag]): Option[ScriptError] = {
if (ScriptFlagUtil.requireStrictEncoding(flags) &&
!BitcoinScriptUtil.isCompressedOrUncompressedPubKey(pubKey)) {
Some(ScriptErrorPubKeyType)
}
else if (ScriptFlagUtil.requireScriptVerifyWitnessPubKeyType(flags) &&
!BitcoinScriptUtil.isCompressedPubKey(pubKey)) {
Some(ScriptErrorWitnessPubKeyType)
} else None
}
  /** Prepares the script we are spending to be serialized for our transaction signature serialization algorithm
* We need to check if the scriptSignature has a redeemScript
* In that case, we need to pass the redeemScript to the TransactionSignatureChecker
*
* In the case we have a P2SH(P2WSH) we need to pass the witness's redeem script to the [[TransactionSignatureChecker]]
* instead of passing the [[WitnessScriptPubKey]] inside of the [[P2SHScriptSignature]]'s redeem script.
* */
def calculateScriptForChecking(txSignatureComponent: TransactionSignatureComponent,
signature: ECDigitalSignature, script: Seq[ScriptToken]): Seq[ScriptToken] = {
val scriptForChecking = calculateScriptForSigning(txSignatureComponent, script)
logger.debug("sig for removal: " + signature)
logger.debug("script: " + script)
logger.debug("scriptWithSigRemoved: " + scriptForChecking)
txSignatureComponent.sigVersion match {
case SigVersionBase => removeSignatureFromScript(signature,scriptForChecking)
case SigVersionWitnessV0 =>
//BIP143 removes requirement for calling FindAndDelete
//https://github.com/bitcoin/bips/blob/master/bip-0143.mediawiki#no-findanddelete
scriptForChecking
}
}
def calculateScriptForSigning(txSignatureComponent: TransactionSignatureComponent, script: Seq[ScriptToken]): Seq[ScriptToken] = txSignatureComponent.scriptPubKey match {
case p2shScriptPubKey: P2SHScriptPubKey =>
val p2shScriptSig = P2SHScriptSignature(txSignatureComponent.scriptSignature.bytes)
val sigsRemoved = removeSignaturesFromScript(p2shScriptSig.signatures,p2shScriptSig.redeemScript.asm)
sigsRemoved
case w: WitnessScriptPubKey =>
txSignatureComponent match {
case wtxSigComponent: WitnessV0TransactionSignatureComponent =>
val scriptEither: Either[(Seq[ScriptToken], ScriptPubKey), ScriptError] = w.witnessVersion.rebuild(wtxSigComponent.witness,w.witnessProgram)
parseScriptEither(scriptEither)
case base : BaseTransactionSignatureComponent =>
//shouldn't have BaseTransactionSignatureComponent with a witness scriptPubKey
script
}
case _ : P2PKHScriptPubKey | _ : P2PKScriptPubKey | _ : MultiSignatureScriptPubKey |
_ : NonStandardScriptPubKey | _ : CLTVScriptPubKey | _ : CSVScriptPubKey | _ : WitnessCommitment | EmptyScriptPubKey =>
script
}
/** Removes the given [[ECDigitalSignature]] from the list of [[ScriptToken]] if it exists. */
def removeSignatureFromScript(signature : ECDigitalSignature, script : Seq[ScriptToken]) : Seq[ScriptToken] = {
if (script.contains(ScriptConstant(signature.hex))) {
//replicates this line in bitcoin core
//https://github.com/bitcoin/bitcoin/blob/master/src/script/interpreter.cpp#L872
val sigIndex = script.indexOf(ScriptConstant(signature.hex))
logger.debug("SigIndex: " + sigIndex)
      //remove sig and its corresponding BytesToPushOntoStack
val sigRemoved = script.slice(0,sigIndex-1) ++ script.slice(sigIndex+1,script.size)
logger.debug("sigRemoved: " + sigRemoved)
sigRemoved
} else script
}
/** Removes the list of [[ECDigitalSignature]] from the list of [[ScriptToken]] */
def removeSignaturesFromScript(sigs : Seq[ECDigitalSignature], script : Seq[ScriptToken]) : Seq[ScriptToken] = {
@tailrec
def loop(remainingSigs : Seq[ECDigitalSignature], scriptTokens : Seq[ScriptToken]) : Seq[ScriptToken] = {
remainingSigs match {
case Nil => scriptTokens
case h :: t =>
val newScriptTokens = removeSignatureFromScript(h,scriptTokens)
loop(t,newScriptTokens)
}
}
loop(sigs,script)
}
/** Removes the [[org.bitcoins.core.script.crypto.OP_CODESEPARATOR]] in the original script according to
* the last code separator index in the script. */
def removeOpCodeSeparator(program : ExecutionInProgressScriptProgram) : Seq[ScriptToken] = {
if (program.lastCodeSeparator.isDefined) {
program.originalScript.slice(program.lastCodeSeparator.get+1, program.originalScript.size)
} else program.originalScript
}
private def parseScriptEither(scriptEither: Either[(Seq[ScriptToken], ScriptPubKey), ScriptError]): Seq[ScriptToken] = scriptEither match {
case Left((_,scriptPubKey)) =>
logger.debug("Script pubkey asm inside calculateForSigning: " + scriptPubKey.asm)
scriptPubKey.asm
case Right(_) => Nil //error
}
/** Given a tx, scriptPubKey and the input index we are checking the tx, it derives the appropriate [[SignatureVersion]] to use */
@tailrec
final def parseSigVersion(tx: Transaction, scriptPubKey: ScriptPubKey, inputIndex: UInt32): SignatureVersion = scriptPubKey match {
case _ : WitnessScriptPubKeyV0 | _: UnassignedWitnessScriptPubKey =>
SigVersionWitnessV0
case _ : P2SHScriptPubKey =>
      //every p2sh scriptPubKey HAS to have a p2shScriptSig since we no longer require scripts to be standard
val s = P2SHScriptSignature(tx.inputs(inputIndex.toInt).scriptSignature.bytes)
parseSigVersion(tx,s.redeemScript,inputIndex)
case _: P2PKScriptPubKey | _: P2PKHScriptPubKey | _: MultiSignatureScriptPubKey | _: NonStandardScriptPubKey
| _: CLTVScriptPubKey | _: CSVScriptPubKey | _ : WitnessCommitment | EmptyScriptPubKey => SigVersionBase
}
/** Casts the given script token to a boolean value
* Mimics this function inside of Bitcoin Core
* [[https://github.com/bitcoin/bitcoin/blob/8c1dbc5e9ddbafb77e60e8c4e6eb275a3a76ac12/src/script/interpreter.cpp#L38]]
* All bytes in the byte vector must be zero, unless it is the last byte, which can be 0 or 0x80 (negative zero)
* */
def castToBool(token: ScriptToken): Boolean = {
token.bytes.zipWithIndex.exists {
case (b,index) =>
val byteNotZero = b.toByte != 0
val lastByteNotNegativeZero = !(index == token.bytes.size - 1 && b.toByte == 0x80.toByte)
byteNotZero && lastByteNotNegativeZero
}
}
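  // Editor's note (not in the original source): a token whose bytes are empty or all zero is
  // false, a token whose only non-zero byte is a trailing 0x80 (negative zero) is false, and
  // any other non-zero byte makes the token true.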
}
object BitcoinScriptUtil extends BitcoinScriptUtil
| SuredBits/bitcoin-s-sidechains | src/main/scala/org/bitcoins/core/util/BitcoinScriptUtil.scala | Scala | mit | 18,782 |
package com.eevolution.context.dictionary.domain.api.repository
import com.eevolution.context.dictionary._
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: [email protected], http://www.e-evolution.com , http://github.com/EmerisScala
* Created by [email protected] , www.e-evolution.com on 02/11/17.
*/
trait PrintTableFormatRepository [PrintTableFormat , Int] extends api.Repostory [PrintTableFormat , Int] {
} | adempiere/ADReactiveSystem | dictionary-api/src/main/scala/com/eevolution/context/dictionary/domain/api/repository/PrintTableFormatRepository.scala | Scala | gpl-3.0 | 1,151 |
package com.temportalist.chalked.common
import com.temportalist.origin.wrapper.common.ProxyWrapper
import net.minecraft.entity.player.EntityPlayer
import net.minecraft.tileentity.TileEntity
import net.minecraft.world.World
/**
*
*
* @author TheTemportalist
*/
class CommonProxy() extends ProxyWrapper {
override def registerRender(): Unit = {
}
override def getClientElement(ID: Int, player: EntityPlayer, world: World, x: Int, y: Int,
z: Int, tileEntity: TileEntity): AnyRef = {
// NOOP
null
}
override def getServerElement(ID: Int, player: EntityPlayer, world: World, x: Int, y: Int,
z: Int, tileEntity: TileEntity): AnyRef = {
// NOOP
null
}
}
| TheTemportalist/Chalked | src/main/scala/com/temportalist/chalked/common/CommonProxy.scala | Scala | apache-2.0 | 678 |
package gitbucket.core.service
import gitbucket.core.GitBucketCoreModule
import gitbucket.core.util.{DatabaseConfig, FileUtil}
import gitbucket.core.util.SyntaxSugars._
import io.github.gitbucket.solidbase.Solidbase
import liquibase.database.core.H2Database
import liquibase.database.jvm.JdbcConnection
import gitbucket.core.model._
import gitbucket.core.model.Profile._
import gitbucket.core.model.Profile.profile._
import gitbucket.core.model.Profile.profile.blockingApi._
import org.apache.commons.io.FileUtils
import java.sql.DriverManager
import java.io.File
import scala.util.Random
trait ServiceSpecBase {
def withTestDB[A](action: (Session) => A): A = {
FileUtil.withTmpDir(new File(FileUtils.getTempDirectory(), Random.alphanumeric.take(10).mkString)) { dir =>
val (url, user, pass) = (DatabaseConfig.url(Some(dir.toString)), DatabaseConfig.user, DatabaseConfig.password)
org.h2.Driver.load()
using(DriverManager.getConnection(url, user, pass)) { conn =>
val solidbase = new Solidbase()
val db = new H2Database()
db.setConnection(new JdbcConnection(conn)) // TODO Remove setConnection in the future
solidbase.migrate(conn, Thread.currentThread.getContextClassLoader, db, GitBucketCoreModule)
}
Database.forURL(url, user, pass).withSession { session =>
action(session)
}
}
}
def generateNewAccount(name: String)(implicit s: Session): Account = {
AccountService.createAccount(name, name, name, s"${name}@example.com", false, None, None)
user(name)
}
def user(name: String)(implicit s: Session): Account = AccountService.getAccountByUserName(name).get
lazy val dummyService = new RepositoryService with AccountService with ActivityService with IssuesService
with PullRequestService with CommitsService with CommitStatusService with LabelsService with MilestonesService
with PrioritiesService with WebHookService with WebHookPullRequestService
with WebHookPullRequestReviewCommentService {}
def generateNewUserWithDBRepository(userName: String, repositoryName: String)(implicit s: Session): Account = {
val ac = AccountService.getAccountByUserName(userName).getOrElse(generateNewAccount(userName))
dummyService.insertRepository(repositoryName, userName, None, false)
ac
}
def generateNewIssue(userName: String, repositoryName: String, loginUser: String = "root")(
implicit s: Session
): Int = {
dummyService.insertIssue(
owner = userName,
repository = repositoryName,
loginUser = loginUser,
title = "issue title",
content = None,
assignedUserName = None,
milestoneId = None,
priorityId = None,
isPullRequest = true
)
}
def generateNewPullRequest(base: String, request: String, loginUser: String = null)(
implicit s: Session
): (Issue, PullRequest) = {
    val Array(baseUserName, baseRepositoryName, baseBranch) = base.split("/")
val Array(requestUserName, requestRepositoryName, requestBranch) = request.split("/")
val issueId = generateNewIssue(baseUserName, baseRepositoryName, Option(loginUser).getOrElse(requestUserName))
dummyService.createPullRequest(
originUserName = baseUserName,
originRepositoryName = baseRepositoryName,
issueId = issueId,
      originBranch = baseBranch,
requestUserName = requestUserName,
requestRepositoryName = requestRepositoryName,
requestBranch = requestBranch,
      commitIdFrom = baseBranch,
commitIdTo = requestBranch
)
dummyService.getPullRequest(baseUserName, baseRepositoryName, issueId).get
}
}
| mann-ed/gitbucket | src/test/scala/gitbucket/core/service/ServiceSpecBase.scala | Scala | apache-2.0 | 3,626 |
/*
* This file is part of Apparat.
*
* Copyright (C) 2010 Joa Ebert
* http://www.joa-ebert.com/
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
package apparat.pbj
import optimization.{PbjLoopDetection, PbjOptimizer}
import pbjdata._
import java.io.{
File => JFile,
BufferedInputStream => JBufferedInputStream,
ByteArrayInputStream => JByteArrayInputStream,
ByteArrayOutputStream => JByteArrayOutputStream,
FileInputStream => JFileInputStream,
FileOutputStream => JFileOutputStream,
InputStream => JInputStream,
OutputStream => JOutputStream}
import apparat.utils.IO._
import collection.mutable.ListBuffer
import apparat.utils.{IndentingPrintWriter, Dumpable}
object Pbj {
val loopDetection = new PbjLoopDetection(16)
def main(args: Array[String]): Unit = {
val pbj = fromFile(args(0))
println(pbj.toFragmentShader)
}
def fromByteArray(byteArray: Array[Byte]) = {
val pbj = new Pbj
pbj read byteArray
pbj
}
def fromFile(file: JFile): Pbj = {
val pbj = new Pbj
pbj read file
pbj
}
def fromFile(pathname: String): Pbj = fromFile(new JFile(pathname))
def fromInputStream(input: JInputStream) = {
val pbj = new Pbj
pbj read input
pbj
}
}
/**
* @author Joa Ebert
*/
class Pbj extends Dumpable {
var version = 1
var name = ""
var metadata = List.empty[PMeta]
var parameters: List[(PParam, List[PMeta])] = (OutCoord, List.empty[PMeta]) :: Nil
var textures = List.empty[PTexture]
var code = List.empty[POp]
def parametersAsArray = parameters.toArray
def read(file: JFile): Unit = using(new JBufferedInputStream(new JFileInputStream(file), 0x1000)) { read(_) }
def read(pathname: String): Unit = read(new JFile(pathname))
def read(input: JInputStream): Unit = using(new PbjInputStream(input)) { read(_) }
def read(data: Array[Byte]): Unit = using(new JByteArrayInputStream(data)) { read(_) }
def read(input: PbjInputStream): Unit = {
var metadataBuffer = List.empty[PMeta]
var parameterBuffer = List.empty[(PParam, ListBuffer[PMeta])]
var parameterMetadataBuffer = ListBuffer.empty[PMeta]
var textureBuffer = List.empty[PTexture]
var codeBuffer = List.empty[POp]
for(op <- input) op match {
case PKernelMetaData(value) => metadataBuffer = value :: metadataBuffer
case PParameterData(value) =>
parameterMetadataBuffer = ListBuffer.empty[PMeta]
parameterBuffer = (value -> parameterMetadataBuffer) :: parameterBuffer
case PParameterMetaData(value) => parameterMetadataBuffer += value
case PTextureData(value) => textureBuffer = value :: textureBuffer
case PKernelName(value) => name = value
case PVersionData(value) => version = value
case _ => codeBuffer = op :: codeBuffer
}
metadata = metadataBuffer.reverse
parameters = parameterBuffer.reverse map { x => x._1 -> x._2.toList }
textures = textureBuffer.reverse
code = codeBuffer.reverse
}
def write(file: JFile): Unit = using(new JFileOutputStream(file)) { write(_) }
def write(pathname: String): Unit = write(new JFile(pathname))
def write(output: JOutputStream): Unit = using(new PbjOutputStream(output)) { write(_) }
def write(output: PbjOutputStream): Unit = {
@inline def writeOp(value: POp) = output writeOp value
@inline def mapAndWrite[A, B <: POp](l: List[A], m: A => B) = l map m foreach writeOp
writeOp(PVersionData(version))
writeOp(PKernelName(name))
mapAndWrite(metadata, PKernelMetaData(_: PMeta))
for((p, m) <- parameters) {
writeOp(PParameterData(p))
mapAndWrite(m, PParameterMetaData(_: PMeta))
}
mapAndWrite(textures, PTextureData(_: PTexture))
code foreach writeOp
output.flush()
}
override def dump(writer: IndentingPrintWriter) = {
writer <= "Pbj:"
writer withIndent {
writer <= "Version: "+version
writer <= "Name: "+name
writer <= metadata.length+" metadata:"
writer <<< metadata
writer <= parameters.length+" parameter(s):"
writer <<< parameters
writer <= textures.length+" texture(s):"
writer <<< textures
writer <= "Code:"
writer <<< code
}
}
def toByteArray = {
val byteArrayOutputStream = new JByteArrayOutputStream()
using(byteArrayOutputStream) { write(_) }
byteArrayOutputStream.toByteArray
}
def toVertexShader = "void main(){gl_Position=ftransform();}"/*void main(){"+
((textures map { _.index } map { "gl_TexCoord[%1$d] = gl_MultiTexCoord%1$d;" format _ }).foldLeft("") { _ + _ } )+
"gl_Position = ftransform();}"*/
def toFragmentShader = {
val builder = new StringBuilder()
val outReg = (parameters map { _._1 } find { case POutParameter(_, _, _) => true; case _ => false } map { _.register }) getOrElse error("Could not find output parameter.")
var ints = Set.empty[Int]
var floats = Set.empty[Int]
var mat2 = Set.empty[Int]
var mat3 = Set.empty[Int]
var mat4 = Set.empty[Int]
@inline def add(r: PReg) = r match {
case PIntReg(index, _) => ints += index
case PFloatReg(index, swizzle) => swizzle match {
case PChannelM2x2 :: Nil => mat2 += index
case PChannelM3x3 :: Nil => mat3 += index
case PChannelM4x4 :: Nil => mat4 += index
case _ => floats += index
}
}
code foreach {
case PCopy(dst, src) => {
if(src.swizzle != Nil && src.swizzle.length == 1) {
src.swizzle.head match {
case PChannelM2x2 => mat2 += dst.index
case PChannelM3x3 => mat3 += dst.index
case PChannelM4x4 => mat4 += dst.index
case _ =>
add(dst)
add(src)
}
} else {
add(dst)
add(src)
}
}
case op: PLogical =>
add(op.dst)
add(op.src)
ints += 0
case op: PDstAndSrc =>
add(op.dst)
add(op.src)
case op: PSrc => add(op.src)
case op: PDst => add(op.dst)
case _ =>
}
@inline def write(value: String) = builder.append(value+"\n")
@inline def swizzleToString(swizzle: List[PChannel]) = {
if(swizzle.length == 0) "" else {
val result = (swizzle map { _ match {
case PChannelR => "x"
case PChannelG => "y"
case PChannelB => "z"
case PChannelA => "w"
case _ => ""
}}).foldLeft("") { _ + _ }
if(result.length != 0) "."+result else ""
}
}
@inline def regToString(reg: PReg) = reg match {
case PFloatReg(index, swizzle) => index match {
case x if x == outReg.index => "gl_FragColor"+swizzleToString(swizzle)
case y => swizzle match {
case PChannelM2x2 :: Nil => "m2"+y
case PChannelM3x3 :: Nil => "m3"+y
case PChannelM4x4 :: Nil => "m4"+y
case _ => "f"+y+swizzleToString(swizzle)
}
}
case PIntReg(index, swizzle) => "i"+index+swizzleToString(swizzle)
}
@inline def binop(dst: PReg, src: PReg, operator: String = "?"): Unit = {
write(regToString(dst)+"="+regToString(dst)+operator+regToString(src)+";")
}
@inline def unop(dst: PReg, src: PReg, operator: String = "?"): Unit = write(regToString(dst)+"="+operator+regToString(src)+";")
@inline def logical(dst: PReg, src: PReg, operator: String = "?"): Unit = write("i0.x=int("+regToString(dst)+operator+regToString(src)+");")
@inline def call2(dst: PReg, src: PReg, name: String = "?"): Unit = {
if(dst.swizzle.length > 1) {
name match {
//todo we need to cast here to the swizzle for older graphics cards...
case "dot" =>
write(regToString(dst)+"="+cast(dst).get+"("+name+"("+regToString(dst)+","+regToString(src)+"));")
case _ =>
write(regToString(dst)+"="+name+"("+regToString(dst)+","+regToString(src)+");")
}
} else {
write(regToString(dst)+"="+name+"("+regToString(dst)+","+regToString(src)+");")
}
}
@inline def call1(dst: PReg, src: PReg, name: String = "?"): Unit = write(regToString(dst)+"="+name+"("+regToString(src)+");")
@inline def visit(dst: PReg, src: PReg, operator: String = "?"): Unit = write(regToString(dst)+"="+regToString(dst)+operator+regToString(src)+";")
@inline def cast(reg: PReg): Option[String] = {
val swizzle = reg.swizzle
if(swizzle == Nil) return None
if(swizzle.length == 1) {
swizzle.head match {
case PChannelM2x2 | PChannelM3x3 | PChannelM4x4 => return None
case _ =>
}
}
val isFloat = reg match { case PFloatReg(_, _) => true; case PIntReg(_, _) => false }
Some(glslType(swizzle.length match {
case 1 => if(isFloat) PFloatType else PIntType
case 2 => if(isFloat) PFloat2Type else PInt2Type
case 3 => if(isFloat) PFloat3Type else PInt3Type
case 4 => if(isFloat) PFloat4Type else PInt4Type
case _ => error("Invalid swizzle "+swizzle)
}))
}
@inline def explicitCast(reg: PReg, value: String) = reg match {
case PIntReg(index, swizzle) =>
if(swizzle.isEmpty) "ivec4("+value+")"
else if(swizzle.length == 1) "int("+value+")"
else if(swizzle.length == 2) "int2("+value+")"
else if(swizzle.length == 3) "int3("+value+")"
else if(swizzle.length == 4) "int4("+value+")"
else error("Unexpected swizzle "+swizzle+".")
case PFloatReg(index, swizzle) =>
if(swizzle.isEmpty) "vec4("+value+")"
else if(swizzle.length == 1) {
swizzle.head match {
case PChannelM2x2 => "mat2("+value+")"
case PChannelM3x3 => "mat3("+value+")"
case PChannelM4x4 => "mat4("+value+")"
case _ => "float("+value+")"
}
}
else if(swizzle.length == 2) "vec2("+value+")"
else if(swizzle.length == 3) "vec3("+value+")"
else if(swizzle.length == 4) "vec4("+value+")"
else error("Unexpected swizzle "+swizzle+".")
}
@inline def glslType(`type`: PNumeric): String = `type` match {
case PFloatType => "float"
case PFloat2Type => "vec2"
case PFloat3Type => "vec3"
case PFloat4Type => "vec4"
case PFloat2x2Type => "mat2"
case PFloat3x3Type => "mat3"
case PFloat4x4Type => "mat4"
case PIntType => "int"
case PInt2Type => "ivec2"
case PInt3Type => "ivec3"
case PInt4Type => "ivec4"
case PBoolType => "bool"
case PBool2Type => "bvec2"
case PBool3Type => "bvec3"
case PBool4Type => "bvec4"
}
//write("#version 100")
write("#extension GL_ARB_texture_rectangle : enable")
write("uniform vec4 PB_OFFSET;")
val inputs = parameters map { _._1 } collect { case in: PInParameter if in.name != "_OutCoord" => in }
textures map { _.index } map { "uniform sampler2DRect tex%d;" format _ } foreach write
inputs map { p => "uniform "+glslType(p.`type`)+" "+p.name+";" } foreach write
write("void main(){")
ints map { "ivec4 i"+_+";" } foreach write
floats map { "vec4 f"+_+";" } foreach write
mat2 map { "mat2 m2"+_+";" } foreach write
mat3 map { "mat3 m3"+_+";" } foreach write
mat4 map { "mat4 m4"+_+";" } foreach write
write("f0.xy=gl_FragCoord.xy-PB_OFFSET.xy;")
write("f0.y=PB_OFFSET.z+PB_OFFSET.w*f0.y;")
inputs map { p => regToString(p.register)+"="+p.name+";" } foreach write
case class BeginLoop(n: Int)
case object EndLoop
def whoWantsToLoopForever_?(code: List[POp]): List[Any] = {
val detection = Pbj loopDetection code
var r: List[Any] = code
for((value, ranges) <- detection) {
for((rangeStart, rangeEnd) <- ranges) {
val n = (rangeEnd - rangeStart) / value.length
if(n > 8) {
r = r.take(rangeStart - 1) ::: List(BeginLoop(n)) ::: value ::: List(EndLoop) ::: r.drop(rangeEnd)
}
}
}
r
}
var n = whoWantsToLoopForever_?(code)
var r = List.empty[Any]
//
// We add a basic simplification step here. Nothing fancy.
//
//
// r.x = x
// r.y = y
// r.z = z -> r.xyz = vec3(x,y,z)
//
// r = x
// r = r op y -> r = x op y
//
// r = 1/x
// r = r * y -> r = y / x
//
while(n.nonEmpty) {
n match {
case Nil =>
/*case PLoadFloat(PFloatReg(i, PChannelR :: Nil), a) :: PLoadFloat(PFloatReg(j, PChannelG :: Nil), b) :: PLoadFloat(PFloatReg(k, PChannelB :: Nil), c) :: xs if i == j && j == k =>
r = "f"+i+".xyz"+"=vec3("+a+","+b+","+c+");" :: r
n = xs
case PLoadFloat(d0, a) :: PAdd(d1, s1) :: xs if d0 == d1 =>
r = (regToString(d1)+"="+a+"+"+regToString(s1)+";") :: r
n = xs
case PLoadFloat(d0, a) :: PSubtract(d1, s1) :: xs if d0 == d1 =>
r = (regToString(d1)+"="+a+"-"+regToString(s1)+";") :: r
n = xs
case PLoadFloat(d0, a) :: PMultiply(d1, s1) :: xs if d0 == d1 =>
r = (regToString(d1)+"="+a+"*"+regToString(s1)+";") :: r
n = xs
case PLoadFloat(d0, a) :: PDivide(d1, s1) :: xs if d0 == d1 =>
r = (regToString(d1)+"="+a+"/"+regToString(s1)+";") :: r
n = xs
case PCopy(d0, s0) :: PAdd(d1, s1) :: xs if d0 == d1 =>
r = (regToString(d1)+"="+regToString(s0)+"+"+regToString(s1)+";") :: r
n = xs
case PCopy(d0, s0) :: PSubtract(d1, s1) :: xs if d0 == d1 =>
r = (regToString(d1)+"="+regToString(s0)+"-"+regToString(s1)+";") :: r
n = xs
case PCopy(d0, s0) :: PMultiply(d1, s1) :: xs if d0 == d1 =>
r = (regToString(d1)+"="+regToString(s0)+"*"+regToString(s1)+";") :: r
n = xs
case PCopy(d0, s0) :: PDivide(d1, s1) :: xs if d0 == d1 =>
r = (regToString(d1)+"="+regToString(s0)+"/"+regToString(s1)+";") :: r
n = xs
case PReciprocal(d0, s0) :: PMultiply(d1, s1) :: xs if d0 == d1 =>
r = (regToString(d1)+"="+regToString(s1)+"/"+regToString(s0)+";") :: r
n = xs*/
case x :: xs =>
r = x :: r
n = xs
}
}
r.reverse foreach {
case BeginLoop(n) => write("for(int ii=0;ii<"+n+";++ii){")
case EndLoop => write("}")
case x: String => write(x)
case PNop() =>
case PAdd(dst, src) => binop(dst, src, "+")
case PSubtract(dst, src) => binop(dst, src, "-")
case PMultiply(dst, src) => binop(dst, src, "*")
case PReciprocal(dst, src) => write(regToString(dst)+"=1.0/"+regToString(src)+";")
case PDivide(dst, src) => binop(dst, src, "/")
case PAtan2(dst, src) => call2(dst, src, "atan")
case PPow(dst, src) => call2(dst, src, "pow")
case PMod(dst, src) => call2(dst, src, "mod")
case PMin(dst, src) => call2(dst, src, "min")
case PMax(dst, src) => call2(dst, src, "max")
case PStep(dst, src) => call2(dst, src, "step")
case PSin(dst, src) => call1(dst, src, "sin")
case PCos(dst, src) => call1(dst, src, "cos")
case PTan(dst, src) => call1(dst, src, "tan")
case PASin(dst, src) => call1(dst, src, "asin")
case PACos(dst, src) => call1(dst, src, "acos")
case PATan(dst, src) => call1(dst, src, "atan")
case PExp(dst, src) => call1(dst, src, "exp")
case PExp2(dst, src) => call1(dst, src, "exp2")
case PLog(dst, src) => call1(dst, src, "log")
case PLog2(dst, src) => call1(dst, src, "log2")
case PSqrt(dst, src) => call1(dst, src, "sqrt")
case PRSqrt(dst, src) => write(regToString(dst)+"=1.0/sqrt("+regToString(src)+");")
case PAbs(dst, src) => call1(dst, src, "abs")
case PSign(dst, src) => call1(dst, src, "sign")
case PFloor(dst, src) => call1(dst, src, "floor")
case PCeil(dst, src) => call1(dst, src, "ceil")
case PFract(dst, src) => call1(dst, src, "fract")
case PCopy(dst, src) => src.swizzle match {
case PChannelM2x2 :: Nil => write("m2"+dst.index+"=mat2(f"+src.index+");")
case PChannelM3x3 :: Nil => write("m3"+dst.index+"=mat3(vec3(f"+src.index+"), vec3(f"+(src.index+1)+"), vec3(f"+(src.index+2)+"));")
case PChannelM4x4 :: Nil => write("m4"+dst.index+"=mat4(f"+src.index+");")
case _ => write(regToString(dst)+"="+regToString(src)+";")
}
case PFloatToInt(dst, src) => call1(dst, src, "int")
case PIntToFloat(dst, src) => call1(dst, src, "float")
case PMatrixMatrixMultiply(dst, src) => binop(dst, src, "*")
case PVectorMatrixMultiply(dst, src) => binop(dst, src, "*")
case PMatrixVectorMultiply(dst, src) => binop(dst, src, "*")
case PNormalize(dst, src) => call1(dst, src, "normalize")
case PLength(dst, src) => call1(dst, src, "length")
case PDistance(dst, src) => call1(dst, src, "distance")
case PDotProduct(dst, src) => call2(dst, src, "dot")
case PCrossProduct(dst, src) => call2(dst, src, "cross")
case PEqual(dst, src) => logical(dst, src, "==")
case PNotEqual(dst, src) => logical(dst, src, "!=")
case PLessThan(dst, src) => logical(dst, src, "<")
case PLessThanEqual(dst, src) => logical(dst, src, "<=")
case PLogicalNot(dst, src) => unop(dst, src, "~")
case PLogicalAnd(dst, src) => binop(dst, src, "&")
case PLogicalOr(dst, src) => binop(dst, src, "|")
case PLogicalXor(dst, src) => binop(dst, src, "^")
case PSampleNearest(dst, src, texture: Int) => write(regToString(dst)+"=texture2DRect(tex"+texture+","+regToString(src)+");")//+"/texs"+texture+");")
case PSampleBilinear(dst, src, texture: Int) => write(regToString(dst)+"=texture2DRect(tex"+texture+","+regToString(src)+");")//+"/texs"+texture+");")
case PLoadInt(dst: PReg, value: Int) => write(regToString(dst)+"="+value.toString+";")
case PLoadFloat(dst: PReg, value: Float) => {
dst match {
case floatReg: PFloatReg => write(regToString(floatReg)+"="+explicitCast(floatReg, value.toString)+";")
case intReg: PIntReg => write(regToString(intReg)+"="+explicitCast(intReg, value.toString)+";")
}
}
case PSelect(dst, src, src0, src1) => write(regToString(dst)+"=bool("+regToString(src)+")?"+regToString(src0)+":"+regToString(src1)+";")
case PIf(condition) => write("if(bool("+regToString(condition)+")){")
case PElse() => write("}else{")
case PEndif() => write("}")
case PFloatToBool(dst, src) => call1(dst, src, "bool")
case PBoolToFloat(dst, src) => call1(dst, src, "float")
case PIntToBool(dst, src) => call1(dst, src, "bool")
case PBoolToInt(dst, src) => call1(dst, src, "int")
case PVectorEqual(dst, src) => logical(dst, src, "==")
case PVectorNotEqual(dst, src) => logical(dst, src, "!=")
case PAny(dst, src) => visit(dst, src)
case PAll(dst, src) => visit(dst, src)
case PKernelMetaData(_) =>
case PParameterData(_) =>
case PParameterMetaData(_) =>
case PTextureData(_) =>
case PKernelName(_) =>
case PVersionData(_) =>
}
write("}")
builder.toString
}
}
| joa/apparat | apparat-core/src/main/scala/apparat/pbj/Pbj.scala | Scala | lgpl-2.1 | 18,586 |
/**
* Copyright (C) 2017 DANS - Data Archiving and Networked Services ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cake.greeter.component
import java.nio.file.Path
import org.apache.commons.configuration.PropertiesConfiguration
import resource.managed
import scala.io.Source
/**
 * Provides GeneralProperties, which holds the application's version number and its configuration properties
*/
trait PropertiesComponent {
val properties: GeneralProperties
trait GeneralProperties {
val version: String
val properties: PropertiesConfiguration
}
object GeneralProperties {
def apply(home: Path): GeneralProperties = new GeneralProperties {
override val version: String = managed(Source.fromFile(home.resolve("version").toFile)).acquireAndGet(_.mkString)
override val properties = new PropertiesConfiguration(home.resolve("cfg/application.properties").toFile)
}
}
}
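/**
 * Editor's wiring sketch (not part of the original source). It illustrates how an application
 * object would mix the component in and supply the single `properties` instance; the home path
 * used here is hypothetical.
 */
object ExamplePropertiesWiring extends PropertiesComponent {
  override val properties: GeneralProperties = GeneralProperties(java.nio.file.Paths.get("/opt/greeter"))
}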
| rvanheest/easy-greeter | src/main/scala/cake/greeter/component/PropertiesComponent.scala | Scala | apache-2.0 | 1,429 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.fuberlin.wiwiss.silk.workspace.scripts
import de.fuberlin.wiwiss.silk.workspace.scripts.RunResult.Run
import de.fuberlin.wiwiss.silk.learning.LearningResult
/**
* Holds the result of a sequence of learning runs.
*/
case class RunResult(runs: Seq[Run]) {
  override def toString = runs.mkString("\n")
}
object RunResult {
/**
* Holds the results of a single learning run.
*/
case class Run(results: Seq[LearningResult]) {
override def toString = results.mkString(", ")
/**
* Compute the number of iterations needed to reach a specific F-measure.
*/
def iterations(fMeasure: Double): Int = {
results.indexWhere(_.validationResult.fMeasure >= fMeasure) match {
case -1 => 50//throw new IllegalArgumentException("Target F-measure " + fMeasure + " never reached.")
case i => i
}
}
}
} | fusepoolP3/p3-silk | silk-workspace/src/main/scala/de/fuberlin/wiwiss/silk/workspace/scripts/RunResult.scala | Scala | apache-2.0 | 1,426 |
package edu.cmu.dynet
/** Builder for creating GRUs, as in the C++ code. For its public methods see
* [[edu.cmu.dynet.RnnBuilder]].
*/
class GruBuilder private[dynet](private[dynet] val builder: internal.GRUBuilder)
extends RnnBuilder(builder) {
/** Create a new, empty GruBuilder. */
def this() { this(new internal.GRUBuilder()) }
/** Create a GruBuilder with the specified parameters.
*/
def this(layers: Long, inputDim: Long, hiddenDim: Long, model: Model) {
this(new internal.GRUBuilder(layers, inputDim, hiddenDim, model.model))
}
}
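// Editor's usage sketch (not part of the original source), assuming `Model` exposes a
// no-argument constructor as elsewhere in these bindings:
//
//   val model = new Model()
//   val gru = new GruBuilder(layers = 2, inputDim = 128, hiddenDim = 256, model = model)
//   // `gru` is then driven through the inherited RnnBuilder interface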
| cherryc/dynet | contrib/swig/src/main/scala/edu/cmu/dynet/GRUBuilder.scala | Scala | apache-2.0 | 575 |
/*
* Copyright (c) 2011-2017 Interfaculty Department of Geoinformatics, University of
* Salzburg (Z_GIS) & Institute of Geological and Nuclear Sciences Limited (GNS Science)
* in the SMART Aquifer Characterisation (SAC) programme funded by the New Zealand
* Ministry of Business, Innovation and Employment (MBIE)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package models.users
import java.sql.Connection
import java.time.ZonedDateTime
import java.util.UUID
import anorm.SqlParser.get
import anorm.{NamedParameter, SQL, ~}
import utils.ClassnameLogger
/**
* User Files cross ref
*/
/**
 * Represents a single file uploaded by a user.
 *
 * @param uuid unique identifier of this file record
 * @param users_accountsubject account subject of the user who owns the file
 * @param originalfilename the name of the file when uploaded
 * @param linkreference the full Google Cloud Bucket linkref
 * @param laststatustoken token describing the file's last known status
 * @param laststatuschange timestamp of the last status change
*/
final case class UserFile(uuid: UUID,
users_accountsubject: String,
originalfilename: String,
linkreference: String,
laststatustoken: String,
laststatuschange: ZonedDateTime) extends ClassnameLogger
object UserFile extends ClassnameLogger {
/*
CREATE TABLE userfiles (
uuid varchar(255) NOT NULL,
users_accountsubject varchar(255) REFERENCES users(accountsubject),
originalfilename TEXT NOT NULL,
linkreference TEXT NOT NULL,
laststatustoken varchar(255) NOT NULL,
laststatuschange TIMESTAMP WITH TIME ZONE NOT NULL,
PRIMARY KEY (uuid)
);
*/
val userFilesParser = {
get[String]("uuid") ~
get[String]("users_accountsubject") ~
get[String]("originalfilename") ~
get[String]("linkreference") ~
get[String]("laststatustoken") ~
get[ZonedDateTime]("laststatuschange") map {
case uuid ~ users_accountsubject ~ originalfilename ~ linkreference ~ laststatustoken ~ laststatuschange =>
UserFile(UUID.fromString(uuid), users_accountsubject, originalfilename, linkreference, laststatustoken, laststatuschange)
}
}
def getAllUserFiles()(implicit connection: Connection): Seq[UserFile] = {
SQL(s"select * from $table_userfiles").as(userFilesParser *)
}
def findUserFileByAccountSubject(users_accountsubject: String)(implicit connection: Connection): Seq[UserFile] = {
SQL(s"select * from $table_userfiles where users_accountsubject = {users_accountsubject}").on(
'users_accountsubject -> users_accountsubject
).as(userFilesParser *)
}
def findUserFileByUuid(uuid: UUID)(implicit connection: Connection): Option[UserFile] = {
SQL(s"select * from $table_userfiles where uuid = {uuid}").on(
'uuid -> uuid.toString
).as(userFilesParser.singleOpt)
}
  def findUserFilesByLink(link: String)(implicit connection: Connection): Seq[UserFile] = {
    // anorm does not expand {placeholders} inside quoted literals, so the wildcards are bound as part of the value
    SQL(s"select * from $table_userfiles where linkreference LIKE {link} OR originalfilename LIKE {link}").on(
      'link -> s"%$link%"
    ).as(userFilesParser *)
  }
def createUserFile(userFile: UserFile)(implicit connection: Connection): Option[UserFile] = {
val nps = Seq[NamedParameter](// Tuples as NamedParameter
"uuid" -> userFile.uuid.toString,
"users_accountsubject" -> userFile.users_accountsubject,
"originalfilename" -> userFile.originalfilename,
"linkreference" -> userFile.linkreference,
"laststatustoken" -> userFile.laststatustoken,
"laststatuschange" -> userFile.laststatuschange)
val rowCount = SQL(
s"""
insert into $table_userfiles values (
{uuid}, {users_accountsubject}, {originalfilename}, {linkreference}, {laststatustoken}, {laststatuschange}
)
""").on(nps: _*).executeUpdate()
rowCount match {
case 1 => Some(userFile)
case _ => None
}
}
def updateUserFile(userFile: UserFile)(implicit connection: Connection): Option[UserFile] = ???
def deleteUserFile(userFile: UserFile)(implicit connection: Connection): Boolean = {
deleteUserFile(userFile.uuid)
}
def deleteUserFile(uuid: UUID)(implicit connection: Connection): Boolean = {
val rowCount = SQL(s"delete from $table_userfiles where uuid = {uuid}").on(
'uuid -> uuid.toString
).executeUpdate()
rowCount match {
case 1 => true
case _ => false
}
}
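  // Editor's usage sketch (not in the original source); the field values are hypothetical and a
  // JDBC Connection is assumed to be in implicit scope:
  //
  //   val file = UserFile(UUID.randomUUID(), "auth0|12345", "bores.csv",
  //     "https://storage.googleapis.com/bucket/bores.csv", "UPLOAD", ZonedDateTime.now())
  //   UserFile.createUserFile(file)          // Some(file) when exactly one row was inserted
  //   UserFile.findUserFileByUuid(file.uuid) // Some(file)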
} | ZGIS/smart-portal-backend | app/models/users/UserFile.scala | Scala | apache-2.0 | 4,803 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.api.scala
import com.esotericsoftware.kryo.Serializer
import org.apache.flink.annotation.{Internal, Public, PublicEvolving}
import org.apache.flink.api.common.io.{FileInputFormat, FilePathFilter, InputFormat}
import org.apache.flink.api.common.restartstrategy.RestartStrategies.RestartStrategyConfiguration
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.typeutils.ResultTypeQueryable
import org.apache.flink.api.java.typeutils.runtime.kryo.KryoSerializer
import org.apache.flink.api.scala.ClosureCleaner
import org.apache.flink.configuration.{Configuration, ReadableConfig}
import org.apache.flink.core.execution.{JobClient, JobListener}
import org.apache.flink.runtime.state.AbstractStateBackend
import org.apache.flink.runtime.state.StateBackend
import org.apache.flink.streaming.api.environment.{StreamExecutionEnvironment => JavaEnv}
import org.apache.flink.streaming.api.functions.source._
import org.apache.flink.streaming.api.functions.source.SourceFunction.SourceContext
import org.apache.flink.streaming.api.{CheckpointingMode, TimeCharacteristic}
import org.apache.flink.util.SplittableIterator
import scala.collection.JavaConverters._
import _root_.scala.language.implicitConversions
@Public
class StreamExecutionEnvironment(javaEnv: JavaEnv) {
/**
* @return the wrapped Java environment
*/
def getJavaEnv: JavaEnv = javaEnv
/**
* Gets the config object.
*/
def getConfig = javaEnv.getConfig
/**
* Gets cache files.
*/
def getCachedFiles = javaEnv.getCachedFiles
/**
* Gets the config JobListeners.
*/
@PublicEvolving
def getJobListeners = javaEnv.getJobListeners
/**
* Sets the parallelism for operations executed through this environment.
* Setting a parallelism of x here will cause all operators (such as join, map, reduce) to run
* with x parallel instances. This value can be overridden by specific operations using
* [[DataStream#setParallelism(int)]].
*/
def setParallelism(parallelism: Int): Unit = {
javaEnv.setParallelism(parallelism)
}
/**
* Sets the maximum degree of parallelism defined for the program.
* The maximum degree of parallelism specifies the upper limit for dynamic scaling. It also
* defines the number of key groups used for partitioned state.
**/
def setMaxParallelism(maxParallelism: Int): Unit = {
javaEnv.setMaxParallelism(maxParallelism)
}
/**
* Returns the default parallelism for this execution environment. Note that this
* value can be overridden by individual operations using [[DataStream#setParallelism(int)]]
*/
def getParallelism = javaEnv.getParallelism
/**
* Returns the maximum degree of parallelism defined for the program.
*
* The maximum degree of parallelism specifies the upper limit for dynamic scaling. It also
* defines the number of key groups used for partitioned state.
*
*/
def getMaxParallelism = javaEnv.getMaxParallelism
/**
* Sets the maximum time frequency (milliseconds) for the flushing of the
* output buffers. By default the output buffers flush frequently to provide
* low latency and to aid smooth developer experience. Setting the parameter
* can result in three logical modes:
*
* <ul>
* <li>A positive integer triggers flushing periodically by that integer</li>
* <li>0 triggers flushing after every record thus minimizing latency</li>
* <li>-1 triggers flushing only when the output buffer is full thus maximizing throughput</li>
* </ul>
*/
def setBufferTimeout(timeoutMillis: Long): StreamExecutionEnvironment = {
javaEnv.setBufferTimeout(timeoutMillis)
this
}
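  // Editor's illustration (not part of the original source), with a hypothetical environment `env`:
  //
  //   env.setBufferTimeout(100) // flush at least every 100 ms
  //   env.setBufferTimeout(0)   // flush after every record (lowest latency)
  //   env.setBufferTimeout(-1)  // flush only on full buffers (highest throughput)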
/**
* Gets the default buffer timeout set for this environment
*/
def getBufferTimeout = javaEnv.getBufferTimeout
/**
* Disables operator chaining for streaming operators. Operator chaining
* allows non-shuffle operations to be co-located in the same thread fully
* avoiding serialization and de-serialization.
*
*/
@PublicEvolving
def disableOperatorChaining(): StreamExecutionEnvironment = {
javaEnv.disableOperatorChaining()
this
}
// ------------------------------------------------------------------------
// Checkpointing Settings
// ------------------------------------------------------------------------
/**
* Gets the checkpoint config, which defines values like checkpoint interval, delay between
* checkpoints, etc.
*/
def getCheckpointConfig = javaEnv.getCheckpointConfig()
/**
* Enables checkpointing for the streaming job. The distributed state of the streaming
* dataflow will be periodically snapshotted. In case of a failure, the streaming
* dataflow will be restarted from the latest completed checkpoint.
*
* The job draws checkpoints periodically, in the given interval. The state will be
* stored in the configured state backend.
*
   * NOTE: Checkpointing iterative streaming dataflows is not properly supported at
* the moment. If the "force" parameter is set to true, the system will execute the
* job nonetheless.
*
* @param interval
* Time interval between state checkpoints in millis.
* @param mode
* The checkpointing mode, selecting between "exactly once" and "at least once" guarantees.
* @param force
* If true checkpointing will be enabled for iterative jobs as well.
*/
@deprecated
@PublicEvolving
def enableCheckpointing(interval : Long,
mode: CheckpointingMode,
force: Boolean) : StreamExecutionEnvironment = {
javaEnv.enableCheckpointing(interval, mode, force)
this
}
/**
* Enables checkpointing for the streaming job. The distributed state of the streaming
* dataflow will be periodically snapshotted. In case of a failure, the streaming
* dataflow will be restarted from the latest completed checkpoint.
*
* The job draws checkpoints periodically, in the given interval. The system uses the
* given [[CheckpointingMode]] for the checkpointing ("exactly once" vs "at least once").
* The state will be stored in the configured state backend.
*
   * NOTE: Checkpointing iterative streaming dataflows is not properly supported at
* the moment. For that reason, iterative jobs will not be started if used
* with enabled checkpointing. To override this mechanism, use the
* [[enableCheckpointing(long, CheckpointingMode, boolean)]] method.
*
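   * An illustrative sketch (the interval value is a placeholder):
   * {{{
   *   env.enableCheckpointing(60000, CheckpointingMode.AT_LEAST_ONCE)
   * }}}
   *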
* @param interval
* Time interval between state checkpoints in milliseconds.
* @param mode
* The checkpointing mode, selecting between "exactly once" and "at least once" guarantees.
*/
def enableCheckpointing(interval : Long,
mode: CheckpointingMode) : StreamExecutionEnvironment = {
javaEnv.enableCheckpointing(interval, mode)
this
}
/**
* Enables checkpointing for the streaming job. The distributed state of the streaming
* dataflow will be periodically snapshotted. In case of a failure, the streaming
* dataflow will be restarted from the latest completed checkpoint.
*
* The job draws checkpoints periodically, in the given interval. The program will use
* [[CheckpointingMode.EXACTLY_ONCE]] mode. The state will be stored in the
* configured state backend.
*
   * NOTE: Checkpointing iterative streaming dataflows is not properly supported at
* the moment. For that reason, iterative jobs will not be started if used
* with enabled checkpointing. To override this mechanism, use the
* [[enableCheckpointing(long, CheckpointingMode, boolean)]] method.
*
* @param interval
* Time interval between state checkpoints in milliseconds.
*/
def enableCheckpointing(interval : Long) : StreamExecutionEnvironment = {
enableCheckpointing(interval, CheckpointingMode.EXACTLY_ONCE)
}
/**
* Method for enabling fault-tolerance. Activates monitoring and backup of streaming
   * operator states. The time interval between state checkpoints is specified in milliseconds.
*
   * Setting this option assumes that the job is used in production; unless stated otherwise
   * by calling the [[setRestartStrategy]] method, the job will be resubmitted to the cluster
   * indefinitely in case of failure.
*/
@deprecated
@PublicEvolving
def enableCheckpointing() : StreamExecutionEnvironment = {
javaEnv.enableCheckpointing()
this
}
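  /**
   * Returns the checkpointing mode (exactly-once vs. at-least-once) configured for this
   * environment.
   */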
def getCheckpointingMode = javaEnv.getCheckpointingMode()
/**
* Sets the state backend that describes how to store and checkpoint operator state. It defines
   * both which data structures hold state during execution (for example hash tables, RocksDB,
* or other data stores) as well as where checkpointed data will be persisted.
*
* State managed by the state backend includes both keyed state that is accessible on
* [[org.apache.flink.streaming.api.datastream.KeyedStream keyed streams]], as well as
* state maintained directly by the user code that implements
* [[org.apache.flink.streaming.api.checkpoint.CheckpointedFunction CheckpointedFunction]].
*
* The [[org.apache.flink.runtime.state.memory.MemoryStateBackend]], for example,
* maintains the state in heap memory, as objects. It is lightweight without extra dependencies,
* but can checkpoint only small states (some counters).
*
* In contrast, the [[org.apache.flink.runtime.state.filesystem.FsStateBackend]]
* stores checkpoints of the state (also maintained as heap objects) in files.
   * When using a replicated file system (like HDFS, S3, MapR FS, Alluxio, etc.) this guarantees
   * that state is not lost upon failures of individual nodes and that the streaming program can
   * be executed in a highly available and strongly consistent fashion.
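   *
   * An illustrative sketch (assumes `FsStateBackend` is imported from
   * `org.apache.flink.runtime.state.filesystem`; the checkpoint URI is a placeholder):
   * {{{
   *   env.setStateBackend(new FsStateBackend("hdfs://namenode:40010/flink/checkpoints"))
   * }}}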
*/
@PublicEvolving
def setStateBackend(backend: StateBackend): StreamExecutionEnvironment = {
javaEnv.setStateBackend(backend)
this
}
/**
* @deprecated Use [[StreamExecutionEnvironment.setStateBackend(StateBackend)]] instead.
*/
@Deprecated
@PublicEvolving
def setStateBackend(backend: AbstractStateBackend): StreamExecutionEnvironment = {
setStateBackend(backend.asInstanceOf[StateBackend])
}
/**
* Returns the state backend that defines how to store and checkpoint state.
*/
@PublicEvolving
def getStateBackend: StateBackend = javaEnv.getStateBackend()
/**
* Sets the restart strategy configuration. The configuration specifies which restart strategy
* will be used for the execution graph in case of a restart.
*
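   * An illustrative sketch (assumes `RestartStrategies` from
   * `org.apache.flink.api.common.restartstrategy` is imported; the values are placeholders):
   * {{{
   *   env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 10000))
   * }}}
   *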
* @param restartStrategyConfiguration Restart strategy configuration to be set
*/
@PublicEvolving
def setRestartStrategy(restartStrategyConfiguration: RestartStrategyConfiguration): Unit = {
javaEnv.setRestartStrategy(restartStrategyConfiguration)
}
/**
* Returns the specified restart strategy configuration.
*
* @return The restart strategy configuration to be used
*/
@PublicEvolving
def getRestartStrategy: RestartStrategyConfiguration = {
javaEnv.getRestartStrategy()
}
/**
* Sets the number of times that failed tasks are re-executed. A value of zero
* effectively disables fault tolerance. A value of "-1" indicates that the system
* default value (as defined in the configuration) should be used.
*
* @deprecated This method will be replaced by [[setRestartStrategy()]]. The
* FixedDelayRestartStrategyConfiguration contains the number of execution retries.
*/
@PublicEvolving
def setNumberOfExecutionRetries(numRetries: Int): Unit = {
javaEnv.setNumberOfExecutionRetries(numRetries)
}
/**
* Gets the number of times the system will try to re-execute failed tasks. A value
* of "-1" indicates that the system default value (as defined in the configuration)
* should be used.
*
* @deprecated This method will be replaced by [[getRestartStrategy]]. The
* FixedDelayRestartStrategyConfiguration contains the number of execution retries.
*/
@PublicEvolving
def getNumberOfExecutionRetries = javaEnv.getNumberOfExecutionRetries
// --------------------------------------------------------------------------------------------
// Registry for types and serializers
// --------------------------------------------------------------------------------------------
/**
* Adds a new Kryo default serializer to the Runtime.
* <p/>
* Note that the serializer instance must be serializable (as defined by
* java.io.Serializable), because it may be distributed to the worker nodes
* by java serialization.
*
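   * An illustrative sketch (`MyType` and `MyKryoSerializer` are hypothetical classes):
   * {{{
   *   env.addDefaultKryoSerializer(classOf[MyType], new MyKryoSerializer)
   * }}}
   *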
* @param type
* The class of the types serialized with the given serializer.
* @param serializer
* The serializer to use.
*/
def addDefaultKryoSerializer[T <: Serializer[_] with Serializable](
`type`: Class[_],
serializer: T)
: Unit = {
javaEnv.addDefaultKryoSerializer(`type`, serializer)
}
/**
* Adds a new Kryo default serializer to the Runtime.
*
* @param type
* The class of the types serialized with the given serializer.
* @param serializerClass
* The class of the serializer to use.
*/
  def addDefaultKryoSerializer(
      `type`: Class[_],
      serializerClass: Class[_ <: Serializer[_]])
    : Unit = {
javaEnv.addDefaultKryoSerializer(`type`, serializerClass)
}
/**
* Registers the given type with the serializer at the [[KryoSerializer]].
*
* Note that the serializer instance must be serializable (as defined by java.io.Serializable),
* because it may be distributed to the worker nodes by java serialization.
*/
def registerTypeWithKryoSerializer[T <: Serializer[_] with Serializable](
clazz: Class[_],
serializer: T)
: Unit = {
javaEnv.registerTypeWithKryoSerializer(clazz, serializer)
}
/**
* Registers the given type with the serializer at the [[KryoSerializer]].
*/
  def registerTypeWithKryoSerializer(
      clazz: Class[_],
      serializer: Class[_ <: Serializer[_]])
    : Unit = {
javaEnv.registerTypeWithKryoSerializer(clazz, serializer)
}
/**
* Registers the given type with the serialization stack. If the type is eventually
* serialized as a POJO, then the type is registered with the POJO serializer. If the
* type ends up being serialized with Kryo, then it will be registered at Kryo to make
* sure that only tags are written.
*
*/
  def registerType(typeClass: Class[_]): Unit = {
javaEnv.registerType(typeClass)
}
// --------------------------------------------------------------------------------------------
// Time characteristic
// --------------------------------------------------------------------------------------------
/**
   * Sets the time characteristic for all streams created from this environment, e.g., processing
* time, event time, or ingestion time.
*
   * If you set the characteristic to IngestionTime or EventTime this will set a default
* watermark update interval of 200 ms. If this is not applicable for your application
* you should change it using
* [[org.apache.flink.api.common.ExecutionConfig#setAutoWatermarkInterval(long)]]
*
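   * An illustrative sketch:
   * {{{
   *   env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
   * }}}
   *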
* @param characteristic The time characteristic.
*/
@PublicEvolving
def setStreamTimeCharacteristic(characteristic: TimeCharacteristic) : Unit = {
javaEnv.setStreamTimeCharacteristic(characteristic)
}
/**
   * Gets the time characteristic.
*
* @see #setStreamTimeCharacteristic
* @return The time characteristic.
*/
@PublicEvolving
def getStreamTimeCharacteristic = javaEnv.getStreamTimeCharacteristic()
/**
* Sets all relevant options contained in the [[ReadableConfig]] such as e.g.
* [[org.apache.flink.streaming.api.environment.StreamPipelineOptions#TIME_CHARACTERISTIC]].
* It will reconfigure [[StreamExecutionEnvironment]],
* [[org.apache.flink.api.common.ExecutionConfig]] and
* [[org.apache.flink.streaming.api.environment.CheckpointConfig]].
*
* It will change the value of a setting only if a corresponding option was set in the
* `configuration`. If a key is not present, the current value of a field will remain
* untouched.
*
* @param configuration a configuration to read the values from
* @param classLoader a class loader to use when loading classes
*/
@PublicEvolving
def configure(configuration: ReadableConfig, classLoader: ClassLoader): Unit = {
javaEnv.configure(configuration, classLoader)
}
// --------------------------------------------------------------------------------------------
// Data stream creations
// --------------------------------------------------------------------------------------------
/**
   * Creates a new DataStream that contains a sequence of numbers. This source is a parallel
   * source. If you manually set the parallelism to `1` the emitted elements are in order.
*/
def generateSequence(from: Long, to: Long): DataStream[Long] = {
new DataStream[java.lang.Long](javaEnv.generateSequence(from, to))
.asInstanceOf[DataStream[Long]]
}
/**
* Creates a DataStream that contains the given elements. The elements must all be of the
* same type.
*
* Note that this operation will result in a non-parallel data source, i.e. a data source with
* a parallelism of one.
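   *
   * An illustrative sketch:
   * {{{
   *   val stream: DataStream[Int] = env.fromElements(1, 2, 3)
   * }}}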
*/
def fromElements[T: TypeInformation](data: T*): DataStream[T] = {
fromCollection(data)
}
/**
* Creates a DataStream from the given non-empty [[Seq]]. The elements need to be serializable
* because the framework may move the elements into the cluster if needed.
*
* Note that this operation will result in a non-parallel data source, i.e. a data source with
* a parallelism of one.
*/
def fromCollection[T: TypeInformation](data: Seq[T]): DataStream[T] = {
require(data != null, "Data must not be null.")
val typeInfo = implicitly[TypeInformation[T]]
val collection = scala.collection.JavaConversions.asJavaCollection(data)
asScalaStream(javaEnv.fromCollection(collection, typeInfo))
}
/**
* Creates a DataStream from the given [[Iterator]].
*
* Note that this operation will result in a non-parallel data source, i.e. a data source with
* a parallelism of one.
*/
def fromCollection[T: TypeInformation] (data: Iterator[T]): DataStream[T] = {
val typeInfo = implicitly[TypeInformation[T]]
asScalaStream(javaEnv.fromCollection(data.asJava, typeInfo))
}
/**
* Creates a DataStream from the given [[SplittableIterator]].
*/
def fromParallelCollection[T: TypeInformation] (data: SplittableIterator[T]):
DataStream[T] = {
val typeInfo = implicitly[TypeInformation[T]]
asScalaStream(javaEnv.fromParallelCollection(data, typeInfo))
}
/**
* Creates a DataStream that represents the Strings produced by reading the
* given file line wise. The file will be read with the system's default
* character set.
*/
def readTextFile(filePath: String): DataStream[String] =
asScalaStream(javaEnv.readTextFile(filePath))
/**
* Creates a data stream that represents the Strings produced by reading the given file
* line wise. The character set with the given name will be used to read the files.
*/
def readTextFile(filePath: String, charsetName: String): DataStream[String] =
asScalaStream(javaEnv.readTextFile(filePath, charsetName))
/**
* Reads the given file with the given input format. The file path should be passed
* as a URI (e.g., "file:///some/local/file" or "hdfs://host:port/file/path").
*/
def readFile[T: TypeInformation](inputFormat: FileInputFormat[T], filePath: String):
DataStream[T] =
asScalaStream(javaEnv.readFile(inputFormat, filePath))
/**
   * Creates a DataStream that contains the contents of files created while the
   * system watches the given path. The files will be read with the system's
   * default character set. The user can specify the monitoring interval in milliseconds
   * and the way file modifications are handled. By default it checks only for new files
   * every 100 milliseconds.
*
*/
@Deprecated
def readFileStream(StreamPath: String, intervalMillis: Long = 100,
watchType: FileMonitoringFunction.WatchType =
FileMonitoringFunction.WatchType.ONLY_NEW_FILES): DataStream[String] =
asScalaStream(javaEnv.readFileStream(StreamPath, intervalMillis, watchType))
/**
   * Reads the contents of the user-specified path based on the given [[FileInputFormat]].
   * The source behaves according to the provided [[FileProcessingMode]].
*
* @param inputFormat
* The input format used to create the data stream
* @param filePath
* The path of the file, as a URI (e.g., "file:///some/local/file" or
* "hdfs://host:port/file/path")
* @param watchType
* The mode in which the source should operate, i.e. monitor path and react
* to new data, or process once and exit
* @param interval
* In the case of periodic path monitoring, this specifies the interval (in millis)
* between consecutive path scans
* @param filter
* The files to be excluded from the processing
* @return The data stream that represents the data read from the given file
* @deprecated Use [[FileInputFormat#setFilesFilter(FilePathFilter)]] to set a filter and
* [[StreamExecutionEnvironment#readFile(FileInputFormat, String, FileProcessingMode, long)]]
*/
@PublicEvolving
@Deprecated
def readFile[T: TypeInformation](
inputFormat: FileInputFormat[T],
filePath: String,
watchType: FileProcessingMode,
interval: Long,
filter: FilePathFilter): DataStream[T] = {
asScalaStream(javaEnv.readFile(inputFormat, filePath, watchType, interval, filter))
}
/**
* Reads the contents of the user-specified path based on the given [[FileInputFormat]].
* Depending on the provided [[FileProcessingMode]], the source
* may periodically monitor (every `interval` ms) the path for new data
* ([[FileProcessingMode.PROCESS_CONTINUOUSLY]]), or process
* once the data currently in the path and exit
* ([[FileProcessingMode.PROCESS_ONCE]]). In addition,
* if the path contains files not to be processed, the user can specify a custom
* [[FilePathFilter]]. As a default implementation you can use
* [[FilePathFilter.createDefaultFilter()]].
*
* ** NOTES ON CHECKPOINTING: ** If the `watchType` is set to
* [[FileProcessingMode#PROCESS_ONCE]], the source monitors the path ** once **,
* creates the [[org.apache.flink.core.fs.FileInputSplit FileInputSplits]]
* to be processed, forwards them to the downstream
* [[ContinuousFileReaderOperator readers]] to read the actual data,
* and exits, without waiting for the readers to finish reading. This
* implies that no more checkpoint barriers are going to be forwarded
* after the source exits, thus having no checkpoints after that point.
*
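   * An illustrative sketch of continuous monitoring (assumes `TextInputFormat` and `Path` are
   * imported; the path is a placeholder):
   * {{{
   *   val format = new TextInputFormat(new Path("hdfs://host:port/file/path"))
   *   val lines = env.readFile(format, "hdfs://host:port/file/path",
   *     FileProcessingMode.PROCESS_CONTINUOUSLY, 100)
   * }}}
   *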
* @param inputFormat
* The input format used to create the data stream
* @param filePath
* The path of the file, as a URI (e.g., "file:///some/local/file" or
* "hdfs://host:port/file/path")
* @param watchType
* The mode in which the source should operate, i.e. monitor path and react
* to new data, or process once and exit
* @param interval
* In the case of periodic path monitoring, this specifies the interval (in millis)
* between consecutive path scans
* @return The data stream that represents the data read from the given file
*/
@PublicEvolving
def readFile[T: TypeInformation](
inputFormat: FileInputFormat[T],
filePath: String,
watchType: FileProcessingMode,
interval: Long): DataStream[T] = {
val typeInfo = implicitly[TypeInformation[T]]
asScalaStream(javaEnv.readFile(inputFormat, filePath, watchType, interval, typeInfo))
}
/**
   * Creates a new DataStream that contains the strings received infinitely
   * from the socket. Received strings are decoded by the system's default
   * character set. The maximum retry interval is specified in seconds; in case
   * of a temporary service outage, reconnection is initiated every second.
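   *
   * An illustrative sketch (host and port are placeholders):
   * {{{
   *   val lines = env.socketTextStream("localhost", 9999)
   * }}}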
*/
@PublicEvolving
  def socketTextStream(hostname: String, port: Int, delimiter: Char = '\n', maxRetry: Long = 0):
        DataStream[String] =
    asScalaStream(javaEnv.socketTextStream(hostname, port, delimiter, maxRetry))
/**
* Generic method to create an input data stream with a specific input format.
* Since all data streams need specific information about their types, this method needs to
* determine the type of the data produced by the input format. It will attempt to determine the
* data type by reflection, unless the input format implements the ResultTypeQueryable interface.
*/
@PublicEvolving
def createInput[T: TypeInformation](inputFormat: InputFormat[T, _]): DataStream[T] =
if (inputFormat.isInstanceOf[ResultTypeQueryable[_]]) {
asScalaStream(javaEnv.createInput(inputFormat))
} else {
asScalaStream(javaEnv.createInput(inputFormat, implicitly[TypeInformation[T]]))
}
/**
* Create a DataStream using a user defined source function for arbitrary
* source functionality. By default sources have a parallelism of 1.
* To enable parallel execution, the user defined source should implement
* ParallelSourceFunction or extend RichParallelSourceFunction.
   * In these cases the resulting source will have the parallelism of the environment.
   * To change this afterwards, call DataStreamSource.setParallelism(int).
*
*/
def addSource[T: TypeInformation](function: SourceFunction[T]): DataStream[T] = {
require(function != null, "Function must not be null.")
val cleanFun = scalaClean(function)
val typeInfo = implicitly[TypeInformation[T]]
asScalaStream(javaEnv.addSource(cleanFun, typeInfo))
}
/**
* Create a DataStream using a user defined source function for arbitrary
* source functionality.
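   *
   * An illustrative sketch of a bounded source (assumes the usual
   * `org.apache.flink.streaming.api.scala._` import for implicit type information):
   * {{{
   *   val numbers = env.addSource[Long] { ctx =>
   *     (1L to 100L).foreach(ctx.collect)
   *   }
   * }}}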
*/
def addSource[T: TypeInformation](function: SourceContext[T] => Unit): DataStream[T] = {
require(function != null, "Function must not be null.")
val sourceFunction = new SourceFunction[T] {
val cleanFun = scalaClean(function)
override def run(ctx: SourceContext[T]) {
cleanFun(ctx)
}
override def cancel() = {}
}
addSource(sourceFunction)
}
/**
* Triggers the program execution. The environment will execute all parts of
* the program that have resulted in a "sink" operation. Sink operations are
* for example printing results or forwarding them to a message queue.
*
* The program execution will be logged and displayed with a generated
* default name.
*
* @return The result of the job execution, containing elapsed time and accumulators.
*/
def execute() = javaEnv.execute()
/**
* Triggers the program execution. The environment will execute all parts of
* the program that have resulted in a "sink" operation. Sink operations are
* for example printing results or forwarding them to a message queue.
*
* The program execution will be logged and displayed with the provided name.
*
* @return The result of the job execution, containing elapsed time and accumulators.
*/
def execute(jobName: String) = javaEnv.execute(jobName)
/**
* Register a [[JobListener]] in this environment. The [[JobListener]] will be
   * notified on specific job status changes.
*/
@PublicEvolving
def registerJobListener(jobListener: JobListener): Unit = {
javaEnv.registerJobListener(jobListener)
}
/**
* Clear all registered [[JobListener]]s.
*/
@PublicEvolving def clearJobListeners(): Unit = {
javaEnv.clearJobListeners()
}
/**
* Triggers the program execution asynchronously. The environment will execute all parts of
* the program that have resulted in a "sink" operation. Sink operations are
* for example printing results or forwarding them to a message queue.
*
* The program execution will be logged and displayed with a generated
* default name.
*
* <b>ATTENTION:</b> The caller of this method is responsible for managing the lifecycle
* of the returned [[JobClient]]. This means calling [[JobClient#close()]] at the end of
   * its usage. Otherwise, there may be resource leaks depending on the JobClient
   * implementation.
*
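   * An illustrative sketch of the lifecycle described above:
   * {{{
   *   val jobClient = env.executeAsync()
   *   try {
   *     // interact with the running job through `jobClient`
   *   } finally {
   *     jobClient.close()
   *   }
   * }}}
   *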
* @return A [[JobClient]] that can be used to communicate with the submitted job,
   * completed once the job submission has succeeded.
*/
@PublicEvolving
def executeAsync(): JobClient = javaEnv.executeAsync()
/**
* Triggers the program execution asynchronously. The environment will execute all parts of
* the program that have resulted in a "sink" operation. Sink operations are
* for example printing results or forwarding them to a message queue.
*
* The program execution will be logged and displayed with the provided name.
*
* <b>ATTENTION:</b> The caller of this method is responsible for managing the lifecycle
* of the returned [[JobClient]]. This means calling [[JobClient#close()]] at the end of
   * its usage. Otherwise, there may be resource leaks depending on the JobClient
   * implementation.
*
* @return A [[JobClient]] that can be used to communicate with the submitted job,
   * completed once the job submission has succeeded.
*/
@PublicEvolving
def executeAsync(jobName: String): JobClient = javaEnv.executeAsync(jobName)
/**
* Creates the plan with which the system will execute the program, and
* returns it as a String using a JSON representation of the execution data
   * flow graph. Note that this needs to be called before the plan is
* executed.
*/
def getExecutionPlan = javaEnv.getExecutionPlan
/**
* Getter of the [[org.apache.flink.streaming.api.graph.StreamGraph]] of the streaming job.
* This call clears previously registered
* [[org.apache.flink.api.dag.Transformation transformations]].
*
* @return The StreamGraph representing the transformations
*/
@Internal
def getStreamGraph = javaEnv.getStreamGraph
/**
* Getter of the [[org.apache.flink.streaming.api.graph.StreamGraph]] of the streaming job.
* This call clears previously registered
* [[org.apache.flink.api.dag.Transformation transformations]].
*
* @param jobName Desired name of the job
* @return The StreamGraph representing the transformations
*/
@Internal
def getStreamGraph(jobName: String) = javaEnv.getStreamGraph(jobName)
/**
* Getter of the [[org.apache.flink.streaming.api.graph.StreamGraph]] of the streaming job
* with the option to clear previously registered
* [[org.apache.flink.api.dag.Transformation transformations]]. Clearing the transformations
   * makes it possible, for example, not to re-execute the same operations when calling
* [[execute()]] multiple times.
*
* @param jobName Desired name of the job
* @param clearTransformations Whether or not to clear previously registered transformations
* @return The StreamGraph representing the transformations
*/
@Internal
def getStreamGraph(jobName: String, clearTransformations: Boolean) =
javaEnv.getStreamGraph(jobName, clearTransformations)
/**
* Getter of the wrapped [[org.apache.flink.streaming.api.environment.StreamExecutionEnvironment]]
*
   * @return The wrapped Java StreamExecutionEnvironment
*/
@Internal
def getWrappedStreamExecutionEnvironment = javaEnv
/**
* Returns a "closure-cleaned" version of the given function. Cleans only if closure cleaning
* is not disabled in the [[org.apache.flink.api.common.ExecutionConfig]]
*/
private[flink] def scalaClean[F <: AnyRef](f: F): F = {
if (getConfig.isClosureCleanerEnabled) {
ClosureCleaner.clean(f, true, getConfig.getClosureCleanerLevel)
} else {
ClosureCleaner.ensureSerializable(f)
}
f
}
/**
* Registers a file at the distributed cache under the given name. The file will be accessible
* from any user-defined function in the (distributed) runtime under a local path. Files
* may be local files (which will be distributed via BlobServer), or files in a distributed file
* system. The runtime will copy the files temporarily to a local cache, if needed.
*
* The [[org.apache.flink.api.common.functions.RuntimeContext]] can be obtained inside UDFs
* via [[org.apache.flink.api.common.functions.RichFunction#getRuntimeContext()]] and
   * provides access to the [[org.apache.flink.api.common.cache.DistributedCache]] via
* [[org.apache.flink.api.common.functions.RuntimeContext#getDistributedCache()]].
*
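   * An illustrative sketch (the path and name are placeholders):
   * {{{
   *   env.registerCachedFile("hdfs://host:port/and/path", "lookupData")
   * }}}
   *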
* @param filePath The path of the file, as a URI (e.g. "file:///some/path" or
* "hdfs://host:port/and/path")
* @param name The name under which the file is registered.
*/
def registerCachedFile(filePath: String, name: String): Unit = {
javaEnv.registerCachedFile(filePath, name)
}
/**
* Registers a file at the distributed cache under the given name. The file will be accessible
* from any user-defined function in the (distributed) runtime under a local path. Files
* may be local files (which will be distributed via BlobServer), or files in a distributed file
* system. The runtime will copy the files temporarily to a local cache, if needed.
*
* The [[org.apache.flink.api.common.functions.RuntimeContext]] can be obtained inside UDFs
* via [[org.apache.flink.api.common.functions.RichFunction#getRuntimeContext()]] and
   * provides access to the [[org.apache.flink.api.common.cache.DistributedCache]] via
* [[org.apache.flink.api.common.functions.RuntimeContext#getDistributedCache()]].
*
* @param filePath The path of the file, as a URI (e.g. "file:///some/path" or
* "hdfs://host:port/and/path")
* @param name The name under which the file is registered.
* @param executable flag indicating whether the file should be executable
*/
def registerCachedFile(filePath: String, name: String, executable: Boolean): Unit = {
javaEnv.registerCachedFile(filePath, name, executable)
}
}
object StreamExecutionEnvironment {
/**
* Sets the default parallelism that will be used for the local execution
* environment created by [[createLocalEnvironment()]].
*
* @param parallelism The default parallelism to use for local execution.
*/
@PublicEvolving
def setDefaultLocalParallelism(parallelism: Int) : Unit =
JavaEnv.setDefaultLocalParallelism(parallelism)
/**
* Gets the default parallelism that will be used for the local execution environment created by
* [[createLocalEnvironment()]].
*/
@PublicEvolving
def getDefaultLocalParallelism: Int = JavaEnv.getDefaultLocalParallelism
// --------------------------------------------------------------------------
// context environment
// --------------------------------------------------------------------------
/**
* Creates an execution environment that represents the context in which the program is
* currently executed. If the program is invoked standalone, this method returns a local
* execution environment. If the program is invoked from within the command line client
* to be submitted to a cluster, this method returns the execution environment of this cluster.
*/
def getExecutionEnvironment: StreamExecutionEnvironment = {
new StreamExecutionEnvironment(JavaEnv.getExecutionEnvironment)
}
// --------------------------------------------------------------------------
// local environment
// --------------------------------------------------------------------------
/**
* Creates a local execution environment. The local execution environment will run the
* program in a multi-threaded fashion in the same JVM as the environment was created in.
*
   * This method sets the environment's default parallelism to the given parameter, which
* defaults to the value set via [[setDefaultLocalParallelism(Int)]].
*/
def createLocalEnvironment(parallelism: Int = JavaEnv.getDefaultLocalParallelism):
StreamExecutionEnvironment = {
new StreamExecutionEnvironment(JavaEnv.createLocalEnvironment(parallelism))
}
/**
* Creates a local execution environment. The local execution environment will run the
* program in a multi-threaded fashion in the same JVM as the environment was created in.
*
* @param parallelism The parallelism for the local environment.
* @param configuration Pass a custom configuration into the cluster.
*/
def createLocalEnvironment(parallelism: Int, configuration: Configuration):
StreamExecutionEnvironment = {
new StreamExecutionEnvironment(JavaEnv.createLocalEnvironment(parallelism, configuration))
}
/**
* Creates a [[StreamExecutionEnvironment]] for local program execution that also starts the
* web monitoring UI.
*
* The local execution environment will run the program in a multi-threaded fashion in
* the same JVM as the environment was created in. It will use the parallelism specified in the
* parameter.
*
* If the configuration key 'rest.port' was set in the configuration, that particular
* port will be used for the web UI. Otherwise, the default port (8081) will be used.
*
* @param config optional config for the local execution
* @return The created StreamExecutionEnvironment
*/
@PublicEvolving
def createLocalEnvironmentWithWebUI(config: Configuration = null): StreamExecutionEnvironment = {
val conf: Configuration = if (config == null) new Configuration() else config
new StreamExecutionEnvironment(JavaEnv.createLocalEnvironmentWithWebUI(conf))
}
// --------------------------------------------------------------------------
// remote environment
// --------------------------------------------------------------------------
/**
* Creates a remote execution environment. The remote environment sends (parts of) the program to
* a cluster for execution. Note that all file paths used in the program must be accessible from
* the cluster. The execution will use the cluster's default parallelism, unless the
* parallelism is set explicitly via [[StreamExecutionEnvironment.setParallelism()]].
*
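   * An illustrative sketch (host, port and jar path are placeholders):
   * {{{
   *   val env = StreamExecutionEnvironment.createRemoteEnvironment(
   *     "jobmanager-host", 6123, "/path/to/userProgram.jar")
   * }}}
   *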
* @param host The host name or address of the master (JobManager),
* where the program should be executed.
* @param port The port of the master (JobManager), where the program should be executed.
   * @param jarFiles The JAR files with code that needs to be shipped to the cluster. If the
   *                 program uses user-defined functions, user-defined input formats, or any
   *                 libraries, those must be provided in the JAR files.
*/
def createRemoteEnvironment(host: String, port: Int, jarFiles: String*):
StreamExecutionEnvironment = {
new StreamExecutionEnvironment(JavaEnv.createRemoteEnvironment(host, port, jarFiles: _*))
}
/**
* Creates a remote execution environment. The remote environment sends (parts of) the program
* to a cluster for execution. Note that all file paths used in the program must be accessible
* from the cluster. The execution will use the specified parallelism.
*
* @param host The host name or address of the master (JobManager),
* where the program should be executed.
* @param port The port of the master (JobManager), where the program should be executed.
* @param parallelism The parallelism to use during the execution.
   * @param jarFiles The JAR files with code that needs to be shipped to the cluster. If the
   *                 program uses user-defined functions, user-defined input formats, or any
   *                 libraries, those must be provided in the JAR files.
*/
def createRemoteEnvironment(
host: String,
port: Int,
parallelism: Int,
jarFiles: String*): StreamExecutionEnvironment = {
val javaEnv = JavaEnv.createRemoteEnvironment(host, port, jarFiles: _*)
javaEnv.setParallelism(parallelism)
new StreamExecutionEnvironment(javaEnv)
}
}
| bowenli86/flink | flink-streaming-scala/src/main/scala/org/apache/flink/streaming/api/scala/StreamExecutionEnvironment.scala | Scala | apache-2.0 | 41,443 |
package net.pointsgame.lift.snippet
import net.liftweb.common.Loggable
import net.liftweb.http._
import net.liftweb.util.Helpers._
import net.liftweb.util.{CssSel, Props}
class SimpleSnippets extends Loggable {
lazy val room = S.uri.split('/').lastOption getOrElse "TODO"
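	// Rewrites the snippet invocation to pass the current room (the last URI segment)
	// when it is a valid name of 1-20 alphanumeric characters; otherwise renders an error.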
def specifyRoom: CssSel = {
if (room matches "[a-zA-Z0-9]{1,20}") {
".specifyRoom [data-lift+]" #> s"&name=$room" &
"* [class!]" #> "specifyRoom"
} else {
"*" #> "error constructing room"
}
}
def lang = "* *" #> S.locale.toString
def scalajsSuffix = "* [src+]" #> LiftRules.attachResourceId {
if (Props.productionMode) {
"-opt.js"
} else {
"-fastopt.js"
}
}
}
| vn971/points-wip | modules/lift-server/src/main/scala/net/pointsgame/lift/snippet/SimpleSnippets.scala | Scala | agpl-3.0 | 674 |